Compare commits
80 Commits
SHA1
5a9519cfe1
7776efc0dd
ef205c23c3
ca6d9fa912
161c6dfbea
787aad29db
c07fedbb47
594a5727c7
b115d52e67
1b7fe0b2bf
fa5d812fea
00df3ca6cc
1eef3f65c7
ad8dd82e4c
8f305511e2
593d5b750d
abef6ff79a
8049c795db
405e8f52b2
e2c99b52ea
164e3c5ff2
b1056d4808
b5ecad3e34
671a3f88c9
f0f1972c6f
9348181382
7cd23cb088
7283345a9e
c53f027bf1
85459e6ef2
d41af1e33d
e274aff1ac
601bd989b5
c1c23501e6
03db154fa3
f5cbec6681
f65b1fb959
8d243488a7
196d56d11c
1b7713f44c
ae7bbf10f2
eb7ecc408f
47af6ce6d4
d7dea47191
6b6fcbba72
907e6a0388
9e00ae220d
102a804d77
d1060989df
f008ab6e46
b9e77f1de1
2834b74e90
6761e40961
9aa44f46aa
c62cb53e31
705d7fac01
9c67bab29b
9baa9162d5
3fc07e7b9c
d7d91b16bf
d1762eb426
eb78c72616
42bac4ff9e
e12cc62818
b90446573f
e1e5e3e23e
4f2f15d5f2
5611973faa
39772991f3
e335694a91
a2a96078a3
85f98ca325
52c557c8a3
e29f4de057
142fa3bdea
4de025257e
88e449320c
74aa2656b8
61f3df2cfc
fecf79eb32
.gitignore (vendored, new file, 218 lines)
@@ -0,0 +1,218 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
#poetry.toml

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
#pdm.lock
#pdm.toml
.pdm-python
.pdm-build/

# pixi
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
#pixi.lock
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
# in the .venv directory. It is recommended not to include this directory in version control.
.pixi

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# Redis
*.rdb
*.aof
*.pid

# RabbitMQ
mnesia/
rabbitmq/
rabbitmq-data/

# ActiveMQ
activemq-data/

# SageMath parsed files
*.sage.py

# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Abstra
# Abstra is an AI-powered process automation framework.
# Ignore directories containing user credentials, local state, and settings.
# Learn more at https://abstra.io/docs
.abstra/

# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
# .vscode/

# Ruff stuff:
.ruff_cache/

# PyPI configuration file
.pypirc

# Marimo
marimo/_static/
marimo/_lsp/
__marimo__/

# Streamlit
.streamlit/secrets.toml
OCRBench_v2/CN_part/
OCRBench_v2/EN_part/
.python-version (new file, 1 line)
@@ -0,0 +1 @@
3.13
LICENSE.txt (new file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Yuliang Liu

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
OCRBench/README.md (new file, 59 lines)
@@ -0,0 +1,59 @@
# OCRBench: On the Hidden Mystery of OCR in Large Multimodal Models
<img src="./images/all_data.png" width="96%" height="96%">

> Large models have recently played a dominant role in natural language processing and multimodal vision-language learning. However, their effectiveness in text-related visual tasks remains relatively unexplored. In this paper, we conducted a comprehensive evaluation of Large Multimodal Models, such as GPT4V and Gemini, on various text-related visual tasks including Text Recognition, Scene Text-Centric Visual Question Answering (VQA), Document-Oriented VQA, Key Information Extraction (KIE), and Handwritten Mathematical Expression Recognition (HMER). To facilitate the assessment of Optical Character Recognition (OCR) capabilities in Large Multimodal Models, we propose OCRBench, a comprehensive evaluation benchmark. Our study encompasses 29 datasets, making it the most comprehensive OCR evaluation benchmark available. Furthermore, our study reveals both the strengths and weaknesses of these models, particularly in handling multilingual text, handwritten text, non-semantic text, and mathematical expression recognition. Most importantly, the baseline results showcased in this study could provide a foundational framework for the conception and assessment of innovative strategies targeted at enhancing zero-shot multimodal techniques.

**[Project Page [This Page]](https://github.com/Yuliang-Liu/MultimodalOCR)** | **[Paper](https://arxiv.org/abs/2305.07895)** | **[OCRBench Leaderboard](https://huggingface.co/spaces/echo840/ocrbench-leaderboard)** | **[Opencompass Leaderboard](https://rank.opencompass.org.cn/leaderboard-multimodal)**

# Data
To reduce false positives, we filter out questions whose answers contain fewer than 4 symbols from all datasets.

| Data | Link | Description |
| --- | --- | --- |
| Full Test Json | [Full Test](./OCRBench/FullTest.json) | This file contains the test data used in Table 1 and Table 2 of the [Paper](https://arxiv.org/abs/2305.07895). |
| OCRBench Json | [OCRBench](./OCRBench/OCRBench.json) | This file contains the OCRBench test data used in Table 3 of the [Paper](https://arxiv.org/abs/2305.07895). |
| All Test Images | [All Images](https://drive.google.com/file/d/1U5AtLoJ7FrJe9yfcbssfeLmlKb7dTosc/view?usp=drive_link) | This file contains all the test images used in the [Paper](https://arxiv.org/abs/2305.07895), including the OCRBench images. |
| OCRBench Images | [OCRBench Images](https://drive.google.com/file/d/1a3VRJx3V3SdOmPr7499Ky0Ug8AwqGUHO/view?usp=drive_link) | This file contains only the images used in OCRBench. |
| Test Results | [Test Results](https://drive.google.com/drive/folders/15XlHCuNTavI1Ihqm4G7u3J34BHpkaqyE?usp=drive_link) | This folder contains the result files for the tested models. |
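
The filtering rule above is simple enough to reproduce. Below is a minimal, illustrative sketch (not the repository's code), assuming the JSON schema used by the evaluation scripts in this PR, where each item carries an `answers` field that is either a string or a list of strings:

```python
import json

def keep_item(item, min_len=4):
    # Keep a question only if at least one ground-truth answer
    # has `min_len` or more characters (the 4-symbol rule above).
    answers = item["answers"]
    if isinstance(answers, str):
        answers = [answers]
    return any(len(a.strip()) >= min_len for a in answers)

with open("./OCRBench/FullTest.json") as f:
    data = json.load(f)

filtered = [item for item in data if keep_item(item)]
print(f"kept {len(filtered)} of {len(data)} questions")
```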
# OCRBench

OCRBench is a comprehensive evaluation benchmark designed to assess the OCR capabilities of Large Multimodal Models. It comprises five components: Text Recognition, Scene Text-Centric VQA, Document-Oriented VQA, Key Information Extraction, and Handwritten Mathematical Expression Recognition. The benchmark includes 1000 question-answer pairs, and all answers undergo manual verification and correction to ensure a precise evaluation.

You can find the results of Large Multimodal Models in the **[OCRBench Leaderboard](https://huggingface.co/spaces/echo840/ocrbench-leaderboard)**. If you would like your model included in the OCRBench leaderboard, please follow the evaluation instructions below and feel free to contact us via email at zhangli123@hust.edu.cn; we will update the leaderboard in time.

<img src="./images/GPT4V_Gemini.png" width="96%" height="96%">

# Evaluation
The test code for evaluating the models in the paper can be found in [scripts](./scripts). Before running the evaluation, you need to configure the model weights and environment following the official code link provided in each script. If you want to evaluate other models, please edit the "TODO" items in [example](./example.py).

You can also use [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) and [lmms-eval](https://github.com/EvolvingLMMs-Lab/lmms-eval) for evaluation.

Example evaluation scripts:
```bash
python ./scripts/monkey.py --image_folder ./OCRBench_Images --OCRBench_file ./OCRBench/OCRBench.json --save_name Monkey_OCRBench --num_workers GPU_Nums # Test on OCRBench
python ./scripts/monkey.py --image_folder ./OCRBench_Images --OCRBench_file ./OCRBench/FullTest.json --save_name Monkey_FullTest --num_workers GPU_Nums # Full Test
```
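
All of the evaluation scripts in this PR share one scoring rule: a prediction counts as correct if any ground-truth answer is a substring of it, matched case-insensitively for most datasets and with all whitespace removed for HME100k (handwritten formulas). The scripts inline this logic; the following condensed sketch (the function name is ours) shows the rule on its own:

```python
def match(dataset_name, answers, predict):
    # Return 1 if any ground-truth answer is contained in the prediction.
    if isinstance(answers, str):
        answers = [answers]
    for answer in answers:
        if dataset_name == "HME100k":
            # Formula dataset: compare with all whitespace stripped.
            a = answer.strip().replace("\n", " ").replace(" ", "")
            p = predict.strip().replace("\n", " ").replace(" ", "")
        else:
            # Text datasets: case-insensitive, newlines collapsed to spaces.
            a = answer.lower().strip().replace("\n", " ")
            p = predict.lower().strip().replace("\n", " ")
        if a in p:
            return 1
    return 0
```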
# Citation
If you wish to refer to the baseline results published here, please use the following BibTeX entry:
```BibTeX
@article{Liu_2024,
  title={OCRBench: on the hidden mystery of OCR in large multimodal models},
  volume={67},
  number={12},
  ISSN={1869-1919},
  url={http://dx.doi.org/10.1007/s11432-024-4235-6},
  DOI={10.1007/s11432-024-4235-6},
  journal={Science China Information Sciences},
  publisher={Springer Science and Business Media LLC},
  author={Liu, Yuliang and Li, Zhang and Huang, Mingxin and Yang, Biao and Yu, Wenwen and Li, Chunyuan and Yin, Xu-Cheng and Liu, Cheng-Lin and Jin, Lianwen and Bai, Xiang},
  year={2024},
  month=dec
}
```
@@ -27,7 +27,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--model_path", type=str, default="")#TODO Set the address of your model's weights
[Image diff: 408 KiB before and after]
[Image diff: 1.8 MiB before and after]
@@ -23,7 +23,7 @@ def save_json(json_list,save_path):
         json.dump(json_list, file,indent=4)
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_path", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--OPENAI_API_KEY", type=str, default="")
@@ -21,7 +21,7 @@ def save_json(json_list,save_path):
         json.dump(json_list, file,indent=4)
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_path", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--GOOGLE_API_KEY", type=str, default="")
@@ -32,7 +32,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--model_path", type=str, default="liuhaotian/llava-v1.5-7b")
OCRBench/scripts/MiniMonkey.py (new file, 313 lines)
@@ -0,0 +1,313 @@
import json
from argparse import ArgumentParser
import torch
import os
from tqdm import tqdm
from PIL import Image
import math
import multiprocessing
from multiprocessing import Pool, Queue, Manager
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode

# https://github.com/Yuliang-Liu/Monkey/tree/main/project/mini_monkey

IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)


def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform


def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio


def dynamic_preprocess(image, min_num=5, max_num=6, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images, target_aspect_ratio


def dynamic_preprocess2(image, min_num=1, max_num=6, image_size=448, use_thumbnail=False, prior_aspect_ratio=None):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    new_target_ratios = []
    if prior_aspect_ratio is not None:
        for i in target_ratios:
            if prior_aspect_ratio[0] % i[0] != 0 or prior_aspect_ratio[1] % i[1] != 0:
                new_target_ratios.append(i)
            else:
                continue
    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, new_target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images


def load_image(image_file, input_size=448, min_num=1, max_num=6):
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images, target_aspect_ratio = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, min_num=min_num, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values, target_aspect_ratio


def load_image2(image_file, input_size=448, target_aspect_ratio=(1, 1), min_num=1, max_num=6):
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess2(image, image_size=input_size, prior_aspect_ratio=target_aspect_ratio, use_thumbnail=True, min_num=min_num, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values


def split_list(lst, n):
    length = len(lst)
    avg = length // n  # size of each chunk
    result = []  # holds the split sublists
    for i in range(n - 1):
        result.append(lst[i * avg:(i + 1) * avg])
    result.append(lst[(n - 1) * avg:])
    return result


def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
    parser.add_argument("--output_folder", type=str, default="./results")
    parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
    parser.add_argument("--model_path", type=str, default='mx262/MiniMonkey')  # TODO Set the address of your model's weights
    parser.add_argument("--save_name", type=str, default="MiniMonkey")  # TODO Set the name of the JSON file you save in the output_folder.
    parser.add_argument("--num_workers", type=int, default=1)
    args = parser.parse_args()
    return args


OCRBench_score = {"Regular Text Recognition": 0, "Irregular Text Recognition": 0, "Artistic Text Recognition": 0, "Handwriting Recognition": 0,
                  "Digit String Recognition": 0, "Non-Semantic Text Recognition": 0, "Scene Text-centric VQA": 0, "Doc-oriented VQA": 0,
                  "Key Information Extraction": 0, "Handwritten Mathematical Expression Recognition": 0}
AllDataset_score = {"IIIT5K": 0, "svt": 0, "IC13_857": 0, "IC15_1811": 0, "svtp": 0, "ct80": 0, "cocotext": 0, "ctw": 0, "totaltext": 0, "HOST": 0, "WOST": 0, "WordArt": 0, "IAM": 0, "ReCTS": 0, "ORAND": 0, "NonSemanticText": 0, "SemanticText": 0,
                    "STVQA": 0, "textVQA": 0, "ocrVQA": 0, "ESTVQA": 0, "ESTVQA_cn": 0, "docVQA": 0, "infographicVQA": 0, "ChartQA": 0, "ChartQA_Human": 0, "FUNSD": 0, "SROIE": 0, "POIE": 0, "HME100k": 0}
num_all = {"IIIT5K": 0, "svt": 0, "IC13_857": 0, "IC15_1811": 0, "svtp": 0, "ct80": 0, "cocotext": 0, "ctw": 0, "totaltext": 0, "HOST": 0, "WOST": 0, "WordArt": 0, "IAM": 0, "ReCTS": 0, "ORAND": 0, "NonSemanticText": 0, "SemanticText": 0,
           "STVQA": 0, "textVQA": 0, "ocrVQA": 0, "ESTVQA": 0, "ESTVQA_cn": 0, "docVQA": 0, "infographicVQA": 0, "ChartQA": 0, "ChartQA_Human": 0, "FUNSD": 0, "SROIE": 0, "POIE": 0, "HME100k": 0}


def eval_worker(args, data, eval_id, output_queue):
    print(f"Process {eval_id} start.")
    checkpoint = args.model_path
    model = AutoModel.from_pretrained(
        checkpoint,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True).eval().to(f'cuda:{eval_id}')

    tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)

    for i in tqdm(range(len(data))):
        dataset_name = data[i]["dataset_name"]

        image_path = os.path.join(args.image_folder, data[i]['image_path'])
        qs = data[i]['question']

        pixel_values, target_aspect_ratio = load_image(image_path, min_num=12, max_num=24)
        pixel_values = pixel_values.to(f'cuda:{eval_id}').to(torch.bfloat16)
        pixel_values2 = load_image2(image_path, target_aspect_ratio=target_aspect_ratio, min_num=3, max_num=11)
        pixel_values2 = pixel_values2.to(f'cuda:{eval_id}').to(torch.bfloat16)
        pixel_values = torch.cat((pixel_values[:-1], pixel_values2[:-1], pixel_values[-1:]), 0)

        generation_config = dict(
            num_beams=1,
            max_new_tokens=512,
            do_sample=False,
        )
        question = '<image>\n' + qs + '\nAnswer the question using a single word or phrase.'
        response = model.chat(tokenizer, pixel_values, target_aspect_ratio, question, generation_config)
        data[i]['predict'] = response
    output_queue.put({eval_id: data})
    print(f"Process {eval_id} has completed.")


if __name__ == "__main__":
    multiprocessing.set_start_method('spawn')
    args = _get_args()

    if os.path.exists(os.path.join(args.output_folder, f"{args.save_name}.json")):
        data_path = os.path.join(args.output_folder, f"{args.save_name}.json")
        print(f"output_path:{data_path} exists! Only generating the results that were not generated in {data_path}.")
    else:
        data_path = args.OCRBench_file

    with open(data_path, "r") as f:
        data = json.load(f)

    data_list = split_list(data, args.num_workers)

    output_queue = Manager().Queue()

    pool = Pool(processes=args.num_workers)
    for i in range(len(data_list)):
        pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
    pool.close()
    pool.join()

    results = {}
    while not output_queue.empty():
        result = output_queue.get()
        results.update(result)

    data = []
    for i in range(len(data_list)):
        data.extend(results[i])

    for i in range(len(data)):
        data_type = data[i]["type"]
        dataset_name = data[i]["dataset_name"]
        answers = data[i]["answers"]
        if data[i].get('predict', 0) == 0:
            continue
        predict = data[i]['predict']
        data[i]['result'] = 0
        if dataset_name == "HME100k":
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].strip().replace("\n", " ").replace(" ", "")
                    predict = predict.strip().replace("\n", " ").replace(" ", "")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.strip().replace("\n", " ").replace(" ", "")
                predict = predict.strip().replace("\n", " ").replace(" ", "")
                if answers in predict:
                    data[i]['result'] = 1
        else:
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].lower().strip().replace("\n", " ")
                    predict = predict.lower().strip().replace("\n", " ")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.lower().strip().replace("\n", " ")
                predict = predict.lower().strip().replace("\n", " ")
                if answers in predict:
                    data[i]['result'] = 1
    save_json(data, os.path.join(args.output_folder, f"{args.save_name}.json"))
    if len(data) == 1000:
        for i in range(len(data)):
            if data[i].get("result", 100) == 100:
                continue
            OCRBench_score[data[i]['type']] += data[i]['result']
        recognition_score = OCRBench_score['Regular Text Recognition'] + OCRBench_score['Irregular Text Recognition'] + OCRBench_score['Artistic Text Recognition'] + OCRBench_score['Handwriting Recognition'] + OCRBench_score['Digit String Recognition'] + OCRBench_score['Non-Semantic Text Recognition']
        Final_score = recognition_score + OCRBench_score['Scene Text-centric VQA'] + OCRBench_score['Doc-oriented VQA'] + OCRBench_score['Key Information Extraction'] + OCRBench_score['Handwritten Mathematical Expression Recognition']
        print("###########################OCRBench##############################")
        print(f"Text Recognition(Total 300):{recognition_score}")
        print("------------------Details of Recognition Score-------------------")
        print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
        print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
        print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
        print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
        print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
        print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
        print("----------------------------------------------------------------")
        print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
        print("----------------------------------------------------------------")
        print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
        print("----------------------------------------------------------------")
        print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
        print("----------------------------------------------------------------")
        print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
        print("----------------------Final Score-------------------------------")
        print(f"Final Score(Total 1000): {Final_score}")
    else:
        for i in range(len(data)):
            num_all[data[i]['dataset_name']] += 1
            if data[i].get("result", 100) == 100:
                continue
            AllDataset_score[data[i]['dataset_name']] += data[i]['result']
        for key in AllDataset_score.keys():
            print(f"{key}: {AllDataset_score[key] / float(num_all[key])}")
@@ -28,7 +28,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--model_path", type=str, default="./model_weights/blip2-opt-6.7b")
@@ -27,7 +27,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--model_path", type=str, default="./model_weights/instructblip-vicuna-7b")
@@ -36,7 +36,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--model_path", type=str, default="bliva_vicuna")
OCRBench/scripts/interlm.py (new file, 161 lines)
@@ -0,0 +1,161 @@
import json
from argparse import ArgumentParser
import torch
import os
from tqdm import tqdm
from PIL import Image
import math
import multiprocessing
from multiprocessing import Pool, Queue, Manager
from transformers import AutoModel, AutoTokenizer

# https://github.com/InternLM/InternLM-XComposer/tree/main/InternLM-XComposer-1.0


def split_list(lst, n):
    length = len(lst)
    avg = length // n  # size of each chunk
    result = []  # holds the split sublists
    for i in range(n - 1):
        result.append(lst[i * avg:(i + 1) * avg])
    result.append(lst[(n - 1) * avg:])
    return result


def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
    parser.add_argument("--output_folder", type=str, default="./results")
    parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
    parser.add_argument("--model_path", type=str, default='internlm/internlm-xcomposer-7b')  # TODO Set the address of your model's weights
    parser.add_argument("--save_name", type=str, default="internlm-xcomposer-7b")  # TODO Set the name of the JSON file you save in the output_folder.
    parser.add_argument("--num_workers", type=int, default=1)
    args = parser.parse_args()
    return args


OCRBench_score = {"Regular Text Recognition": 0, "Irregular Text Recognition": 0, "Artistic Text Recognition": 0, "Handwriting Recognition": 0,
                  "Digit String Recognition": 0, "Non-Semantic Text Recognition": 0, "Scene Text-centric VQA": 0, "Doc-oriented VQA": 0,
                  "Key Information Extraction": 0, "Handwritten Mathematical Expression Recognition": 0}
AllDataset_score = {"IIIT5K": 0, "svt": 0, "IC13_857": 0, "IC15_1811": 0, "svtp": 0, "ct80": 0, "cocotext": 0, "ctw": 0, "totaltext": 0, "HOST": 0, "WOST": 0, "WordArt": 0, "IAM": 0, "ReCTS": 0, "ORAND": 0, "NonSemanticText": 0, "SemanticText": 0,
                    "STVQA": 0, "textVQA": 0, "ocrVQA": 0, "ESTVQA": 0, "ESTVQA_cn": 0, "docVQA": 0, "infographicVQA": 0, "ChartQA": 0, "ChartQA_Human": 0, "FUNSD": 0, "SROIE": 0, "POIE": 0, "HME100k": 0}
num_all = {"IIIT5K": 0, "svt": 0, "IC13_857": 0, "IC15_1811": 0, "svtp": 0, "ct80": 0, "cocotext": 0, "ctw": 0, "totaltext": 0, "HOST": 0, "WOST": 0, "WordArt": 0, "IAM": 0, "ReCTS": 0, "ORAND": 0, "NonSemanticText": 0, "SemanticText": 0,
           "STVQA": 0, "textVQA": 0, "ocrVQA": 0, "ESTVQA": 0, "ESTVQA_cn": 0, "docVQA": 0, "infographicVQA": 0, "ChartQA": 0, "ChartQA_Human": 0, "FUNSD": 0, "SROIE": 0, "POIE": 0, "HME100k": 0}


def eval_worker(args, data, eval_id, output_queue):
    print(f"Process {eval_id} start.")
    checkpoint = args.model_path

    torch.set_grad_enabled(False)

    # init model and tokenizer
    model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True, device_map=f'cuda:{eval_id}').eval()
    tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
    model.tokenizer = tokenizer

    for i in tqdm(range(len(data))):
        img_path = os.path.join(args.image_folder, data[i]['image_path'])
        qs = data[i]['question']
        response = model.generate(qs, img_path)
        data[i]['predict'] = response
    output_queue.put({eval_id: data})
    print(f"Process {eval_id} has completed.")


if __name__ == "__main__":
    multiprocessing.set_start_method('spawn')
    args = _get_args()

    if os.path.exists(os.path.join(args.output_folder, f"{args.save_name}.json")):
        data_path = os.path.join(args.output_folder, f"{args.save_name}.json")
        print(f"output_path:{data_path} exists! Only generating the results that were not generated in {data_path}.")
    else:
        data_path = args.OCRBench_file

    with open(data_path, "r") as f:
        data = json.load(f)

    data_list = split_list(data, args.num_workers)

    output_queue = Manager().Queue()

    pool = Pool(processes=args.num_workers)
    for i in range(len(data_list)):
        pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
    pool.close()
    pool.join()

    results = {}
    while not output_queue.empty():
        result = output_queue.get()
        results.update(result)
    data = []
    for i in range(len(data_list)):
        data.extend(results[i])

    for i in range(len(data)):
        data_type = data[i]["type"]
        dataset_name = data[i]["dataset_name"]
        answers = data[i]["answers"]
        if data[i].get('predict', 0) == 0:
            continue
        predict = data[i]['predict']
        data[i]['result'] = 0
        if dataset_name == "HME100k":
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].strip().replace("\n", " ").replace(" ", "")
                    predict = predict.strip().replace("\n", " ").replace(" ", "")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.strip().replace("\n", " ").replace(" ", "")
                predict = predict.strip().replace("\n", " ").replace(" ", "")
                if answers in predict:
                    data[i]['result'] = 1
        else:
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].lower().strip().replace("\n", " ")
                    predict = predict.lower().strip().replace("\n", " ")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.lower().strip().replace("\n", " ")
                predict = predict.lower().strip().replace("\n", " ")
                if answers in predict:
                    data[i]['result'] = 1
    save_json(data, os.path.join(args.output_folder, f"{args.save_name}.json"))
    if len(data) == 1000:
        for i in range(len(data)):
            if data[i].get("result", 100) == 100:
                continue
            OCRBench_score[data[i]['type']] += data[i]['result']
        recognition_score = OCRBench_score['Regular Text Recognition'] + OCRBench_score['Irregular Text Recognition'] + OCRBench_score['Artistic Text Recognition'] + OCRBench_score['Handwriting Recognition'] + OCRBench_score['Digit String Recognition'] + OCRBench_score['Non-Semantic Text Recognition']
        Final_score = recognition_score + OCRBench_score['Scene Text-centric VQA'] + OCRBench_score['Doc-oriented VQA'] + OCRBench_score['Key Information Extraction'] + OCRBench_score['Handwritten Mathematical Expression Recognition']
        print("###########################OCRBench##############################")
        print(f"Text Recognition(Total 300):{recognition_score}")
        print("------------------Details of Recognition Score-------------------")
        print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
        print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
        print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
        print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
        print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
        print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
        print("----------------------------------------------------------------")
        print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
        print("----------------------------------------------------------------")
        print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
        print("----------------------------------------------------------------")
        print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
        print("----------------------------------------------------------------")
        print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
        print("----------------------Final Score-------------------------------")
        print(f"Final Score(Total 1000): {Final_score}")
    else:
        for i in range(len(data)):
            num_all[data[i]['dataset_name']] += 1
            if data[i].get("result", 100) == 100:
                continue
            AllDataset_score[data[i]['dataset_name']] += data[i]['result']
        for key in AllDataset_score.keys():
            print(f"{key}: {AllDataset_score[key] / float(num_all[key])}")
OCRBench/scripts/interlm2.py (new file, 162 lines)
@@ -0,0 +1,162 @@
import json
from argparse import ArgumentParser
import torch
import os
from tqdm import tqdm
from PIL import Image
import math
import multiprocessing
from multiprocessing import Pool, Queue, Manager
from transformers import AutoModel, AutoTokenizer

# https://github.com/InternLM/InternLM-XComposer/tree/main


def split_list(lst, n):
    length = len(lst)
    avg = length // n  # size of each chunk
    result = []  # holds the split sublists
    for i in range(n - 1):
        result.append(lst[i * avg:(i + 1) * avg])
    result.append(lst[(n - 1) * avg:])
    return result


def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
    parser.add_argument("--output_folder", type=str, default="./results")
    parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
    parser.add_argument("--model_path", type=str, default='internlm/internlm-xcomposer2-vl-7b')  # TODO Set the address of your model's weights
    parser.add_argument("--save_name", type=str, default="internlm-xcomposer2-vl-7b")  # TODO Set the name of the JSON file you save in the output_folder.
    parser.add_argument("--num_workers", type=int, default=1)
    args = parser.parse_args()
    return args


OCRBench_score = {"Regular Text Recognition": 0, "Irregular Text Recognition": 0, "Artistic Text Recognition": 0, "Handwriting Recognition": 0,
                  "Digit String Recognition": 0, "Non-Semantic Text Recognition": 0, "Scene Text-centric VQA": 0, "Doc-oriented VQA": 0,
                  "Key Information Extraction": 0, "Handwritten Mathematical Expression Recognition": 0}
AllDataset_score = {"IIIT5K": 0, "svt": 0, "IC13_857": 0, "IC15_1811": 0, "svtp": 0, "ct80": 0, "cocotext": 0, "ctw": 0, "totaltext": 0, "HOST": 0, "WOST": 0, "WordArt": 0, "IAM": 0, "ReCTS": 0, "ORAND": 0, "NonSemanticText": 0, "SemanticText": 0,
                    "STVQA": 0, "textVQA": 0, "ocrVQA": 0, "ESTVQA": 0, "ESTVQA_cn": 0, "docVQA": 0, "infographicVQA": 0, "ChartQA": 0, "ChartQA_Human": 0, "FUNSD": 0, "SROIE": 0, "POIE": 0, "HME100k": 0}
num_all = {"IIIT5K": 0, "svt": 0, "IC13_857": 0, "IC15_1811": 0, "svtp": 0, "ct80": 0, "cocotext": 0, "ctw": 0, "totaltext": 0, "HOST": 0, "WOST": 0, "WordArt": 0, "IAM": 0, "ReCTS": 0, "ORAND": 0, "NonSemanticText": 0, "SemanticText": 0,
           "STVQA": 0, "textVQA": 0, "ocrVQA": 0, "ESTVQA": 0, "ESTVQA_cn": 0, "docVQA": 0, "infographicVQA": 0, "ChartQA": 0, "ChartQA_Human": 0, "FUNSD": 0, "SROIE": 0, "POIE": 0, "HME100k": 0}


def eval_worker(args, data, eval_id, output_queue):
    print(f"Process {eval_id} start.")
    checkpoint = args.model_path
    torch.set_grad_enabled(False)

    # init model and tokenizer
    model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True, device_map=f'cuda:{eval_id}').eval()
    tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)

    for i in tqdm(range(len(data))):
        img_path = os.path.join(args.image_folder, data[i]['image_path'])
        qs = data[i]['question']
        text = f'<ImageHere>{qs}'
        with torch.cuda.amp.autocast():
            response, _ = model.chat(tokenizer, query=text, image=img_path, history=[], do_sample=False)
        data[i]['predict'] = response
    output_queue.put({eval_id: data})
    print(f"Process {eval_id} has completed.")


if __name__ == "__main__":
    multiprocessing.set_start_method('spawn')
    args = _get_args()

    if os.path.exists(os.path.join(args.output_folder, f"{args.save_name}.json")):
        data_path = os.path.join(args.output_folder, f"{args.save_name}.json")
        print(f"output_path:{data_path} exists! Only generating the results that were not generated in {data_path}.")
    else:
        data_path = args.OCRBench_file

    with open(data_path, "r") as f:
        data = json.load(f)

    data_list = split_list(data, args.num_workers)

    output_queue = Manager().Queue()

    pool = Pool(processes=args.num_workers)
    for i in range(len(data_list)):
        pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
    pool.close()
    pool.join()

    results = {}
    while not output_queue.empty():
        result = output_queue.get()
        results.update(result)
    data = []
    for i in range(len(data_list)):
        data.extend(results[i])

    for i in range(len(data)):
        data_type = data[i]["type"]
        dataset_name = data[i]["dataset_name"]
        answers = data[i]["answers"]
        if data[i].get('predict', 0) == 0:
            continue
        predict = data[i]['predict']
        data[i]['result'] = 0
        if dataset_name == "HME100k":
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].strip().replace("\n", " ").replace(" ", "")
                    predict = predict.strip().replace("\n", " ").replace(" ", "")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.strip().replace("\n", " ").replace(" ", "")
                predict = predict.strip().replace("\n", " ").replace(" ", "")
                if answers in predict:
                    data[i]['result'] = 1
        else:
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].lower().strip().replace("\n", " ")
                    predict = predict.lower().strip().replace("\n", " ")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.lower().strip().replace("\n", " ")
                predict = predict.lower().strip().replace("\n", " ")
                if answers in predict:
                    data[i]['result'] = 1
    save_json(data, os.path.join(args.output_folder, f"{args.save_name}.json"))
    if len(data) == 1000:
        for i in range(len(data)):
            if data[i].get("result", 100) == 100:
                continue
            OCRBench_score[data[i]['type']] += data[i]['result']
        recognition_score = OCRBench_score['Regular Text Recognition'] + OCRBench_score['Irregular Text Recognition'] + OCRBench_score['Artistic Text Recognition'] + OCRBench_score['Handwriting Recognition'] + OCRBench_score['Digit String Recognition'] + OCRBench_score['Non-Semantic Text Recognition']
        Final_score = recognition_score + OCRBench_score['Scene Text-centric VQA'] + OCRBench_score['Doc-oriented VQA'] + OCRBench_score['Key Information Extraction'] + OCRBench_score['Handwritten Mathematical Expression Recognition']
        print("###########################OCRBench##############################")
        print(f"Text Recognition(Total 300):{recognition_score}")
        print("------------------Details of Recognition Score-------------------")
        print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
        print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
        print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
        print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
        print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
        print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
        print("----------------------------------------------------------------")
        print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
        print("----------------------------------------------------------------")
        print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
        print("----------------------------------------------------------------")
        print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
        print("----------------------------------------------------------------")
        print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
        print("----------------------Final Score-------------------------------")
        print(f"Final Score(Total 1000): {Final_score}")
    else:
        for i in range(len(data)):
            num_all[data[i]['dataset_name']] += 1
            if data[i].get("result", 100) == 100:
                continue
            AllDataset_score[data[i]['dataset_name']] += data[i]['result']
        for key in AllDataset_score.keys():
            print(f"{key}: {AllDataset_score[key] / float(num_all[key])}")
OCRBench/scripts/internvl2_s (new file, 247 lines)
@@ -0,0 +1,247 @@
|
||||
import json
|
||||
from argparse import ArgumentParser
|
||||
import torch
|
||||
import os
|
||||
import json
|
||||
from tqdm import tqdm
|
||||
from PIL import Image
|
||||
import math
|
||||
import multiprocessing
|
||||
from multiprocessing import Pool, Queue, Manager
|
||||
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer,AutoModel
|
||||
import numpy as np
|
||||
import torch
|
||||
import torchvision.transforms as T
|
||||
from PIL import Image
|
||||
from torchvision.transforms.functional import InterpolationMode
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
|
||||
IMAGENET_MEAN = (0.485, 0.456, 0.406)
|
||||
IMAGENET_STD = (0.229, 0.224, 0.225)
|
||||
|
||||
def build_transform(input_size):
|
||||
MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
|
||||
transform = T.Compose([
|
||||
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
|
||||
T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
|
||||
T.ToTensor(),
|
||||
T.Normalize(mean=MEAN, std=STD)
|
||||
])
|
||||
return transform
|
||||
|
||||
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
|
||||
best_ratio_diff = float('inf')
|
||||
best_ratio = (1, 1)
|
||||
area = width * height
|
||||
for ratio in target_ratios:
|
||||
target_aspect_ratio = ratio[0] / ratio[1]
|
||||
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
|
||||
if ratio_diff < best_ratio_diff:
|
||||
best_ratio_diff = ratio_diff
|
||||
best_ratio = ratio
|
||||
elif ratio_diff == best_ratio_diff:
|
||||
if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
|
||||
best_ratio = ratio
|
||||
return best_ratio
|
||||
|
||||
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
|
||||
orig_width, orig_height = image.size
|
||||
aspect_ratio = orig_width / orig_height
|
||||
|
||||
# calculate the existing image aspect ratio
|
||||
target_ratios = set(
|
||||
(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
|
||||
i * j <= max_num and i * j >= min_num)
|
||||
target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
|
||||
|
||||
# find the closest aspect ratio to the target
|
||||
target_aspect_ratio = find_closest_aspect_ratio(
|
||||
aspect_ratio, target_ratios, orig_width, orig_height, image_size)
|
||||
|
||||
# calculate the target width and height
|
||||
target_width = image_size * target_aspect_ratio[0]
|
||||
target_height = image_size * target_aspect_ratio[1]
|
||||
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
|
||||
|
||||
# resize the image
|
||||
resized_img = image.resize((target_width, target_height))
|
||||
processed_images = []
|
||||
for i in range(blocks):
|
||||
box = (
|
||||
(i % (target_width // image_size)) * image_size,
|
||||
(i // (target_width // image_size)) * image_size,
|
||||
((i % (target_width // image_size)) + 1) * image_size,
|
||||
((i // (target_width // image_size)) + 1) * image_size
|
||||
)
|
||||
# split the image
|
||||
split_img = resized_img.crop(box)
|
||||
processed_images.append(split_img)
|
||||
assert len(processed_images) == blocks
|
||||
if use_thumbnail and len(processed_images) != 1:
|
||||
thumbnail_img = image.resize((image_size, image_size))
|
||||
processed_images.append(thumbnail_img)
|
||||
return processed_images
|
||||
|
||||
def load_image(image_file, input_size=448, max_num=12):
|
||||
image = Image.open(image_file).convert('RGB')
|
||||
transform = build_transform(input_size=input_size)
|
||||
images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
|
||||
pixel_values = [transform(image) for image in images]
|
||||
pixel_values = torch.stack(pixel_values)
|
||||
return pixel_values
|
||||
|
||||
# https://huggingface.co/OpenGVLab/InternVL2-1B
|
||||
|
||||
def split_list(lst, n):
|
||||
length = len(lst)
|
||||
avg = length // n # 每份的大小
|
||||
result = [] # 存储分割后的子列表
|
||||
for i in range(n - 1):
|
||||
result.append(lst[i*avg:(i+1)*avg])
|
||||
result.append(lst[(n-1)*avg:])
|
||||
return result


def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
    parser.add_argument("--output_folder", type=str, default="./results")
    parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
    parser.add_argument("--model_path", type=str, default='OpenGVLab/InternVL2-4B')
    parser.add_argument("--save_name", type=str, default="internvl2-4B")
    parser.add_argument("--num_workers", type=int, default=1)
    args = parser.parse_args()
    return args


OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
                  "Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,
                  "Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
                    "STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
           "STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}


def eval_worker(args, data, eval_id, output_queue):
    print(f"Process {eval_id} start.")
    checkpoint = args.model_path
    model = AutoModel.from_pretrained(
        checkpoint,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True).eval().to(f'cuda:{eval_id}')
    tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True, use_fast=False)

    for i in tqdm(range(len(data))):
        img_path = os.path.join(args.image_folder, data[i]['image_path'])
        qs = data[i]['question']
        pixel_values = load_image(img_path, max_num=12).to(torch.bfloat16).to(f'cuda:{eval_id}')
        generation_config = dict(max_new_tokens=1024, do_sample=False)
        question = f'<image>\n{qs}'
        response = model.chat(tokenizer, pixel_values, question, generation_config)
        data[i]['predict'] = response
    output_queue.put({eval_id: data})
    print(f"Process {eval_id} has completed.")


if __name__ == "__main__":
    multiprocessing.set_start_method('spawn')
    args = _get_args()

    if os.path.exists(os.path.join(args.output_folder, f"{args.save_name}.json")):
        data_path = os.path.join(args.output_folder, f"{args.save_name}.json")
        print(f"output_path:{data_path} exists! Only generating the results that are missing from {data_path}.")
    else:
        data_path = args.OCRBench_file

    with open(data_path, "r") as f:
        data = json.load(f)

    data_list = split_list(data, args.num_workers)

    output_queue = Manager().Queue()

    pool = Pool(processes=args.num_workers)
    for i in range(len(data_list)):
        pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
    pool.close()
    pool.join()
    # eval_worker(args, data_list[0], 0, output_queue)

    results = {}
    while not output_queue.empty():
        result = output_queue.get()
        results.update(result)
    data = []
    for i in range(len(data_list)):
        data.extend(results[i])

    for i in range(len(data)):
        data_type = data[i]["type"]
        dataset_name = data[i]["dataset_name"]
        answers = data[i]["answers"]
        if data[i].get('predict', 0) == 0:
            continue
        predict = data[i]['predict']
        data[i]['result'] = 0
        if dataset_name == "HME100k":
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].strip().replace("\n", " ").replace(" ", "")
                    predict = predict.strip().replace("\n", " ").replace(" ", "")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.strip().replace("\n", " ").replace(" ", "")
                predict = predict.strip().replace("\n", " ").replace(" ", "")
                if answers in predict:
                    data[i]['result'] = 1
        else:
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].lower().strip().replace("\n", " ")
                    predict = predict.lower().strip().replace("\n", " ")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.lower().strip().replace("\n", " ")
                predict = predict.lower().strip().replace("\n", " ")
                if answers in predict:
                    data[i]['result'] = 1
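    # Scoring above is substring containment: a prediction counts as correct
    # when any ground-truth answer appears inside it (case-insensitive, except
    # HME100k, where all whitespace is stripped so expressions compare cleanly).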
    save_json(data, os.path.join(args.output_folder, f"{args.save_name}.json"))
    if len(data) == 1000:
        for i in range(len(data)):
            if data[i].get("result", 100) == 100:
                continue
            OCRBench_score[data[i]['type']] += data[i]['result']
        recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
        Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
        print("###########################OCRBench##############################")
        print(f"Text Recognition(Total 300): {recognition_score}")
        print("------------------Details of Recognition Score-------------------")
        print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
        print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
        print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
        print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
        print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
        print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
        print("----------------------------------------------------------------")
        print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
        print("----------------------------------------------------------------")
        print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
        print("----------------------------------------------------------------")
        print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
        print("----------------------------------------------------------------")
        print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
        print("----------------------Final Score-------------------------------")
        print(f"Final Score(Total 1000): {Final_score}")
    else:
        for i in range(len(data)):
            num_all[data[i]['dataset_name']] += 1
            if data[i].get("result", 100) == 100:
                continue
            AllDataset_score[data[i]['dataset_name']] += data[i]['result']
        for key in AllDataset_score.keys():
            print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")
178
OCRBench/scripts/intervl.py
Normal file
@@ -0,0 +1,178 @@
import json
import os
import math
import multiprocessing
from argparse import ArgumentParser
from multiprocessing import Pool, Queue, Manager

import torch
from tqdm import tqdm
from PIL import Image
from transformers import AutoModel, CLIPImageProcessor
from transformers import AutoTokenizer

# https://github.com/OpenGVLab/InternVL

def split_list(lst, n):
    length = len(lst)
    avg = length // n  # size of each chunk
    result = []  # holds the resulting sublists
    for i in range(n - 1):
        result.append(lst[i*avg:(i+1)*avg])
    result.append(lst[(n-1)*avg:])
    return result


def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
    parser.add_argument("--output_folder", type=str, default="./results")
    parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
    parser.add_argument("--model_path", type=str, default='OpenGVLab/InternVL-Chat-Chinese-V1-1')  # TODO Set the address of your model's weights
    parser.add_argument("--save_name", type=str, default="InternVL-Chat-Chinese-V1-1")  # TODO Set the name of the JSON file you save in the output_folder.
    parser.add_argument("--num_workers", type=int, default=1)
    args = parser.parse_args()
    return args


OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
                  "Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,
                  "Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
                    "STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
           "STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}


def eval_worker(args, data, eval_id, output_queue):
    print(f"Process {eval_id} start.")
    checkpoint = args.model_path
    model = AutoModel.from_pretrained(
        checkpoint,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True,
        device_map='cuda').eval()

    tokenizer = AutoTokenizer.from_pretrained(checkpoint)

    for i in tqdm(range(len(data))):
        img_path = os.path.join(args.image_folder, data[i]['image_path'])
        qs = data[i]['question']
        image = Image.open(img_path).convert('RGB')
        image = image.resize((448, 448))
        image_processor = CLIPImageProcessor.from_pretrained(checkpoint)

        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
        pixel_values = pixel_values.to(torch.bfloat16).cuda()

        generation_config = dict(
            num_beams=1,
            max_new_tokens=512,
            do_sample=False,
        )
        response = model.chat(tokenizer, pixel_values, qs, generation_config)
        data[i]['predict'] = response
    output_queue.put({eval_id: data})
    print(f"Process {eval_id} has completed.")


if __name__ == "__main__":
    multiprocessing.set_start_method('spawn')
    args = _get_args()

    if os.path.exists(os.path.join(args.output_folder, f"{args.save_name}.json")):
        data_path = os.path.join(args.output_folder, f"{args.save_name}.json")
        print(f"output_path:{data_path} exists! Only generating the results that are missing from {data_path}.")
    else:
        data_path = args.OCRBench_file

    with open(data_path, "r") as f:
        data = json.load(f)

    data_list = split_list(data, args.num_workers)

    output_queue = Manager().Queue()

    pool = Pool(processes=args.num_workers)
    for i in range(len(data_list)):
        pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
    pool.close()
    pool.join()

    results = {}
    while not output_queue.empty():
        result = output_queue.get()
        results.update(result)
    data = []
    for i in range(len(data_list)):
        data.extend(results[i])
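    # Each worker put {eval_id: its_shard} on the shared queue; re-assembling
    # the shards by index restores the original sample order across processes.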

    for i in range(len(data)):
        data_type = data[i]["type"]
        dataset_name = data[i]["dataset_name"]
        answers = data[i]["answers"]
        if data[i].get('predict', 0) == 0:
            continue
        predict = data[i]['predict']
        data[i]['result'] = 0
        if dataset_name == "HME100k":
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].strip().replace("\n", " ").replace(" ", "")
                    predict = predict.strip().replace("\n", " ").replace(" ", "")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.strip().replace("\n", " ").replace(" ", "")
                predict = predict.strip().replace("\n", " ").replace(" ", "")
                if answers in predict:
                    data[i]['result'] = 1
        else:
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].lower().strip().replace("\n", " ")
                    predict = predict.lower().strip().replace("\n", " ")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.lower().strip().replace("\n", " ")
                predict = predict.lower().strip().replace("\n", " ")
                if answers in predict:
                    data[i]['result'] = 1
    save_json(data, os.path.join(args.output_folder, f"{args.save_name}.json"))
    if len(data) == 1000:
        for i in range(len(data)):
            if data[i].get("result", 100) == 100:
                continue
            OCRBench_score[data[i]['type']] += data[i]['result']
        recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
        Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
        print("###########################OCRBench##############################")
        print(f"Text Recognition(Total 300): {recognition_score}")
        print("------------------Details of Recognition Score-------------------")
        print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
        print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
        print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
        print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
        print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
        print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
        print("----------------------------------------------------------------")
        print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
        print("----------------------------------------------------------------")
        print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
        print("----------------------------------------------------------------")
        print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
        print("----------------------------------------------------------------")
        print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
        print("----------------------Final Score-------------------------------")
        print(f"Final Score(Total 1000): {Final_score}")
    else:
        for i in range(len(data)):
            num_all[data[i]['dataset_name']] += 1
            if data[i].get("result", 100) == 100:
                continue
            AllDataset_score[data[i]['dataset_name']] += data[i]['result']
        for key in AllDataset_score.keys():
            print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")
@@ -71,7 +71,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--model_path", type=str, default="./model_weights/LLaVar")
329
OCRBench/scripts/mPLUG-DocOwl15.py
Normal file
@@ -0,0 +1,329 @@
import json
import multiprocessing
import os
from argparse import ArgumentParser
from multiprocessing import Manager, Pool, Queue

import torch
from mplug_docowl.constants import DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX
from mplug_docowl.conversation import conv_templates
from mplug_docowl.mm_utils import (
    KeywordsStoppingCriteria,
    get_model_name_from_path,
    process_images,
    tokenizer_image_token,
)
from mplug_docowl.model.builder import load_pretrained_model
from mplug_docowl.processor import DocProcessor
from tqdm import tqdm
from transformers import TextStreamer


# https://github.com/X-PLUG/mPLUG-DocOwl/blob/main/DocOwl1.5/docowl_infer.py
def split_list(lst, n):
    length = len(lst)
    avg = length // n  # size of each chunk
    result = []  # holds the resulting sublists
    for i in range(n - 1):
        result.append(lst[i * avg : (i + 1) * avg])
    result.append(lst[(n - 1) * avg :])
    return result


def save_json(json_list, save_path):
    with open(save_path, "w", encoding="utf-8") as file:
        json.dump(json_list, file, indent=4)


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
    parser.add_argument("--output_folder", type=str, default="./results")
    parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
    parser.add_argument("--model_path", type=str, default="mPLUG/DocOwl1.5")
    parser.add_argument("--save_name", type=str, default="mplug-DocOwl1.5")
    parser.add_argument("--conv_mode", type=str, default="mplug_owl2")
    parser.add_argument("--num_workers", type=int, default=8)
    parser.add_argument("--temperature", type=float, default=0.0)
    args = parser.parse_args()
    return args


OCRBench_score = {
    "Regular Text Recognition": 0,
    "Irregular Text Recognition": 0,
    "Artistic Text Recognition": 0,
    "Handwriting Recognition": 0,
    "Digit String Recognition": 0,
    "Non-Semantic Text Recognition": 0,
    "Scene Text-centric VQA": 0,
    "Doc-oriented VQA": 0,
    "Key Information Extraction": 0,
    "Handwritten Mathematical Expression Recognition": 0,
}
AllDataset_score = {
    "IIIT5K": 0,
    "svt": 0,
    "IC13_857": 0,
    "IC15_1811": 0,
    "svtp": 0,
    "ct80": 0,
    "cocotext": 0,
    "ctw": 0,
    "totaltext": 0,
    "HOST": 0,
    "WOST": 0,
    "WordArt": 0,
    "IAM": 0,
    "ReCTS": 0,
    "ORAND": 0,
    "NonSemanticText": 0,
    "SemanticText": 0,
    "STVQA": 0,
    "textVQA": 0,
    "ocrVQA": 0,
    "ESTVQA": 0,
    "ESTVQA_cn": 0,
    "docVQA": 0,
    "infographicVQA": 0,
    "ChartQA": 0,
    "ChartQA_Human": 0,
    "FUNSD": 0,
    "SROIE": 0,
    "POIE": 0,
    "HME100k": 0,
}
num_all = {
    "IIIT5K": 0,
    "svt": 0,
    "IC13_857": 0,
    "IC15_1811": 0,
    "svtp": 0,
    "ct80": 0,
    "cocotext": 0,
    "ctw": 0,
    "totaltext": 0,
    "HOST": 0,
    "WOST": 0,
    "WordArt": 0,
    "IAM": 0,
    "ReCTS": 0,
    "ORAND": 0,
    "NonSemanticText": 0,
    "SemanticText": 0,
    "STVQA": 0,
    "textVQA": 0,
    "ocrVQA": 0,
    "ESTVQA": 0,
    "ESTVQA_cn": 0,
    "docVQA": 0,
    "infographicVQA": 0,
    "ChartQA": 0,
    "ChartQA_Human": 0,
    "FUNSD": 0,
    "SROIE": 0,
    "POIE": 0,
    "HME100k": 0,
}


def eval_worker(args, data, eval_id, output_queue):
    print(f"Process {eval_id} start.")
    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, _, _ = load_pretrained_model(
        args.model_path,
        None,
        model_name,
        load_8bit=False,
        load_4bit=False,
        device=f"cuda:{eval_id}",
    )

    doc_image_processor = DocProcessor(
        image_size=448,
        anchors="grid_9",
        add_global_img=True,
        add_textual_crop_indicator=True,
    )

    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    for i in tqdm(range(len(data))):
        img_path = os.path.join(args.image_folder, data[i]["image_path"])
        qs = data[i]["question"]
        if data[i].get("predict", 0) != 0:
            print(f"{img_path} predict exists, skipping.")
            continue

        image_tensor, patch_positions, text = doc_image_processor(
            images=img_path, query="<|image|>" + qs
        )
        image_tensor = image_tensor.to(model.device, dtype=torch.float16)
        patch_positions = patch_positions.to(model.device)

        conv = conv_templates["mplug_owl2"].copy()
        conv.append_message(conv.roles[0], text)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        input_ids = (
            tokenizer_image_token(
                prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt"
            )
            .unsqueeze(0)
            .to(model.device)
        )

        stop_str = conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor,
                patch_positions=patch_positions,
                do_sample=False,
                temperature=1.0,
                max_new_tokens=512,
                streamer=streamer,
                use_cache=True,
                stopping_criteria=[stopping_criteria],
            )

        outputs = tokenizer.decode(output_ids[0, input_ids.shape[1] :]).strip()
        data[i]["predict"] = outputs
    output_queue.put({eval_id: data})
    print(f"Process {eval_id} has completed.")


if __name__ == "__main__":
    multiprocessing.set_start_method("spawn")
    args = _get_args()

    if os.path.exists(os.path.join(args.output_folder, f"{args.save_name}.json")):
        data_path = os.path.join(args.output_folder, f"{args.save_name}.json")
        print(
            f"output_path:{data_path} exists! Only generating the results that are missing from {data_path}."
        )
    else:
        data_path = args.OCRBench_file

    with open(data_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    data_list = split_list(data, args.num_workers)
    output_queue = Manager().Queue()

    pool = Pool(processes=args.num_workers)
    for i in range(len(data_list)):
        # pool.apply(eval_worker, args=(args, data_list[i], i, output_queue))
        pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
    pool.close()
    pool.join()

    results = {}
    while not output_queue.empty():
        result = output_queue.get()
        results.update(result)
    data = []
    for i in range(len(data_list)):
        data.extend(results[i])

    for i in range(len(data)):
        data_type = data[i]["type"]
        dataset_name = data[i]["dataset_name"]
        answers = data[i]["answers"]
        if data[i].get("predict", 0) == 0:
            continue
        predict = data[i]["predict"]
        data[i]["result"] = 0
        if dataset_name == "HME100k":
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].strip().replace("\n", " ").replace(" ", "")
                    predict = predict.strip().replace("\n", " ").replace(" ", "")
                    if answer in predict:
                        data[i]["result"] = 1
            else:
                answers = answers.strip().replace("\n", " ").replace(" ", "")
                predict = predict.strip().replace("\n", " ").replace(" ", "")
                if answers in predict:
                    data[i]["result"] = 1
        else:
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].lower().strip().replace("\n", " ")
                    predict = predict.lower().strip().replace("\n", " ")
                    if answer in predict:
                        data[i]["result"] = 1
            else:
                answers = answers.lower().strip().replace("\n", " ")
                predict = predict.lower().strip().replace("\n", " ")
                if answers in predict:
                    data[i]["result"] = 1
    save_json(data, os.path.join(args.output_folder, f"{args.save_name}.json"))
    if len(data) == 1000:
        for i in range(len(data)):
            if data[i].get("result", 100) == 100:
                continue
            OCRBench_score[data[i]["type"]] += data[i]["result"]
        recognition_score = (
            OCRBench_score["Regular Text Recognition"]
            + OCRBench_score["Irregular Text Recognition"]
            + OCRBench_score["Artistic Text Recognition"]
            + OCRBench_score["Handwriting Recognition"]
            + OCRBench_score["Digit String Recognition"]
            + OCRBench_score["Non-Semantic Text Recognition"]
        )
        Final_score = (
            recognition_score
            + OCRBench_score["Scene Text-centric VQA"]
            + OCRBench_score["Doc-oriented VQA"]
            + OCRBench_score["Key Information Extraction"]
            + OCRBench_score["Handwritten Mathematical Expression Recognition"]
        )
        print("###########################OCRBench##############################")
        print(f"Text Recognition(Total 300): {recognition_score}")
        print("------------------Details of Recognition Score-------------------")
        print(
            f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}"
        )
        print(
            f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}"
        )
        print(
            f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}"
        )
        print(
            f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}"
        )
        print(
            f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}"
        )
        print(
            f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}"
        )
        print("----------------------------------------------------------------")
        print(
            f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}"
        )
        print("----------------------------------------------------------------")
        print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
        print("----------------------------------------------------------------")
        print(
            f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}"
        )
        print("----------------------------------------------------------------")
        print(
            f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}"
        )
        print("----------------------Final Score-------------------------------")
        print(f"Final Score(Total 1000): {Final_score}")
    else:
        for i in range(len(data)):
            num_all[data[i]["dataset_name"]] += 1
            if data[i].get("result", 100) == 100:
                continue
            AllDataset_score[data[i]["dataset_name"]] += data[i]["result"]
        for key in AllDataset_score.keys():
            print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")
@@ -31,7 +31,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--model_path", type=str, default="./model_weights/mplug-owl")
@@ -31,7 +31,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--model_path", type=str, default="./model_weights/mplug-owl2")
@@ -33,7 +33,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--cfg-path", default='./scripts/MiniGPT-4/eval_configs/minigptv2_eval.yaml')
@@ -28,7 +28,7 @@ def save_json(json_list,save_path):
 
 def _get_args():
     parser = ArgumentParser()
-    parser.add_argument("--image_folder", type=str, default="./data")
+    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
     parser.add_argument("--output_folder", type=str, default="./results")
     parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
     parser.add_argument("--model_path", type=str, default="echo840/Monkey")
@@ -38,7 +38,7 @@ def _get_args():
     return args
 
 OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
-                  "Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
+                  "Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,
                   "Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
 AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
                     "STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
@@ -48,7 +48,7 @@ num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"coco
 def eval_worker(args, data, eval_id, output_queue):
     print(f"Process {eval_id} start.")
     checkpoint = args.model_path
-    model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map='cuda', trust_remote_code=True).eval()
+    model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map=f'cuda:{eval_id}', trust_remote_code=True).eval()
     tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
     tokenizer.padding_side = 'left'
     tokenizer.pad_token_id = tokenizer.eod_id
181
OCRBench/scripts/qwenvl.py
Normal file
@@ -0,0 +1,181 @@
import json
import os
import math
import multiprocessing
from argparse import ArgumentParser
from multiprocessing import Pool, Queue, Manager

import torch
from tqdm import tqdm
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

# https://github.com/QwenLM/Qwen-VL/blob/master/eval_mm/evaluate_vqa.py
def split_list(lst, n):
    length = len(lst)
    avg = length // n  # size of each chunk
    result = []  # holds the resulting sublists
    for i in range(n - 1):
        result.append(lst[i*avg:(i+1)*avg])
    result.append(lst[(n-1)*avg:])
    return result


def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="./OCRBench_Images")
    parser.add_argument("--output_folder", type=str, default="./results")
    parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
    parser.add_argument("--model_path", type=str, default="Qwen/Qwen-VL")
    parser.add_argument("--save_name", type=str, default="qwenvl")
    parser.add_argument("--num_workers", type=int, default=1)
    args = parser.parse_args()
    return args


OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
                  "Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,
                  "Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
                    "STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
           "STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}


def eval_worker(args, data, eval_id, output_queue):
    print(f"Process {eval_id} start.")
    checkpoint = args.model_path
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint, device_map=f'cuda:{eval_id}', trust_remote_code=True).eval()

    tokenizer = AutoTokenizer.from_pretrained(checkpoint,
                                              trust_remote_code=True)
    tokenizer.padding_side = 'left'
    tokenizer.pad_token_id = tokenizer.eod_id

    for i in tqdm(range(len(data))):
        img_path = os.path.join(args.image_folder, data[i]['image_path'])
        qs = data[i]['question']
        # query = f'<img>{img_path}</img> {qs} Answer: '
        query = f'<img>{img_path}</img>{qs} Answer:'
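        # Qwen-VL takes the image inline: the <img>...</img> span in the
        # prompt is expanded by the model's tokenizer into visual tokens, so
        # no separate pixel tensor is passed to generate() below.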
        input_ids = tokenizer(query, return_tensors='pt', padding='longest')
        attention_mask = input_ids.attention_mask
        input_ids = input_ids.input_ids

        pred = model.generate(
            input_ids=input_ids.to(f'cuda:{eval_id}'),
            attention_mask=attention_mask.to(f'cuda:{eval_id}'),
            do_sample=False,
            num_beams=1,
            max_new_tokens=100,
            min_new_tokens=1,
            length_penalty=1,
            num_return_sequences=1,
            output_hidden_states=True,
            use_cache=True,
            pad_token_id=tokenizer.eod_id,
            eos_token_id=tokenizer.eod_id,
        )
        response = tokenizer.decode(pred[0][input_ids.size(1):].cpu(), skip_special_tokens=True).strip()
        data[i]['predict'] = response
    output_queue.put({eval_id: data})
    print(f"Process {eval_id} has completed.")


if __name__ == "__main__":
    multiprocessing.set_start_method('spawn')
    args = _get_args()
    if os.path.exists(os.path.join(args.output_folder, f"{args.save_name}.json")):
        data_path = os.path.join(args.output_folder, f"{args.save_name}.json")
        print(f"output_path:{data_path} exists! Only generating the results that are missing from {data_path}.")
    else:
        data_path = args.OCRBench_file

    with open(data_path, "r") as f:
        data = json.load(f)

    data_list = split_list(data, args.num_workers)

    output_queue = Manager().Queue()

    pool = Pool(processes=args.num_workers)
    for i in range(len(data_list)):
        pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
    pool.close()
    pool.join()

    results = {}
    while not output_queue.empty():
        result = output_queue.get()
        results.update(result)
    data = []
    for i in range(len(data_list)):
        data.extend(results[i])

    for i in range(len(data)):
        data_type = data[i]["type"]
        dataset_name = data[i]["dataset_name"]
        answers = data[i]["answers"]
        if data[i].get('predict', 0) == 0:
            continue
        predict = data[i]['predict']
        data[i]['result'] = 0
        if dataset_name == "HME100k":
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].strip().replace("\n", " ").replace(" ", "")
                    predict = predict.strip().replace("\n", " ").replace(" ", "")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.strip().replace("\n", " ").replace(" ", "")
                predict = predict.strip().replace("\n", " ").replace(" ", "")
                if answers in predict:
                    data[i]['result'] = 1
        else:
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].lower().strip().replace("\n", " ")
                    predict = predict.lower().strip().replace("\n", " ")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.lower().strip().replace("\n", " ")
                predict = predict.lower().strip().replace("\n", " ")
                if answers in predict:
                    data[i]['result'] = 1
    save_json(data, os.path.join(args.output_folder, f"{args.save_name}.json"))
    if len(data) == 1000:
        for i in range(len(data)):
            if data[i].get("result", 100) == 100:
                continue
            OCRBench_score[data[i]['type']] += data[i]['result']
        recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
        Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
        print("###########################OCRBench##############################")
        print(f"Text Recognition(Total 300): {recognition_score}")
        print("------------------Details of Recognition Score-------------------")
        print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
        print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
        print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
        print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
        print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
        print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
        print("----------------------------------------------------------------")
        print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
        print("----------------------------------------------------------------")
        print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
        print("----------------------------------------------------------------")
        print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
        print("----------------------------------------------------------------")
        print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
        print("----------------------Final Score-------------------------------")
        print(f"Final Score(Total 1000): {Final_score}")
    else:
        for i in range(len(data)):
            num_all[data[i]['dataset_name']] += 1
            if data[i].get("result", 100) == 100:
                continue
            AllDataset_score[data[i]['dataset_name']] += data[i]['result']
        for key in AllDataset_score.keys():
            print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")
138
OCRBench/scripts/qwenvl_api.py
Normal file
@@ -0,0 +1,138 @@
import pathlib
import json
import os
import sys
import time
from argparse import ArgumentParser
from http import HTTPStatus

from tqdm import tqdm
from dashscope import MultiModalConversation

# You should follow the instructions here before you start: https://help.aliyun.com/zh/dashscope/developer-reference/vl-plus-quick-start
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
                  "Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,
                  "Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
                    "STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
           "STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}


def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def call_with_local_file(img_path, question, model_name):
    """Sample of using a local file.
    linux&mac file schema: file:///home/images/test.png
    windows file schema: file://D:/images/abc.png
    """
    local_file_path1 = f'file://{img_path}'
    messages = [{
        'role': 'system',
        'content': [{
            'text': 'You are a helpful assistant.'
        }]
    }, {
        'role': 'user',
        'content': [
            {
                'image': local_file_path1
            },
            {
                'text': question
            },
        ]
    }]
    response = MultiModalConversation.call(model=model_name, messages=messages)
    # time.sleep(2)  # For qwen-vl-max you may need this line to stay under the rate limits.
    print(response)
    return response['output']['choices'][0]["message"]['content'][0]['text']
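

# A minimal retry sketch (not part of the original script): wraps the call
# above with a short pause between attempts, which helps against the rate
# limits mentioned for qwen-vl-max. The retries/wait values are arbitrary.
def call_with_retry(img_path, question, model_name, retries=3, wait=2.0):
    for _ in range(retries):
        try:
            return call_with_local_file(img_path, question, model_name)
        except Exception:
            time.sleep(wait)
    raise RuntimeError("DashScope call failed after retries")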


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="./data")
    parser.add_argument("--output_path", type=str, default="./results")
    parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
    parser.add_argument("--model", type=str, default="qwen-vl-max")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = _get_args()
    if os.path.exists(os.path.join(args.output_path, f"{args.model}.json")):
        data_path = os.path.join(args.output_path, f"{args.model}.json")
    else:
        data_path = args.OCRBench_file
    with open(data_path, "r") as f:
        data = json.load(f)
    for i in tqdm(range(len(data))):
        img_path = os.path.join(args.image_folder, data[i]['image_path'])
        question = data[i]['question']
        if data[i].get("predict", 0) != 0:
            print(f"{img_path} predict exists, skipping.")
            continue
        try:
            response = call_with_local_file(img_path, question, args.model)
            data[i]['predict'] = response
        except Exception:
            print("QwenVL api failed")
        save_json(data, os.path.join(args.output_path, f"{args.model}.json"))
    for i in range(len(data)):
        data_type = data[i]["type"]
        dataset_name = data[i]["dataset_name"]
        answers = data[i]["answers"]
        if data[i].get('predict', 0) == 0:
            continue
        predict = data[i]['predict']
        data[i]['result'] = 0
        if dataset_name == "HME100k":
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].strip().replace("\n", " ").replace(" ", "")
                    predict = predict.strip().replace("\n", " ").replace(" ", "")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.strip().replace("\n", " ").replace(" ", "")
                predict = predict.strip().replace("\n", " ").replace(" ", "")
                if answers in predict:
                    data[i]['result'] = 1
        else:
            if type(answers) == list:
                for j in range(len(answers)):
                    answer = answers[j].lower().strip().replace("\n", " ")
                    predict = predict.lower().strip().replace("\n", " ")
                    if answer in predict:
                        data[i]['result'] = 1
            else:
                answers = answers.lower().strip().replace("\n", " ")
                predict = predict.lower().strip().replace("\n", " ")
                if answers in predict:
                    data[i]['result'] = 1
    save_json(data, os.path.join(args.output_path, f"{args.model}.json"))
    for i in range(len(data)):
        if data[i].get("result", 100) == 100:
            continue
        OCRBench_score[data[i]['type']] += data[i]['result']
    recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
    Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
    print("###########################OCRBench##############################")
    print(f"Text Recognition(Total 300): {recognition_score}")
    print("------------------Details of Recognition Score-------------------")
    print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
    print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
    print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
    print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
    print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
    print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
    print("----------------------------------------------------------------")
    print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
    print("----------------------------------------------------------------")
    print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
    print("----------------------------------------------------------------")
    print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
    print("----------------------------------------------------------------")
    print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
    print("----------------------Final Score-------------------------------")
    print(f"Final Score(Total 1000): {Final_score}")
94
OCRBench_v2/README.md
Normal file
@@ -0,0 +1,94 @@
# OCRBench v2: An Improved Benchmark for Evaluating Large Multimodal Models on Visual Text Localization and Reasoning

> Scoring the Optical Character Recognition (OCR) capabilities of Large Multimodal Models (LMMs) has witnessed growing interest recently. Existing benchmarks have highlighted the impressive performance of LMMs in text recognition; however, their abilities in certain challenging tasks, such as text localization, handwritten content extraction, and logical reasoning, remain underexplored. To bridge this gap, we introduce OCRBench v2, a large-scale bilingual text-centric benchmark with currently the most comprehensive set of tasks (4X more tasks than the previous multi-scene benchmark OCRBench), the widest coverage of scenarios (31 diverse scenarios including street scene, receipt, formula, diagram, and so on), and thorough evaluation metrics, with a total of 10,000 human-verified question-answering pairs and a high proportion of difficult samples. After carefully benchmarking state-of-the-art LMMs on OCRBench v2, we find that 36 out of 38 LMMs score below 50 (100 in total) and suffer from five types of limitations, including less frequently encountered text recognition, fine-grained perception, layout perception, complex element parsing, and logical reasoning.

**[Project Page](https://github.com/Yuliang-Liu/MultimodalOCR)** | **[Paper](https://arxiv.org/abs/2501.00321)** | **[OCRBench v2 Leaderboard](https://huggingface.co/spaces/ling99/OCRBench-v2-leaderboard)**

<p align="center">
    <img src="https://v1.ax1x.com/2024/12/30/7VhCnP.jpg" width="88%" height="80%">
</p>

# Data
You can download OCRBench v2 from [Google Drive](https://drive.google.com/file/d/1Hk1TMu--7nr5vJ7iaNwMQZ_Iw9W_KI3C/view?usp=sharing).
After downloading and extracting the dataset, the directory structure is as follows:
```
OCRBench_v2/
├── EN_part/
├── CN_part/
├── OCRBench_v2.json
```
# Evaluation

## Environment
All Python dependencies required for the evaluation process are specified in **requirements.txt**.
To set up the environment, simply run the following commands in the project directory:
```bash
conda create -n ocrbench_v2 python==3.10 -y
conda activate ocrbench_v2
pip install -r requirements.txt
```

## Inference
To evaluate a model's performance on OCRBench v2, save its inference result for each sample in the `predict` field of the JSON file.
<br>
Example structure of the JSON file:

```json
[
    {
        "dataset_name": "xx",
        "type": "xx",
        "id": 0,
        "image_path": "xx",
        "question": "xx",
        "answers": [
            "xx"
        ],
        "predict": "xx"
    },
    ...
]
```
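
As a rough sketch of filling the `predict` field (the file paths and the `run_model` call are placeholders for your own inference code, not part of the benchmark):

```python
import json

with open("OCRBench_v2.json", "r", encoding="utf-8") as f:
    samples = json.load(f)

for sample in samples:
    # run_model is a placeholder for your LMM's inference call
    sample["predict"] = run_model(sample["image_path"], sample["question"])

with open("./pred_folder/my_model.json", "w", encoding="utf-8") as f:
    json.dump(samples, f, ensure_ascii=False, indent=4)
```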

## Evaluation Scripts
After obtaining the inference results from the model, you can use the following scripts to calculate the final score for OCRBench v2. For example, `./pred_folder/internvl2_5_26b.json` contains sample inference results generated by InternVL2.5-26B using [VLMEvalKit](https://github.com/open-compass/VLMEvalKit). To compute the score for each sample, you can use the script `./eval_scripts/eval.py`. The results will be saved in the `./res_folder`.

```bash
python ./eval_scripts/eval.py --input_path ./pred_folder/internvl2_5_26b.json --output_path ./res_folder/internvl2_5_26b.json
```

Once the scores for all samples have been calculated, you can use the script `./eval_scripts/get_score.py` to compute the overall metrics for OCRBench v2.

```bash
python ./eval_scripts/get_score.py --json_file ./res_folder/internvl2_5_26b.json
```

# Leaderboard

## Performance of LMMs on English subsets

<p align="center">
    <img src="https://v1.ax1x.com/2024/12/30/7VGFm4.png" width="88%" height="60%">
</p>

## Performance of LMMs on Chinese subsets

<p align="center">
    <img src="https://v1.ax1x.com/2024/12/30/7VGZ8h.png" width="88%" height="60%">
</p>

# Copyright Statement
The data are collected from public datasets and community user contributions. This dataset is for research purposes only and not for commercial use. If you have any copyright concerns, please contact ling_fu@hust.edu.cn.

# Citation
```BibTeX
@misc{fu2024ocrbenchv2improvedbenchmark,
      title={OCRBench v2: An Improved Benchmark for Evaluating Large Multimodal Models on Visual Text Localization and Reasoning},
      author={Ling Fu and Biao Yang and Zhebin Kuang and Jiajun Song and Yuzhe Li and Linghao Zhu and Qidi Luo and Xinyu Wang and Hao Lu and Mingxin Huang and Zhang Li and Guozhi Tang and Bin Shan and Chunhui Lin and Qi Liu and Binghong Wu and Hao Feng and Hao Liu and Can Huang and Jingqun Tang and Wei Chen and Lianwen Jin and Yuliang Liu and Xiang Bai},
      year={2024},
      eprint={2501.00321},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2501.00321},
}
```
91
OCRBench_v2/eval_scripts/IoUscore_metric.py
Normal file
@@ -0,0 +1,91 @@
import os
import re
import ast
from vqa_metric import vqa_evaluation


def calculate_iou(box1, box2):
    try:
        box1 = [int(coordinate) for coordinate in box1]
        box2 = [int(coordinate) for coordinate in box2]
    except Exception:
        return 0

    x1_inter = max(box1[0], box2[0])
    y1_inter = max(box1[1], box2[1])
    x2_inter = min(box1[2], box2[2])
    y2_inter = min(box1[3], box2[3])

    inter_area = max(0, x2_inter - x1_inter) * max(0, y2_inter - y1_inter)

    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])

    union_area = box1_area + box2_area - inter_area

    iou = inter_area / union_area if union_area != 0 else 0

    return iou


def vqa_with_position_evaluation(predict, img_metas):
    score_content, score_bbox = .0, .0
    if "answer" in predict.keys():
        score_content = vqa_evaluation(predict["answer"], img_metas["answers"])
    if "bbox" in predict.keys():
        gt_bbox = img_metas["bbox"]
        try:
            predict_bbox_list = ast.literal_eval(predict["bbox"])
            score_bbox = calculate_iou(predict_bbox_list, gt_bbox)
        except Exception:
            score_bbox = 0
    return 0.5 * score_content + 0.5 * score_bbox


def extract_coordinates(text):
    # Regex pattern to match coordinates in either (x1, y1, x2, y2) or [x1, y1, x2, y2] format
    pattern = r'[\(\[]\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*[\)\]]'

    matches = list(re.finditer(pattern, text))
    coords_list = []
    coords_set = set()
    for match in matches:
        x1, y1, x2, y2 = map(int, match.groups())

        if all(0 <= n <= 1000 for n in [x1, y1, x2, y2]):
            coords = (x1, y1, x2, y2)

            if coords in coords_set:
                coords_list = [c for c in coords_list if c != coords]

            coords_list.append(coords)
            coords_set.add(coords)
    if coords_list:
        last_coords = coords_list[-1]
        return list(last_coords)
    else:
        return None
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
print("Example for Text Grounding task.")
|
||||
box1 = [50, 50, 150, 150]
|
||||
box2 = [60, 60, 140, 140]
|
||||
iou_score = calculate_iou(box1, box2)
|
||||
print(f"IoU score: {iou_score}")
|
||||
|
||||
print("Example for VQA with position task.")
|
||||
pred = {"content": "The content is Hello Buddies", "bbox": box1}
|
||||
gt = {"content": "Hello Buddies", "bbox": box2}
|
||||
|
||||
vqa_score = vqa_evaluation(pred["content"], gt["content"])
|
||||
iou_score = calculate_iou(pred["bbox"], gt["bbox"])
|
||||
|
||||
print(f"VQA score: {vqa_score}")
|
||||
print(f"IoU score: {iou_score}")
|
931
OCRBench_v2/eval_scripts/TEDS_metric.py
Normal file
@@ -0,0 +1,931 @@
# Copyright 2020 IBM
# Author: peter.zhong@au1.ibm.com
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the Apache 2.0 License.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache 2.0 License for more details.

import re
import ast
import json
import distance
from apted import APTED, Config
from itertools import product
from apted.helpers import Tree
from lxml import etree, html
from collections import deque
from parallel import parallel_process
from tqdm import tqdm
from zss import simple_distance, Node
import numpy as np
import Levenshtein
import editdistance


class TableTree(Tree):
    def __init__(self, tag, colspan=None, rowspan=None, content=None, *children):
        self.tag = tag
        self.colspan = colspan
        self.rowspan = rowspan
        self.content = content
        self.children = list(children)

    def bracket(self):
        """Show tree using brackets notation"""
        if self.tag == 'td':
            result = '"tag": %s, "colspan": %d, "rowspan": %d, "text": %s' % \
                     (self.tag, self.colspan, self.rowspan, self.content)
        else:
            result = '"tag": %s' % self.tag
        for child in self.children:
            result += child.bracket()
        return "{{{}}}".format(result)


class CustomConfig(Config):
    @staticmethod
    def maximum(*sequences):
        """Get maximum possible value"""
        return max(map(len, sequences))

    def normalized_distance(self, *sequences):
        """Get distance from 0 to 1"""
        return float(distance.levenshtein(*sequences)) / self.maximum(*sequences)

    def rename(self, node1, node2):
        """Compares attributes of trees"""
        if (node1.tag != node2.tag) or (node1.colspan != node2.colspan) or (node1.rowspan != node2.rowspan):
            return 1.
        if node1.tag == 'td':
            if node1.content or node2.content:
                return self.normalized_distance(node1.content, node2.content)
        return 0.


class TEDS(object):
    '''Tree Edit Distance based Similarity'''

    def __init__(self, structure_only=False, n_jobs=1, ignore_nodes=None):
        assert isinstance(n_jobs, int) and (n_jobs >= 1), 'n_jobs must be an integer greater than or equal to 1'
        self.structure_only = structure_only
        self.n_jobs = n_jobs
        self.ignore_nodes = ignore_nodes
        self.__tokens__ = []

    def tokenize(self, node):
        '''Tokenizes table cells'''
        self.__tokens__.append('<%s>' % node.tag)
        if node.text is not None:
            self.__tokens__ += list(node.text)
        for n in node.getchildren():
            self.tokenize(n)
        if node.tag != 'unk':
            self.__tokens__.append('</%s>' % node.tag)
        if node.tag != 'td' and node.tail is not None:
            self.__tokens__ += list(node.tail)

    def load_html_tree(self, node, parent=None):
        '''Converts HTML tree to the format required by apted'''
        if node.tag == 'td':
            if self.structure_only:
                cell = []
            else:
                self.__tokens__ = []
                self.tokenize(node)
                cell = self.__tokens__[1:-1].copy()
            new_node = TableTree(node.tag,
                                 int(node.attrib.get('colspan', '1')),
                                 int(node.attrib.get('rowspan', '1')),
                                 cell, *deque())
        else:
            new_node = TableTree(node.tag, None, None, None, *deque())
        if parent is not None:
            parent.children.append(new_node)
        if node.tag != 'td':
            for n in node.getchildren():
                self.load_html_tree(n, new_node)
        if parent is None:
            return new_node

    def evaluate(self, pred, true):
        '''Computes TEDS score between the prediction and the ground truth of a
        given sample
        '''
        if (not pred) or (not true):
            return 0.0
        parser = html.HTMLParser(remove_comments=True, encoding='utf-8')
        pred = html.fromstring(pred, parser=parser)
        true = html.fromstring(true, parser=parser)
        if pred.xpath('body/table') and true.xpath('body/table'):
            pred = pred.xpath('body/table')[0]
            true = true.xpath('body/table')[0]
            if self.ignore_nodes:
                etree.strip_tags(pred, *self.ignore_nodes)
                etree.strip_tags(true, *self.ignore_nodes)
            n_nodes_pred = len(pred.xpath(".//*"))
            n_nodes_true = len(true.xpath(".//*"))
            n_nodes = max(n_nodes_pred, n_nodes_true)
            tree_pred = self.load_html_tree(pred)
            tree_true = self.load_html_tree(true)
            distance = APTED(tree_pred, tree_true, CustomConfig()).compute_edit_distance()
            return 1.0 - (float(distance) / n_nodes)
        else:
            return 0.0

    def batch_evaluate(self, pred_json, true_json):
        '''Computes TEDS score between the prediction and the ground truth of
        a batch of samples
        @params pred_json: {'FILENAME': 'HTML CODE', ...}
        @params true_json: {'FILENAME': {'html': 'HTML CODE'}, ...}
        @output: {'FILENAME': 'TEDS SCORE', ...}
        '''
        samples = true_json.keys()
        if self.n_jobs == 1:
            scores = [self.evaluate(pred_json.get(filename, ''), true_json[filename]['html']) for filename in tqdm(samples)]
        else:
            inputs = [{'pred': pred_json.get(filename, ''), 'true': true_json[filename]} for filename in samples]
            scores = parallel_process(inputs, self.evaluate, use_kwargs=True, n_jobs=self.n_jobs, front_num=1)
        scores = dict(zip(samples, scores))
        return scores


def convert_table_to_html_str(table_row_list=[]):
    """
    Given a list of table rows, build the corresponding html string, which is used to compute the TEDS score.
    We use the official code of PubTabNet to compute the TEDS score; it does not consider the '<th>' label.
    We also remove unnecessary spaces within a table cell and extra '\n' characters as they influence the TEDS score.
    """
    html_table_str = "<html><body><table>" + '\n'
    for data_row in table_row_list:
        html_table_str += "<tr>"
        for cell_str in data_row:
            html_table_str += f"<td>{cell_str}</td>"
        html_table_str += "</tr>"
        html_table_str += '\n'
    html_table_str += "</table></body></html>"
    html_table_str = html_table_str.replace('\n', '')
    return html_table_str


def convert_markdown_table_to_html(markdown_table):
    """
    Converts a markdown table to the corresponding html string for TEDS computation.
    """
    # remove extra code block tokens such as a leading '```markdown' and trailing '```'
    markdown_table = markdown_table.strip()
    if markdown_table.startswith("```markdown"):
        markdown_table = markdown_table[len("```markdown"):]
    markdown_table = markdown_table.strip('`').strip()
    row_str_list = markdown_table.split('\n')
    # keep the header row and the data rows, dropping the '| --- |' separator row
    valid_row_str_list = [row_str_list[0]] + row_str_list[2:]
    table_rows = []
    for row_str in valid_row_str_list:
        one_row = []
        for cell in row_str.strip().split('|')[1:-1]:
            if set(cell) != set(' '):
                one_row.append(cell.strip())
            else:
                one_row.append(' ')
        table_rows.append(one_row)
    # build html string based on table rows
    html_str = convert_table_to_html_str(table_rows)
    return html_str
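# Illustrative usage sketch (not part of the original file): convert a tiny
# markdown table and compare it against itself; TEDS should then be 1.0.
# Uses only this module's own TEDS class and converter, as in __main__ below.
#
#   md = "| a | b |\n| --- | --- |\n| 1 | 2 |"
#   html_str = convert_markdown_table_to_html(md)
#   print(TEDS().evaluate(html_str, html_str))  # -> 1.0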
def dict_to_html(data):
    html = "<html><body><table>\n"
    for key, value in data.items():
        if not isinstance(value, str):
            value = str(value)
        # join with spaces so the cell text becomes space-separated characters
        value_str = ' '.join(value)

        html += f"  <tr><td>{key}</td><td>{value_str}</td></tr>\n"
    html += "</table></body></html>"
    return html


def convert_str_to_dict(predict_str: str):
    """
    Parses the 'predict' string and returns a dictionary.
    Missing or unparseable content is handled gracefully.

    Parameters:
    - predict_str (str): The prediction string containing the output dict.

    Returns:
    - dict: A dictionary extracted from the predict string.
    """
    # Remove code fences like ```python\n...\n```
    code_fence_pattern = r'```(?:python|json)?\n(.*?)\n```'
    match = re.search(code_fence_pattern, predict_str, re.DOTALL | re.IGNORECASE)
    if match:
        content = match.group(1)
    else:
        content = predict_str.strip()

    data = {}
    success = False

    # try parsing with JSON
    try:
        data = json.loads(content)
        success = True
    except json.JSONDecodeError:
        pass

    # try parsing with ast.literal_eval
    if not success:
        try:
            data = ast.literal_eval(content)
            if isinstance(data, dict):
                success = True
        except (ValueError, SyntaxError):
            pass

    # fall back to regex extraction of key-value pairs
    if not success:
        key_value_pattern = r'["\']?([\w\s]+)["\']?\s*[:=]\s*["\']?([^\n,"\'{}]+)["\']?'
        matches = re.findall(key_value_pattern, content)
        for key, value in matches:
            data[key.strip()] = value.strip()

    if not data:
        return {}

    try:
        result = {k.strip(): str(v).strip() for k, v in data.items()}
    except AttributeError:
        # parsed content is not a dict (e.g. a list or scalar)
        return {}
    return result


def convert_str_to_multi_dict(predict_str: str):
    """
    Parses the 'predict' string and returns a dictionary.
    Handles nested dictionaries and missing or unparseable content gracefully.

    Parameters:
    - predict_str (str): The prediction string containing the output dict.

    Returns:
    - dict: A dictionary extracted from the predict string.
    """
    # Remove code fences like ```python\n...\n```
    code_fence_pattern = r'```(?:python|json)?\n(.*?)\n```'
    matches = re.findall(code_fence_pattern, predict_str, re.DOTALL | re.IGNORECASE)
    if matches:
        content = max(matches, key=len)
    else:
        content = predict_str.strip()

    def strip_variable_assignment(s):
        variable_assignment_pattern = r'^\s*\w+\s*=\s*'
        return re.sub(variable_assignment_pattern, '', s.strip(), count=1)

    content = strip_variable_assignment(content)

    def remove_comments(s):
        return re.sub(r'#.*', '', s)

    content = remove_comments(content)

    # drop anything after the closing brace of the outermost dict
    last_brace_pos = content.rfind('}')
    if last_brace_pos != -1:
        content = content[:last_brace_pos + 1]

    data = {}
    success = False

    # try parsing with ast.literal_eval
    try:
        data = ast.literal_eval(content)
        if isinstance(data, dict):
            success = True
    except (ValueError, SyntaxError, TypeError):
        pass

    if not success:
        return {}

    def process_data(obj):
        if isinstance(obj, dict):
            return {k: process_data(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [process_data(elem) for elem in obj]
        else:
            return obj

    data = process_data(data)

    return data


def generate_combinations(input_dict):
    """
    Generate all possible combinations of values from a dictionary whose
    values may be lists of acceptable alternatives.
    """
    kie_answer = input_dict
    if not isinstance(kie_answer, dict):
        kie_answer = kie_answer.strip('"')
        try:
            kie_answer = json.loads(kie_answer)
        except json.JSONDecodeError:
            try:
                kie_answer = ast.literal_eval(kie_answer)
                if not isinstance(kie_answer, dict):
                    # the string may be doubly encoded, so evaluate once more
                    kie_answer = ast.literal_eval(kie_answer)
            except (ValueError, SyntaxError):
                print(f"Unable to parse 'answers' field: {kie_answer}")
                return {}

        # Ensure the parsed result is a dictionary.
        if not isinstance(kie_answer, dict):
            print("Parsed 'answers' is still not a dictionary.")
            raise ValueError("Input could not be parsed into a dictionary.")

        keys = list(kie_answer.keys())

        value_lists = []
        for single_key in keys:
            single_value = kie_answer[single_key]
            if not isinstance(single_value, list):
                single_value = [single_value]
            value_lists.append(single_value)

        # Compute the Cartesian product of the value lists.
        combinations = list(product(*value_lists))

        # Create a dictionary for each combination of values.
        result = [dict(zip(keys, values)) for values in combinations]

        return result

    else:
        keys = list(input_dict.keys())
        value_lists = [input_dict[key] for key in keys]

        # Compute the Cartesian product of the value lists.
        combinations = list(product(*value_lists))

        # Create a dictionary for each combination of values.
        result = [dict(zip(keys, values)) for values in combinations]

        return result
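# Illustrative example (not part of the original file): with two alternatives
# for "date", generate_combinations yields one candidate dict per alternative.
#
#   generate_combinations({"total": ["30"], "date": ["2024/9/27", "27/9/2024"]})
#   # -> [{'total': '30', 'date': '2024/9/27'},
#   #     {'total': '30', 'date': '27/9/2024'}]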
def compute_f1_score(preds, gts, ignores=[]):
    """Compute the macro-averaged F1-score for the KIE task between predicted and ground truth dictionaries.

    Args:
        preds (dict): The predicted key-value pairs.
        gts (dict): The ground truth key-value pairs.
        ignores (list): The list of keys to ignore during evaluation.

    Returns:
        float: The per-field F1-scores averaged over all evaluated keys.
    """
    # Optionally remove ignored keys from predictions and ground truths
    keys = set(preds.keys()).union(set(gts.keys())) - set(ignores)
    f1_scores = {}

    for key in keys:
        pred_value = preds.get(key, None)
        gt_value = gts.get(key, None)

        # normalize: lowercase and drop all whitespace
        if pred_value:
            pred_value = pred_value.lower().strip().replace("\n", " ").replace(" ", "")
        if gt_value:
            gt_value = gt_value.lower().strip().replace("\n", " ").replace(" ", "")

        if pred_value is None and gt_value is None:
            continue
        elif pred_value is None:
            # false negative
            precision = 0.0
            recall = 0.0
        elif gt_value is None:
            # false positive
            precision = 0.0
            recall = 0.0
        else:
            if pred_value == gt_value:
                # true positive
                precision = 1.0
                recall = 1.0
            else:
                precision = 0.0
                recall = 0.0

        # Compute F1-score
        f1_score = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
        f1_scores[key] = f1_score

    if len(f1_scores) == 0:
        return 0
    average_f1 = sum(f1_scores.values()) / len(f1_scores)

    return average_f1
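# Worked example (not part of the original file): one exact match out of two
# evaluated keys gives an average F1 of 0.5; case and whitespace are ignored.
#
#   compute_f1_score({"total": "30", "date": "2024"},
#                    {"total": " 30 ", "date": "2024/9/27"})  # -> 0.5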
def pre_clean(text):
    """Strip special tokens and markup that would distort the tree comparison."""
    text = re.sub(r'<bos>|<eos>|<pad>|<unk>', '', text)
    text = re.sub(r'\s##(\S)', r'\1', text)
    text = re.sub(r'\\\s', r'\\', text)
    text = re.sub(r'\s\*\s\*\s', r'**', text)
    text = re.sub(r'{\s', r'{', text)
    text = re.sub(r'\s}', r'}', text)
    text = re.sub(r'\\begin\s', r'\\begin', text)
    text = re.sub(r'\\end\s', r'\\end', text)
    text = re.sub(r'\\end{table}', r'\\end{table} \n\n', text)
    text = text.replace('\n', ' ')
    text = text.replace('*', ' ')
    text = text.replace('_', ' ')
    return text


def get_tree(input_str):
    """Build a zss tree from a markdown-like document: headings become children
    of ROOT and body lines attach to the most recent heading."""
    tree = (Node('ROOT').addkid(Node('TITLE')))

    lines = input_str.split("\n")
    lines = [pre_clean(line) for line in lines]
    last_title = ''
    for line in lines:
        if line.startswith('#'):
            child = tree.get('ROOT')
            line = line.replace('#', '')
            child.addkid(Node(line))
            last_title = line
        else:
            if last_title == '':
                child = tree.get('TITLE')
                child.addkid(Node(line))
            else:
                child = tree.get(last_title)
                child.addkid(Node(line))
    return tree


def STEDS(pred_tree, ref_tree):
    """Structure-only tree-edit-distance similarity between two document trees."""
    def my_distance(pred, ref):
        if len(pred.split()) == 0 or len(ref.split()) == 0:
            return 1
        else:
            return 0
    total_distance = simple_distance(pred_tree, ref_tree, label_dist=my_distance)
    num_of_nodes = max(len(list(pred_tree.iter())), len(list(ref_tree.iter())))
    return 1 - total_distance / num_of_nodes


def doc_parsing_evaluation(pred, gt):
    score = 0
    if not isinstance(pred, str):
        return 0
    pred_tree = get_tree(pred)
    gt_tree = get_tree(gt)
    score = STEDS(pred_tree, gt_tree)

    return score


def wrap_html_table(html_table):
    """
    The TEDS computation from the PubTabNet code requires the input html table
    to have <html>, <body>, and <table> tags. Add them if they are missing.
    """
    html_table = html_table.replace('\n', '')
    # complete a partial <table> tag pair
    if "<table" in html_table and "</table>" not in html_table:
        html_table = html_table + "</table>"
    elif "<table" not in html_table and "</table>" in html_table:
        html_table = "<table>" + html_table
    elif "<table" not in html_table and "</table>" not in html_table:
        html_table = "<table>" + html_table + "</table>"
    else:
        pass
    # add <body> and <html> tags if missing
    if '<body>' not in html_table:
        html_table = '<body>' + html_table + '</body>'
    if '<html>' not in html_table:
        html_table = '<html>' + html_table + '</html>'
    return html_table


def get_anls(s1, s2):
    """Normalized Levenshtein similarity between two strings (case-insensitive)."""
    try:
        s1 = s1.lower()
        s2 = s2.lower()
    except AttributeError:
        pass
    if s1 == s2:
        return 1.0
    anls = 1 - editdistance.eval(s1, s2) / max(len(s1), len(s2))
    return anls
def ocr_eval(references, predictions):
    """Average ANLS over a list of references; returns the sentinel 9999 when
    every reference turned out to be 'None' (nothing evaluable)."""
    score_ = 0.0
    none_num = 0
    for idx, ref_value in enumerate(references):
        pred_value = predictions[idx]
        pred_values, ref_values = [], []
        if isinstance(pred_value, str):
            pred_values.append(pred_value)
        else:
            pred_values = pred_value
        if isinstance(ref_value, str):
            ref_values.append(ref_value)
        else:
            ref_values = ref_value

        temp_score = 0.0
        temp_num = len(ref_values)

        for tmpidx, tmpref in enumerate(ref_values):
            tmppred = pred_values[tmpidx] if tmpidx < len(pred_values) else pred_values[0]
            if len(pred_values) == 1 and tmppred != "None" and "None" not in ref_values:
                # a single non-empty prediction: take the best match over references
                temp_score = max(temp_score, get_anls(tmppred, tmpref))
                temp_num = len(ref_values)
            else:
                if tmppred == 'None' and tmpref != 'None':
                    temp_score += 0.0
                elif tmpref == 'None':
                    temp_num -= 1
                else:
                    temp_score += get_anls(tmppred, tmpref)
        if temp_num == 0:
            ocr_score = 0.0
            none_num += 1
        else:
            ocr_score = temp_score / temp_num
        score_ += ocr_score
    if none_num == len(references):
        return 9999
    else:
        return round(score_ / (len(references) - none_num), 5)


def csv_eval(predictions, references, easy, pred_type='json'):
    labels = references

    def is_int(val):
        try:
            int(val)
            return True
        except ValueError:
            return False

    def is_float(val):
        try:
            float(val)
            return True
        except ValueError:
            return False

    def convert_dict_to_list(data):
        """
        Convert a dictionary to a list of triples, handling both simple and nested dictionaries.

        Args:
            data (dict): The input dictionary, which might be nested or simple.

        Returns:
            list: A list of triples generated from the input dictionary.
        """
        converted_list = []
        for key, value in data.items():
            # Check if the value is a dictionary (indicating a nested structure)
            if isinstance(value, dict):
                # Handle nested dictionary
                for subkey, subvalue in value.items():
                    converted_list.append((key, subkey, re.sub(r'[^\d.-]', '', str(subvalue))))
            else:
                # Handle simple key-value pair
                converted_list.append((key, "value", re.sub(r'[^\d.-]', '', str(value))))
        return converted_list

    def csv2triples(csv, separator='\\t', delimiter='\\n'):
        lines = csv.strip().split(delimiter)
        header = lines[0].split(separator)
        triples = []
        for line in lines[1:]:
            if not line:
                continue
            values = line.split(separator)
            entity = values[0]
            for i in range(1, len(values)):
                if i >= len(header):
                    break
                temp = [entity.strip(), header[i].strip()]
                # drop a trailing colon from entity and header names
                temp = [x if len(x) == 0 or x[-1] != ':' else x[:-1] for x in temp]
                value = values[i].strip()
                # keep only digits, dots, and minus signs
                value = re.sub(r'[^\d.-]', '', str(value))
                triples.append((temp[0], temp[1], value))
        return triples

    def csv2triples_noheader(csv, separator='\\t', delimiter='\\n'):
        lines = csv.strip().split(delimiter)
        maybe_header = [x.strip() for x in lines[0].split(separator)]
        # if any cell after the first parses as a number, the first line is data, not a header
        not_header = False
        if len(maybe_header) > 2:
            for c in maybe_header[1:]:
                try:
                    float(c)
                except ValueError:
                    continue
                not_header = True
                break
        header = None if not_header else maybe_header
        data_start = 0 if not_header and separator in lines[0] else 1
        triples = []
        for line in lines[data_start:]:
            if not line:
                continue
            values = [x.strip() for x in line.split(separator)]
            entity = values[0]
            for i in range(1, len(values)):
                try:
                    temp = [entity if entity[-1] != ':' else entity[:-1], ""]
                except IndexError:
                    temp = [entity, ""]
                if header is not None:
                    try:
                        this_header = header[i]
                        temp = [entity, this_header]
                        temp = [x if x[-1] != ':' else x[:-1] for x in temp]
                    except IndexError:
                        this_header = entity.strip()
                value = values[i].strip()
                value = re.sub(r'[^\d.-]', '', str(value))
                triples.append((temp[0], temp[1], value))
        return triples

    def process_triplets(triplets):
        new_triplets = []
        for triplet in triplets:
            triplet_temp = []
            if len(triplet) > 2:
                if is_int(triplet[2]) or is_float(triplet[2]):
                    triplet_temp = (triplet[0].lower(), triplet[1].lower(), float(triplet[2]))
                else:
                    triplet_temp = (triplet[0].lower(), triplet[1].lower(), triplet[2].lower())
            else:
                triplet_temp = (triplet[0].lower(), triplet[1].lower(), "no meaning")
            new_triplets.append(triplet_temp)
        return new_triplets

    def intersection_with_tolerance(a, b, tol_word, tol_num):
        a = set(a)
        b = set(b)
        c = set()
        for elem1 in a:
            for elem2 in b:
                if is_float(elem1[-1]) and is_float(elem2[-1]):
                    if ((Levenshtein.distance(''.join(elem1[:-1]), ''.join(elem2[:-1])) <= tol_word) and (abs(elem1[-1] - elem2[-1]) / (abs(elem2[-1]) + 0.000001) <= tol_num)) or \
                       ((''.join(elem1[:-1]) in ''.join(elem2[:-1])) and (abs(elem1[-1] - elem2[-1]) / (abs(elem2[-1]) + 0.000001) <= tol_num)) or \
                       ((''.join(elem2[:-1]) in ''.join(elem1[:-1])) and (abs(elem1[-1] - elem2[-1]) / (abs(elem2[-1]) + 0.000001) <= tol_num)):
                        c.add(elem1)
                else:
                    if (Levenshtein.distance(''.join([str(i) for i in elem1]), ''.join([str(j) for j in elem2])) <= tol_word):
                        c.add(elem1)
        return list(c)

    def union_with_tolerance(a, b, tol_word, tol_num):
        c = set(a) | set(b)
        d = set(a) & set(b)
        e = intersection_with_tolerance(a, b, tol_word, tol_num)
        f = set(e)
        g = c - (f - d)
        return list(g)

    def get_eval_list(pred_csv, label_csv, separator='\\t', delimiter='\\n', tol_word=3, tol_num=0.05, pred_type='json'):
        if pred_type == 'json':
            pred_triple_list = []
            for it in pred_csv:
                pred_triple_temp = convert_dict_to_list(it)
                pred_triple_pre = process_triplets(pred_triple_temp)
                pred_triple_list.append(pred_triple_pre)
        else:
            pred_triple_list = []
            for it in pred_csv:
                pred_triple_temp = csv2triples(it, separator=separator, delimiter=delimiter)
                pred_triple_pre = process_triplets(pred_triple_temp)
                pred_triple_list.append(pred_triple_pre)

        label_triple_list = []
        for it in label_csv:
            label_triple_temp = convert_dict_to_list(it)
            label_triple_pre = process_triplets(label_triple_temp)
            label_triple_list.append(label_triple_pre)

        intersection_list = []
        union_list = []
        sim_list = []
        # for each chart image
        for pred, label in zip(pred_triple_list, label_triple_list):
            for idx in range(len(pred)):
                try:
                    if label[idx][1] == "value" and "value" not in pred[idx][:2]:
                        pred[idx] = (pred[idx][0], "value", pred[idx][2])
                    temp_pred_head = sorted(pred[idx][:2])
                    temp_gt_head = sorted(label[idx][:2])
                    pred[idx] = (temp_pred_head[0], temp_pred_head[1], pred[idx][2])
                    label[idx] = (temp_gt_head[0], temp_gt_head[1], label[idx][2])
                except (IndexError, TypeError):
                    continue
            intersection = intersection_with_tolerance(pred, label, tol_word=tol_word, tol_num=tol_num)
            union = union_with_tolerance(pred, label, tol_word=tol_word, tol_num=tol_num)
            # identical empty triple sets count as a perfect match
            sim = len(intersection) / len(union) if union else 1.0
            intersection_list.append(intersection)
            union_list.append(union)
            sim_list.append(sim)
        return intersection_list, union_list, sim_list

    def get_ap(predictions, labels, sim_threshold, tolerance, separator='\\t', delimiter='\\n', easy=1):
        # word/number tolerances widen from 'strict' to 'high'; the hard subset gets looser number tolerance
        if tolerance == 'strict':
            tol_word = 0
            tol_num = 0 if easy == 1 else 0.1
        elif tolerance == 'slight':
            tol_word = 2
            tol_num = 0.05 if easy == 1 else 0.3
        elif tolerance == 'high':
            tol_word = 5
            tol_num = 0.1 if easy == 1 else 0.5
        intersection_list, union_list, sim_list = get_eval_list(predictions, labels, separator=separator, delimiter=delimiter, tol_word=tol_word, tol_num=tol_num, pred_type=pred_type)
        ap = len([num for num in sim_list if num >= sim_threshold]) / (len(sim_list) + 1e-16)
        return ap

    map_strict = 0
    map_slight = 0
    map_high = 0
    s = "\\t"
    d = "\\n"

    # mean precision over similarity thresholds 0.5:0.05:0.95
    for sim_threshold in np.arange(0.5, 1, 0.05):
        map_temp_strict = get_ap(predictions, labels, sim_threshold=sim_threshold, tolerance='strict', separator=s, delimiter=d, easy=easy)
        map_temp_slight = get_ap(predictions, labels, sim_threshold=sim_threshold, tolerance='slight', separator=s, delimiter=d, easy=easy)
        map_temp_high = get_ap(predictions, labels, sim_threshold=sim_threshold, tolerance='high', separator=s, delimiter=d, easy=easy)
        map_strict += map_temp_strict / 10
        map_slight += map_temp_slight / 10
        map_high += map_temp_high / 10

    em = get_ap(predictions, labels, sim_threshold=1, tolerance='strict', separator=s, delimiter=d, easy=easy)
    ap_50_strict = get_ap(predictions, labels, sim_threshold=0.5, tolerance='strict', separator=s, delimiter=d, easy=easy)
    ap_75_strict = get_ap(predictions, labels, sim_threshold=0.75, tolerance='strict', separator=s, delimiter=d, easy=easy)
    ap_90_strict = get_ap(predictions, labels, sim_threshold=0.90, tolerance='strict', separator=s, delimiter=d, easy=easy)
    ap_50_slight = get_ap(predictions, labels, sim_threshold=0.5, tolerance='slight', separator=s, delimiter=d, easy=easy)
    ap_75_slight = get_ap(predictions, labels, sim_threshold=0.75, tolerance='slight', separator=s, delimiter=d, easy=easy)
    ap_90_slight = get_ap(predictions, labels, sim_threshold=0.90, tolerance='slight', separator=s, delimiter=d, easy=easy)
    ap_50_high = get_ap(predictions, labels, sim_threshold=0.5, tolerance='high', separator=s, delimiter=d, easy=easy)
    ap_75_high = get_ap(predictions, labels, sim_threshold=0.75, tolerance='high', separator=s, delimiter=d, easy=easy)
    ap_90_high = get_ap(predictions, labels, sim_threshold=0.90, tolerance='high', separator=s, delimiter=d, easy=easy)

    return em, map_strict, map_slight, map_high, ap_50_strict, ap_75_strict, ap_90_strict, ap_50_slight, ap_75_slight, ap_90_slight, ap_50_high, ap_75_high, ap_90_high


def draw_SCRM_table(em, map_strict, map_slight, map_high, ap_50_strict, ap_75_strict, ap_90_strict, ap_50_slight, ap_75_slight, ap_90_slight, ap_50_high, ap_75_high, ap_90_high, title_ocr_score, source_ocr_score, x_title_ocr_score, y_title_ocr_score, structure_accuracy):

    result = f'''
    -----------------------------------------------------------\n
    | Metrics       | Sim_threshold  | Tolerance | Value |\n
    -----------------------------------------------------------\n
    |               |                | strict    | {'%.4f' % map_strict} |\n
    |               |                -----------------------\n
    | mPrecision    | 0.5:0.05:0.95  | slight    | {'%.4f' % map_slight} |\n
    |               |                -----------------------\n
    |               |                | high      | {'%.4f' % map_high} |\n
    -----------------------------------------------------------\n
    |               |                | strict    | {'%.4f' % ap_50_strict} |\n
    |               |                -----------------------\n
    | Precision     | 0.5            | slight    | {'%.4f' % ap_50_slight} |\n
    |               |                -----------------------\n
    |               |                | high      | {'%.4f' % ap_50_high} |\n
    -----------------------------------------------------------\n
    |               |                | strict    | {'%.4f' % ap_75_strict} |\n
    |               |                -----------------------\n
    | Precision     | 0.75           | slight    | {'%.4f' % ap_75_slight} |\n
    |               |                -----------------------\n
    |               |                | high      | {'%.4f' % ap_75_high} |\n
    -----------------------------------------------------------\n
    |               |                | strict    | {'%.4f' % ap_90_strict} |\n
    |               |                -----------------------\n
    | Precision     | 0.9            | slight    | {'%.4f' % ap_90_slight} |\n
    |               |                -----------------------\n
    |               |                | high      | {'%.4f' % ap_90_high} |\n
    -----------------------------------------------------------\n
    | Precision(EM) | {'%.4f' % em} |\n
    -----------------------------------------------------------\n
    | Title(EM)     | {'%.4f' % title_ocr_score} |\n
    -----------------------------------------------------------\n
    | Source(EM)    | {'%.4f' % source_ocr_score} |\n
    -----------------------------------------------------------\n
    | X_title(EM)   | {'%.4f' % x_title_ocr_score} |\n
    -----------------------------------------------------------\n
    | Y_title(EM)   | {'%.4f' % y_title_ocr_score} |\n
    -----------------------------------------------------------\n
    | structure_acc | {'%.4f' % structure_accuracy} |\n
    -----------------------------------------------------------\n
    '''
    return result


if __name__ == '__main__':
    import pprint

    # markdown structure for the Table Parsing task
    pred_markdown = "| 1 | august 5 , 1972 | detroit lions | l 23 - 31 | 0 - 1 |\n| 2 | august 12 , 1972 | green bay packers | l 13 - 14 | 0 - 2 |\n| 3 | august 19 , 1972 | cincinnati bengals | w 35 - 17 | 1 - 2 |\n| 4 | august 25 , 1972 | atlanta falcons | w 24 - 10 | 2 - 2 |\n| 5 | august 31 , 1972 | washington redskins | l 24 - 27 | 2 - 3 |\n| 6 | september 10 , 1972 | minnesota vikings | w 21 - 19 | 3 - 3 |"
    true_markdown = "| week | date | opponent | result | record |\n| --- | --- | --- | --- | --- |\n| 1 | august 5 , 1972 | detroit lions | l 23 - 31 | 0 - 1 |\n| 2 | august 12 , 1972 | green bay packers | l 13 - 14 | 0 - 2 |\n| 3 | august 19 , 1972 | cincinnati bengals | w 35 - 17 | 1 - 2 |\n| 4 | august 25 , 1972 | atlanta falcons | w 24 - 10 | 2 - 2 |\n| 5 | august 31 , 1972 | washington redskins | l 24 - 27 | 2 - 3 |\n| 6 | september 10 , 1972 | minnesota vikings | w 21 - 19 | 3 - 3 |"
    teds = TEDS(n_jobs=4)
    pred_table_html = convert_markdown_table_to_html(pred_markdown)
    true_table_html = convert_markdown_table_to_html(true_markdown)

    scores = teds.evaluate(pred_table_html, true_table_html)

    pp = pprint.PrettyPrinter()
    pp.pprint(scores)

    # dict structure for the Key Information Extraction task
    pred_dict = {
        "company": [
            "OLD TOWN "
        ],
        "date": [
            "2024"
        ],
        "address": [
            "SRI RAMPAI"
        ],
        "total": [
            "30"
        ]
    }
    true_dict = {
        "company": [
            "OLD TOWN KOPITAM SND BHD"
        ],
        "date": [
            "2024/9/27"
        ],
        "address": [
            "SRI RAMPAI"
        ],
        "total": [
            "30"
        ]
    }
    pred_dict_html = dict_to_html(pred_dict)
    true_dict_html = dict_to_html(true_dict)
    print(pred_dict_html)
    print(true_dict_html)

    scores = teds.evaluate(pred_dict_html, true_dict_html)

    pp = pprint.PrettyPrinter()
    pp.pprint(scores)
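Since several metrics above reduce to ANLS, a tiny worked example may help; this sketch assumes only the `get_anls` function defined in this file:

```python
from TEDS_metric import get_anls

# Levenshtein distance between "hello" and "helo" is 1; the longer string has
# length 5, so the normalized similarity is 1 - 1/5 = 0.8 (case-insensitive).
print(get_anls("Hello", "helo"))  # -> 0.8
```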
Binary file not shown.
BIN
OCRBench_v2/eval_scripts/__pycache__/TEDS_metric.cpython-310.pyc
Normal file
Binary file not shown.
BIN
OCRBench_v2/eval_scripts/__pycache__/parallel.cpython-310.pyc
Normal file
Binary file not shown.
BIN
OCRBench_v2/eval_scripts/__pycache__/vqa_metric.cpython-310.pyc
Normal file
Binary file not shown.
381
OCRBench_v2/eval_scripts/eval.py
Normal file
@@ -0,0 +1,381 @@
import os
import re
import ast
import json
import argparse
import numpy as np
from tqdm import tqdm

from vqa_metric import vqa_evaluation, cn_vqa_evaluation, math_expression_evaluation, vqa_evaluation_case_sensitive, counting_evaluation, cn_math_expression_evaluation
from IoUscore_metric import vqa_with_position_evaluation, calculate_iou, extract_coordinates
from TEDS_metric import TEDS, convert_markdown_table_to_html, convert_str_to_dict, convert_str_to_multi_dict, generate_combinations, dict_to_html, compute_f1_score, doc_parsing_evaluation, wrap_html_table
from page_ocr_metric import cal_per_metrics
from spotting_metric import extract_bounding_boxes_robust, spotting_evaluation


def is_nan_value(value):
    """Return True for None, the string 'nan'/'NaN', or a pandas NaN."""
    if value is None:
        return True
    if isinstance(value, str) and value.lower() == 'nan':
        return True
    try:
        import pandas as pd
        if pd.isna(value):
            return True
    except (ImportError, TypeError, ValueError):
        pass
    return False


def get_value_or_zero(value):
    return 0.0 if value is None else value


def process_predictions(input_path, output_path):
    with open(input_path, "r") as f:
        predict_file = json.load(f)

    teds = TEDS(n_jobs=32)

    task_type_list = ["APP agent en", "ASCII art classification en", "key information extraction en", "key information mapping en", "math QA en", "full-page OCR en",
                      "reasoning VQA en", "fine-grained text recognition en", "science QA en", "table parsing en", "text counting en", "text grounding en",
                      "text recognition en", "text spotting en", "document classification en", "cognition VQA en", "VQA with position en",
                      "chart parsing en", "document parsing en", "formula recognition en", "diagram QA en",
                      "cognition VQA cn", "key information extraction cn", "formula recognition cn", "full-page OCR cn", "reasoning VQA cn",
                      "text translation cn", "table parsing cn", "handwritten answer extraction cn", "document parsing cn"]

    res_data_list = []

    for index, data_item in enumerate(tqdm(predict_file)):
        if data_item["type"] in ("APP agent en", "ASCII art classification en", "math QA en",
                                 "reasoning VQA en", "science QA en", "text recognition en",
                                 "document classification en", "cognition VQA en", "diagram QA en"):
            if "eval" in data_item.keys():
                if data_item["eval"] == "multiple choice":
                    if not isinstance(data_item["answers"], list):
                        data_item["answers"] = [data_item["answers"]]
                    assert len(data_item["answers"]) == 1

                    if not isinstance(data_item["predict"], str):
                        data_item["score"] = 0
                    else:
                        # compare only the letters of the predicted option
                        predict = ''.join(c for c in data_item["predict"] if c.isalpha())

                        if predict == data_item["answers"][0]:
                            data_item["score"] = 1
                        else:
                            data_item["score"] = 0
                elif data_item["eval"] == "case sensitive":
                    data_item["score"] = vqa_evaluation_case_sensitive(data_item["predict"], data_item["answers"])
                else:
                    raise ValueError("No such evaluation method")
            else:
                data_item["score"] = vqa_evaluation(data_item["predict"], data_item["answers"])

        elif data_item["type"] in ("cognition VQA cn", "reasoning VQA cn"):
            if "eval" in data_item.keys():
                if data_item["eval"] == "multiple choice":
                    assert len(data_item["answers"]) == 1
                    predict = ''.join(c for c in data_item["predict"] if c.isalpha())

                    if predict == data_item["answers"][0]:
                        data_item["score"] = 1
                    else:
                        data_item["score"] = 0
                elif data_item["eval"] == "case sensitive":
                    data_item["score"] = vqa_evaluation_case_sensitive(data_item["predict"], data_item["answers"])
                else:
                    raise ValueError("No such evaluation method")
            else:
                data_item["score"] = cn_vqa_evaluation(data_item["predict"], data_item["answers"])

        elif data_item["type"] == "handwritten answer extraction cn":
            if "简答" in data_item["question"]:
                # free-form answers are scored with the page-OCR metrics
                ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
                data_item["score"] = (
                    get_value_or_zero(ocr_metric["bleu"]) +
                    get_value_or_zero(ocr_metric["meteor"]) +
                    get_value_or_zero(ocr_metric["f_measure"]) +
                    (1 - get_value_or_zero(ocr_metric["edit_dist"]))
                ) / 4
            else:
                assert len(data_item["answers"]) == 1
                answer = data_item["answers"][0]
                chars = list(answer)
                if len(answer) > 1:
                    # accept the multi-character answer under common separator variants
                    answer_list = [
                        "".join(chars),
                        ".".join(chars),
                        ". ".join(chars),
                        ",".join(chars),
                        ", ".join(chars),
                        "、".join(chars),
                        ";".join(chars),
                        "; ".join(chars),
                        " ".join(chars),
                        "和".join(chars)
                    ]
                    max_score = 0
                    for answer in answer_list:
                        if answer in data_item["predict"]:
                            temp_score = 1
                        else:
                            temp_score = 0
                        if temp_score > max_score:
                            max_score = temp_score
                    data_item["score"] = max_score

                else:
                    if data_item["answers"][0] in data_item["predict"]:
                        data_item["score"] = 1
                    else:
                        data_item["score"] = 0

        elif data_item["type"] == "formula recognition cn":
            if is_nan_value(data_item["predict"]):
                data_item["score"] = 0
            else:
                data_item["score"] = cn_math_expression_evaluation(data_item["predict"], data_item["answers"])

        elif data_item["type"] == "text counting en":
            data_item["score"] = counting_evaluation(data_item["predict"], data_item["answers"], data_item["eval"])

        elif data_item["type"] == "formula recognition en":
            data_item["score"] = math_expression_evaluation(data_item["predict"], data_item["answers"])

        elif data_item["type"] == "table parsing en":
            if isinstance(data_item["answers"], list) and len(data_item["answers"]) == 1:
                if not isinstance(data_item["predict"], str):
                    data_item["score"] = 0
                elif not isinstance(data_item["question"], str):
                    data_item["ignore"] = "True"
                    data_item["score"] = 0

                elif "html" in data_item["question"].lower():
                    # keep only the part of the prediction starting at <body> or <table>
                    no_find = False
                    predict_table = data_item["predict"].replace('\n', '')
                    if "<body" in predict_table:
                        predict_table = re.findall('<body.*', predict_table)[0]
                    elif "<table" in predict_table:
                        predict_table = re.findall('<table.*', predict_table)[0]
                    else:
                        no_find = True

                    if no_find:
                        data_item["score"] = 0
                    else:
                        pred_table_html = wrap_html_table(predict_table)
                        gold_table_html = wrap_html_table(data_item["answers"][0])
                        try:
                            data_item["score"] = teds.evaluate(pred_table_html, gold_table_html)
                        except Exception:
                            data_item["score"] = 0

                elif "markdown" in data_item["question"].lower():
                    # coerce non-string predictions to str before conversion
                    prediction = data_item["predict"] if isinstance(data_item["predict"], str) else str(data_item["predict"])
                    pred_table_html = convert_markdown_table_to_html(prediction)
                    gt_table_html = convert_markdown_table_to_html(data_item["answers"][0])
                    data_item["score"] = teds.evaluate(pred_table_html, gt_table_html)
            else:
                raise ValueError

        elif data_item["type"] == "table parsing cn":
            if not isinstance(data_item["predict"], str):
                data_item["score"] = 0
            else:
                no_find = False
                predict_table = data_item["predict"].replace('\n', '')
                if "<body" in predict_table:
                    predict_table = re.findall('<body.*', predict_table)[0]
                elif "<table" in predict_table:
                    predict_table = re.findall('<table.*', predict_table)[0]
                else:
                    no_find = True

                if no_find:
                    data_item["score"] = 0
                else:
                    pred_table_html = wrap_html_table(predict_table)
                    gold_table_html = wrap_html_table(data_item["answers"][0])
                    try:
                        data_item["score"] = teds.evaluate(pred_table_html, gold_table_html)
                    except Exception:
                        data_item["score"] = 0
                        print("error")

        elif data_item["type"] == "chart parsing en":
            answer = data_item["answers"][0]
            if data_item["predict"]:
                pred_chart_dict = convert_str_to_multi_dict(data_item["predict"])
                if len(pred_chart_dict) == 0:
                    data_item["score"] = 0
                else:
                    pred_chart_html = dict_to_html(pred_chart_dict)
                    gt_chart_html = dict_to_html(answer)
                    data_item["score"] = teds.evaluate(pred_chart_html, gt_chart_html)
            else:
                data_item["score"] = 0

        elif data_item["type"] in ("document parsing en", "document parsing cn"):
            assert isinstance(data_item["answers"], list) and len(data_item["answers"]) == 1
            data_item["score"] = doc_parsing_evaluation(data_item["predict"], data_item["answers"][0])

        elif data_item["type"] in ("key information extraction en", "key information mapping en"):
            assert len(data_item["answers"]) == 1
            answers = generate_combinations(data_item["answers"][0])

            if isinstance(answers, list) and len(answers) == 1:
                if not isinstance(data_item["predict"], str):
                    data_item["score"] = 0
                else:
                    pred_kie_dict = convert_str_to_dict(data_item["predict"])
                    data_item["score"] = compute_f1_score(pred_kie_dict, answers[0])
            else:
                # several acceptable value combinations: keep the best score
                if not isinstance(data_item["predict"], str):
                    data_item["score"] = 0
                else:
                    max_score = 0
                    pred_kie_dict = convert_str_to_dict(data_item["predict"])
                    for answer in answers:
                        score = compute_f1_score(pred_kie_dict, answer)
                        if score > max_score:
                            max_score = score
                    data_item["score"] = max_score

        elif data_item["type"] == "key information extraction cn":
            assert len(data_item["answers"]) == 1
            answers = ast.literal_eval(data_item["answers"][0])
            answers = {k: v if isinstance(v, list) else [v] for k, v in answers.items()}
            answers = generate_combinations(answers)
            if isinstance(answers, list) and len(answers) == 1:
                if not isinstance(data_item["predict"], str):
                    data_item["score"] = 0
                else:
                    pred_kie_dict = convert_str_to_dict(data_item["predict"])
                    data_item["score"] = compute_f1_score(pred_kie_dict, answers[0])
            else:
                if not isinstance(data_item["predict"], str):
                    data_item["score"] = 0
                else:
                    max_score = 0
                    pred_kie_dict = convert_str_to_dict(data_item["predict"])
                    for answer in answers:
                        score = compute_f1_score(pred_kie_dict, answer)
                        if score > max_score:
                            max_score = score
                    data_item["score"] = max_score

        elif data_item["type"] == "VQA with position en":
            if not isinstance(data_item["predict"], str):
                data_item["score"] = 0
            else:
                pred_dict = convert_str_to_dict(data_item["predict"])
                data_item["score"] = vqa_with_position_evaluation(pred_dict, data_item)

        elif data_item["type"] == "text translation cn":
            if not isinstance(data_item["predict"], str) or len(data_item["predict"]) == 0:
                data_item["score"] = 0
            elif len(data_item["answers"][0]) == 0:
                # empty reference: score 0 and exclude this sample from aggregation
                data_item["score"] = 0
                data_item["ignore"] = "True"
            else:
                ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
                data_item["score"] = (ocr_metric["bleu"] + ocr_metric["meteor"] + ocr_metric["f_measure"] + (1 - ocr_metric["edit_dist"])) / 4

        elif data_item["type"] == "fine-grained text recognition en":
            if not isinstance(data_item["predict"], str):
                data_item["score"] = 0
            elif len(data_item["predict"]) == 0:
                data_item["score"] = 0
            else:
                ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
                data_item["score"] = (
                    get_value_or_zero(ocr_metric["bleu"]) +
                    get_value_or_zero(ocr_metric["meteor"]) +
                    get_value_or_zero(ocr_metric["f_measure"]) +
                    (1 - get_value_or_zero(ocr_metric["edit_dist"]))
                ) / 4

        elif data_item["type"] == "full-page OCR en":
            if not data_item["predict"]:
                data_item["score"] = 0
            else:
                ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
                data_item["score"] = (
                    get_value_or_zero(ocr_metric["bleu"]) +
                    get_value_or_zero(ocr_metric["meteor"]) +
                    get_value_or_zero(ocr_metric["f_measure"]) +
                    (1 - get_value_or_zero(ocr_metric["edit_dist"]))
                ) / 4

        elif data_item["type"] == "full-page OCR cn":
            if not isinstance(data_item["predict"], str) or len(data_item["predict"]) == 0:
                data_item["score"] = 0
            else:
                ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
                data_item["score"] = (ocr_metric["bleu"] + ocr_metric["meteor"] + ocr_metric["f_measure"] + (1 - ocr_metric["edit_dist"])) / 4

        elif data_item["type"] == "text grounding en":
            if not isinstance(data_item["predict"], str):
                data_item["score"] = 0
            else:
                predict_bbox = extract_coordinates(data_item["predict"])
                if not predict_bbox:
                    data_item["score"] = 0
                else:
                    data_item["score"] = calculate_iou(predict_bbox, data_item["answers"])

        elif data_item["type"] == "text spotting en":
            if not isinstance(data_item["predict"], str):
                data_item["score"] = 0
            else:
                predict_bbox = extract_bounding_boxes_robust(data_item["predict"])
                if not predict_bbox:
                    data_item["score"] = 0
                else:
                    data_item["score"] = spotting_evaluation(predict_bbox, data_item)

        else:
            raise ValueError("Unknown task type!")

        res_data_list.append(data_item)

    for task_name in task_type_list:
        print("\n" + task_name)
        mean_score, total_len = 0.0, 0
        for item in res_data_list:
            if item["type"] == task_name:
                total_len += 1
                mean_score += item["score"]

        mean_score = mean_score / total_len if total_len > 0 else 0
        print(f"Task {task_name}, total instructions: {total_len}, average score: {mean_score:.3f}\n")

    with open(output_path, 'w', encoding='utf-8') as file:
        json.dump(predict_file, file, ensure_ascii=False, indent=4)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process prediction JSON files and evaluate results.")
    parser.add_argument(
        "--input_path", type=str, required=True, help="Path to the input prediction JSON file."
    )
    parser.add_argument(
        "--output_path", type=str, required=True, help="Path to save the results JSON file."
    )

    args = parser.parse_args()

    process_predictions(args.input_path, args.output_path)

    print("End of Code!")
125
OCRBench_v2/eval_scripts/get_score.py
Normal file
@@ -0,0 +1,125 @@
import os
import json
import argparse


def calculate_average(scores_dict):
    """Average each score list, skipping empty categories."""
    averages = {key: sum(values) / len(values) for key, values in scores_dict.items() if len(values) > 0}
    return averages


def main():
    # Set up argument parser
    parser = argparse.ArgumentParser(description="Process a JSON file to calculate scores.")
    parser.add_argument("--json_file", type=str, required=True, help="Path to the JSON file containing inference data.")
    args = parser.parse_args()

    # Load data from JSON file
    inference_file = args.json_file
    if not os.path.exists(inference_file):
        print(f"Error: File '{inference_file}' does not exist.")
        return

    with open(inference_file, "r") as f:
        data_list = json.load(f)

    en_text_recognition_list, en_text_detection_list, en_text_spotting_list, en_relationship_extraction_list = [], [], [], []
    en_element_parsing_list, en_mathematical_calculation_list, en_visual_text_understanding_list = [], [], []
    en_knowledge_reasoning_list = []

    cn_text_recognition_list, cn_relationship_extraction_list = [], []
    cn_element_parsing_list, cn_visual_text_understanding_list = [], []
    cn_knowledge_reasoning_list = []

    for item in data_list:
        if "ignore" in item.keys():
            assert item["ignore"] == "True"

        elif item["type"] in ("text recognition en", "fine-grained text recognition en", "full-page OCR en"):
            en_text_recognition_list.append(item["score"])

        elif item["type"] in ("text grounding en", "VQA with position en"):
            en_text_detection_list.append(item["score"])

        elif item["type"] == "text spotting en":
            en_text_spotting_list.append(item["score"])

        elif item["type"] in ("key information extraction en", "key information mapping en"):
            en_relationship_extraction_list.append(item["score"])

        elif item["type"] in ("document parsing en", "chart parsing en", "table parsing en", "formula recognition en"):
            en_element_parsing_list.append(item["score"])

        elif item["type"] in ("math QA en", "text counting en"):
            en_mathematical_calculation_list.append(item["score"])

        elif item["type"] in ("document classification en", "cognition VQA en", "diagram QA en"):
            en_visual_text_understanding_list.append(item["score"])

        elif item["type"] in ("reasoning VQA en", "science QA en", "APP agent en", "ASCII art classification en"):
            en_knowledge_reasoning_list.append(item["score"])

        elif item["type"] == "full-page OCR cn":
            cn_text_recognition_list.append(item["score"])

        elif item["type"] in ("key information extraction cn", "handwritten answer extraction cn"):
            cn_relationship_extraction_list.append(item["score"])

        elif item["type"] in ("document parsing cn", "table parsing cn", "formula recognition cn"):
            cn_element_parsing_list.append(item["score"])

        elif item["type"] == "cognition VQA cn":
            cn_visual_text_understanding_list.append(item["score"])

        elif item["type"] in ("reasoning VQA cn", "text translation cn"):
            cn_knowledge_reasoning_list.append(item["score"])

        else:
            raise ValueError("Unknown task type!")

    en_scores = {
        "text_recognition": en_text_recognition_list,
        "text_detection": en_text_detection_list,
        "text_spotting": en_text_spotting_list,
        "relationship_extraction": en_relationship_extraction_list,
        "element_parsing": en_element_parsing_list,
        "mathematical_calculation": en_mathematical_calculation_list,
        "visual_text_understanding": en_visual_text_understanding_list,
        "knowledge_reasoning": en_knowledge_reasoning_list
    }

    cn_scores = {
        "text_recognition": cn_text_recognition_list,
        "relationship_extraction": cn_relationship_extraction_list,
        "element_parsing": cn_element_parsing_list,
        "visual_text_understanding": cn_visual_text_understanding_list,
        "knowledge_reasoning": cn_knowledge_reasoning_list
    }

    en_averages = calculate_average(en_scores)
    cn_averages = calculate_average(cn_scores)

    print("English Scores:")
    for key, score in en_averages.items():
        print(f"{key}: {score:.3f} (Count: {len(en_scores[key])})")

    print("\nChinese Scores:")
    for key, score in cn_averages.items():
        print(f"{key}: {score:.3f} (Count: {len(cn_scores[key])})")

    score_en_overall = sum(en_averages.values()) / len(en_averages)
    score_cn_overall = sum(cn_averages.values()) / len(cn_averages)

    print("\nOverall Scores:")
    print(f"English Overall Score: {score_en_overall:.3f}")
    print(f"Chinese Overall Score: {score_cn_overall:.3f}")

    print("End of Code!")


if __name__ == "__main__":
    main()
50 OCRBench_v2/eval_scripts/page_ocr_metric.py Normal file
@@ -0,0 +1,50 @@
import json
import argparse
import nltk
from nltk.metrics import precision, recall, f_measure
import numpy as np
import jieba
import re
from nltk.translate import meteor_score


def contain_chinese_string(text):
    chinese_pattern = re.compile(r'[\u4e00-\u9fa5]')
    return bool(chinese_pattern.search(text))


def cal_per_metrics(pred, gt):
    metrics = {}

    # Tokenize with jieba for Chinese text, plain whitespace split otherwise.
    if contain_chinese_string(gt) or contain_chinese_string(pred):
        reference = jieba.lcut(gt)
        hypothesis = jieba.lcut(pred)
    else:
        reference = gt.split()
        hypothesis = pred.split()

    metrics["bleu"] = nltk.translate.bleu([reference], hypothesis)
    metrics["meteor"] = meteor_score.meteor_score([reference], hypothesis)

    reference = set(reference)
    hypothesis = set(hypothesis)
    metrics["f_measure"] = f_measure(reference, hypothesis)

    metrics["precision"] = precision(reference, hypothesis)
    metrics["recall"] = recall(reference, hypothesis)
    metrics["edit_dist"] = nltk.edit_distance(pred, gt) / max(len(pred), len(gt))
    return metrics


if __name__ == "__main__":

    # Examples for region text recognition and read-all-text tasks
    predict_text = "metrics['edit_dist'] = nltk.edit_distance(pred, gt) / max(len(pred), len(gt))"
    true_text = "metrics = nltk.edit_distance(pred, gt) / max(len(pred), len(gt))"

    scores = cal_per_metrics(predict_text, true_text)

    predict_text = "metrics['edit_dist'] len(gt))"
    true_text = "metrics = nltk.edit_distance(pred, gt) / max(len(pred), len(gt))"

    scores = cal_per_metrics(predict_text, true_text)
    print(scores)
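
A quick extra check (a sketch, not part of the file): the same helper handles Chinese text through the jieba branch and returns the same metric keys.

scores_cn = cal_per_metrics("今天天气很好", "今天天气不错")
print(scores_cn)  # keys: bleu, meteor, f_measure, precision, recall, edit_dist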
50 OCRBench_v2/eval_scripts/parallel.py Normal file
@@ -0,0 +1,50 @@
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor, as_completed


def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=0):
    """
    A parallel version of the map function with a progress bar.

    Args:
        array (array-like): An array to iterate over.
        function (function): A python function to apply to the elements of array
        n_jobs (int, default=16): The number of cores to use
        use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of
            keyword arguments to function
        front_num (int, default=0): The number of iterations to run serially before kicking off the parallel job.
            Useful for catching bugs
    Returns:
        [function(array[0]), function(array[1]), ...]
    """
    # We run the first few iterations serially to catch bugs
    if front_num > 0:
        front = [function(**a) if use_kwargs else function(a) for a in array[:front_num]]
    else:
        front = []
    # If we set n_jobs to 1, just run a list comprehension. This is useful for benchmarking and debugging.
    if n_jobs == 1:
        return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
    # Assemble the workers
    with ProcessPoolExecutor(max_workers=n_jobs) as pool:
        # Pass the elements of array into function
        if use_kwargs:
            futures = [pool.submit(function, **a) for a in array[front_num:]]
        else:
            futures = [pool.submit(function, a) for a in array[front_num:]]
        kwargs = {
            'total': len(futures),
            'unit': 'it',
            'unit_scale': True,
            'leave': True
        }
        # Print out the progress as tasks complete
        for f in tqdm(as_completed(futures), **kwargs):
            pass
    out = []
    # Get the results from the futures.
    for i, future in tqdm(enumerate(futures)):
        try:
            out.append(future.result())
        except Exception as e:
            out.append(e)
    return front + out
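
A minimal usage sketch (not part of the file; `square` and the import path are illustrative assumptions):

from parallel import parallel_process

def square(x):
    # Trivial stand-in for a per-sample scoring function.
    return x * x

if __name__ == "__main__":
    # front_num=3 runs the first items serially, surfacing bugs before the workers fork.
    results = parallel_process(list(range(100)), square, n_jobs=4, front_num=3)
    print(results[:5])  # [0, 1, 4, 9, 16]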
0 OCRBench_v2/eval_scripts/spotting_eval/__init__.py Normal file
BIN OCRBench_v2/eval_scripts/spotting_eval/gt.zip Normal file (binary file not shown)
6 OCRBench_v2/eval_scripts/spotting_eval/gt/gt_img_0.txt Normal file
@@ -0,0 +1,6 @@
442,380,507,380,507,399,442,399,CHEROKEE
506,380,547,380,547,397,506,397,STREET
481,399,536,399,536,417,481,417,BIKES
443,425,469,425,469,438,443,438,###
471,425,505,425,505,438,471,438,###
513,425,543,425,543,439,513,439,###
26 OCRBench_v2/eval_scripts/spotting_eval/readme.txt Normal file
@@ -0,0 +1,26 @@
INSTRUCTIONS FOR THE STANDALONE SCRIPTS
Requirements:
- Python version 3.
- Each task requires different Python modules. When running the script, if a module is not installed you will see a notification and installation instructions.

Procedure:
Download the ZIP file for the requested script and unzip it to a directory.

Open a terminal in the directory and run the command:
python script.py -g=gt.zip -s=submit.zip

If you have already installed all the required modules, you will see the method's results or an error message if the submitted file is not correct.

If a module is not present, install it with pip: pip install 'module'

In the case of the Polygon module, use: pip install Polygon3

Parameters:
-g: Path of the Ground Truth file. In most cases, the Ground Truth will be included in the same ZIP file, named 'gt.zip', 'gt.txt' or 'gt.json'. If not, you will be able to get it on the Downloads page of the Task.
-s: Path of your method's results file.

Optional parameters:
-o: Path to a directory where the file 'results.zip' that contains per-sample results will be copied.
-p: JSON string of parameters to override the script's default parameters. The parameters that can be overridden are inside the function 'default_evaluation_params' located at the beginning of the evaluation script.

Example: python script.py -g=gt.zip -s=submit.zip -o=./ -p={\"IOU_CONSTRAINT\":0.8}
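
The same evaluation can also be driven programmatically; a sketch based on how spotting_metric.py (further below) invokes it, with the dict keys mirroring the CLI flags:

import spotting_eval.rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs
from spotting_eval.script import default_evaluation_params, validate_data, evaluate_method

params = {"g": "gt.zip", "s": "submit.zip", "o": "./", "p": '{"IOU_CONSTRAINT": 0.8}'}
result = rrc_evaluation_funcs.main_evaluation(params, default_evaluation_params,
                                              validate_data, evaluate_method)
print(result["method"])  # {'precision': ..., 'recall': ..., 'hmean': ..., 'AP': ...}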
BIN OCRBench_v2/eval_scripts/spotting_eval/results.zip Normal file (binary file not shown)
456 OCRBench_v2/eval_scripts/spotting_eval/rrc_evaluation_funcs_1_1.py Normal file
@@ -0,0 +1,456 @@
#!/usr/bin/env python3
# encoding: UTF-8

# File: rrc_evaluation_funcs_1_1.py
# Version: 1.1
# Version info: changes for Python 3
# Date: 2019-12-29
# Description: File with useful functions to use by the evaluation scripts in the RRC website.

import json
import sys
sys.path.append('./')
import zipfile
import re
import os
import importlib


def print_help():
    sys.stdout.write('Usage: python %s.py -g=<gtFile> -s=<submFile> [-o=<outputFolder> -p=<jsonParams>]' % sys.argv[0])
    sys.exit(2)


def load_zip_file_keys(file, fileNameRegExp=''):
    """
    Returns an array with the entries of the ZIP file that match the regular expression.
    The keys are the file names or the capturing group defined in fileNameRegExp.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except:
        raise Exception('Error loading the ZIP archive.')

    pairs = []

    for name in archive.namelist():
        addFile = True
        keyName = name
        if fileNameRegExp != "":
            m = re.match(fileNameRegExp, name)
            if m == None:
                addFile = False
            else:
                if len(m.groups()) > 0:
                    keyName = m.group(1)

        if addFile:
            pairs.append(keyName)

    return pairs


def load_zip_file(file, fileNameRegExp='', allEntries=False):
    """
    Returns a dictionary with the contents (filtered by fileNameRegExp) of a ZIP file.
    The keys are the file names or the capturing group defined in fileNameRegExp.
    allEntries validates that all entries in the ZIP file pass the fileNameRegExp.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except:
        raise Exception('Error loading the ZIP archive')

    pairs = []
    for name in archive.namelist():
        addFile = True
        keyName = name
        if fileNameRegExp != "":
            m = re.match(fileNameRegExp, name)
            if m == None:
                addFile = False
            else:
                if len(m.groups()) > 0:
                    keyName = m.group(1)

        if addFile:
            pairs.append([keyName, archive.read(name)])
        else:
            if allEntries:
                raise Exception('ZIP entry not valid: %s' % name)

    return dict(pairs)


def decode_utf8(raw):
    """
    Returns a Unicode object on success, or None on failure.
    """
    try:
        return raw.decode('utf-8-sig', errors='replace')
    except:
        return None


def validate_lines_in_file(fileName, file_contents, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0):
    """
    Validates all lines of the file, calling the line validation function for each line.
    """
    utf8File = decode_utf8(file_contents)
    if (utf8File is None):
        raise Exception("The file %s is not UTF-8" % fileName)

    lines = utf8File.split("\r\n" if CRLF else "\n")
    for line in lines:
        line = line.replace("\r", "").replace("\n", "")
        if (line != ""):
            try:
                validate_tl_line(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
            except Exception as e:
                raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" % (fileName, line, str(e))).encode('utf-8', 'replace'))


def validate_tl_line(line, LTRB=True, withTranscription=True, withConfidence=True, imWidth=0, imHeight=0):
    """
    Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    """
    get_tl_line_values(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)


def get_tl_line_values(line, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0):
    """
    Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    Returns values from a textline. Points, [Confidences], [Transcriptions]
    """
    confidence = 0.0
    transcription = ""
    points = []

    numPoints = 4

    if LTRB:

        numPoints = 4

        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$', line)
            if m == None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$', line)
            if m == None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$', line)
            if m == None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$', line)
            if m == None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax")

        xmin = int(m.group(1))
        ymin = int(m.group(2))
        xmax = int(m.group(3))
        ymax = int(m.group(4))
        if (xmax < xmin):
            raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." % (xmax))
        if (ymax < ymin):
            raise Exception("Ymax value (%s) not valid (Ymax < Ymin)." % (ymax))

        points = [float(m.group(i)) for i in range(1, (numPoints + 1))]

        if (imWidth > 0 and imHeight > 0):
            validate_point_inside_bounds(xmin, ymin, imWidth, imHeight)
            validate_point_inside_bounds(xmax, ymax, imWidth, imHeight)

    else:

        numPoints = 8

        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$', line)
            if m == None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$', line)
            if m == None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$', line)
            if m == None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$', line)
            if m == None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4")

        points = [float(m.group(i)) for i in range(1, (numPoints + 1))]

        validate_clockwise_points(points)

        if (imWidth > 0 and imHeight > 0):
            validate_point_inside_bounds(points[0], points[1], imWidth, imHeight)
            validate_point_inside_bounds(points[2], points[3], imWidth, imHeight)
            validate_point_inside_bounds(points[4], points[5], imWidth, imHeight)
            validate_point_inside_bounds(points[6], points[7], imWidth, imHeight)

    if withConfidence:
        try:
            confidence = float(m.group(numPoints + 1))
        except ValueError:
            raise Exception("Confidence value must be a float")

    if withTranscription:
        posTranscription = numPoints + (2 if withConfidence else 1)
        transcription = m.group(posTranscription)
        m2 = re.match(r'^\s*\"(.*)\"\s*$', transcription)
        if m2 != None:  # Transcription with double quotes: extract the value and replace escaped characters
            transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"")

    return points, confidence, transcription


def get_tl_dict_values(detection, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, validNumPoints=[], validate_cw=True):
    """
    Validate the format of the dictionary. If the dictionary is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values:
    {"points":[[x1,y1],[x2,y2],[x3,y3],..,[xn,yn]]}
    {"points":[[x1,y1],[x2,y2],[x3,y3],..,[xn,yn]],"transcription":"###","confidence":0.4,"illegibility":false}
    {"points":[[x1,y1],[x2,y2],[x3,y3],..,[xn,yn]],"transcription":"###","confidence":0.4,"dontCare":false}
    Returns values from the dictionary. Points, [Confidences], [Transcriptions]
    """
    confidence = 0.0
    transcription = ""
    points = []

    if isinstance(detection, dict) == False:
        raise Exception("Incorrect format. Object has to be a dictionary")

    if not 'points' in detection:
        raise Exception("Incorrect format. Object has no points key")

    if isinstance(detection['points'], list) == False:
        raise Exception("Incorrect format. Object points key has to be an array")

    num_points = len(detection['points'])

    if num_points < 3:
        raise Exception("Incorrect format. Incorrect number of points. At least 3 points are necessary. Found: " + str(num_points))

    if (len(validNumPoints) > 0 and (num_points in validNumPoints) == False):
        raise Exception("Incorrect format. Incorrect number of points. Only allowed 4, 8 or 12 points")

    for i in range(num_points):
        if isinstance(detection['points'][i], list) == False:
            raise Exception("Incorrect format. Point #" + str(i + 1) + " has to be an array")

        if len(detection['points'][i]) != 2:
            raise Exception("Incorrect format. Point #" + str(i + 1) + " has to be an array with 2 objects (x,y)")

        if isinstance(detection['points'][i][0], (int, float)) == False or isinstance(detection['points'][i][1], (int, float)) == False:
            raise Exception("Incorrect format. Point #" + str(i + 1) + " coordinates have to be numbers")

        if (imWidth > 0 and imHeight > 0):
            validate_point_inside_bounds(detection['points'][i][0], detection['points'][i][1], imWidth, imHeight)

        points.append(float(detection['points'][i][0]))
        points.append(float(detection['points'][i][1]))

    if validate_cw:
        validate_clockwise_points(points)

    if withConfidence:
        if not 'confidence' in detection:
            raise Exception("Incorrect format. No confidence key")

        if isinstance(detection['confidence'], (int, float)) == False:
            raise Exception("Incorrect format. Confidence key has to be a float")

        if detection['confidence'] < 0 or detection['confidence'] > 1:
            raise Exception("Incorrect format. Confidence key has to be a float between 0.0 and 1.0")

        confidence = detection['confidence']

    if withTranscription:
        if not 'transcription' in detection:
            raise Exception("Incorrect format. No transcription key")

        if isinstance(detection['transcription'], str) == False:
            raise Exception("Incorrect format. Transcription has to be a string. Detected: " + type(detection['transcription']).__name__)

        transcription = detection['transcription']

        if 'illegibility' in detection:  # If the illegibility attribute is present and True, the transcription is set to ### (don't care)
            if detection['illegibility'] == True:
                transcription = "###"

        if 'dontCare' in detection:  # If the dontCare attribute is present and True, the transcription is set to ### (don't care)
            if detection['dontCare'] == True:
                transcription = "###"

    return points, confidence, transcription


def validate_point_inside_bounds(x, y, imWidth, imHeight):
    if (x < 0 or x > imWidth):
        raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" % (x, imWidth, imHeight))
    if (y < 0 or y > imHeight):
        raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s)" % (y, imWidth, imHeight))


def validate_clockwise_points(points):
    """
    Validates that the points are in clockwise order (in image coordinates, i.e. with the Y axis pointing down).
    """
    # Shoelace-style edge sum: a positive total means the polygon is counter-clockwise in image coordinates.
    edge = []
    for i in range(len(points) // 2):
        edge.append((int(points[(i + 1) * 2 % len(points)]) - int(points[i * 2])) * (int(points[((i + 1) * 2 + 1) % len(points)]) + int(points[i * 2 + 1])))
    if sum(edge) > 0:
        raise Exception("Points are not clockwise. The coordinates of bounding points have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.")
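
A quick sanity check (a sketch, not part of the file): in image coordinates (Y grows downward), a clockwise box passes validation while its reversed, counter-clockwise form raises.

validate_clockwise_points([0, 0, 10, 0, 10, 10, 0, 10])    # clockwise in image coords: passes
# validate_clockwise_points([0, 0, 0, 10, 10, 10, 10, 0])  # counter-clockwise: raises Exception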
def get_tl_line_values_from_file_contents(content, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, sort_by_confidences=True):
    """
    Returns all points, confidences and transcriptions of a file in lists. Valid line formats:
    xmin,ymin,xmax,ymax,[confidence],[transcription]
    x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
    """
    pointsList = []
    transcriptionsList = []
    confidencesList = []

    lines = content.split("\r\n" if CRLF else "\n")
    for line in lines:
        line = line.replace("\r", "").replace("\n", "")
        if (line != ""):
            points, confidence, transcription = get_tl_line_values(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
            pointsList.append(points)
            transcriptionsList.append(transcription)
            confidencesList.append(confidence)

    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
        import numpy as np
        sorted_ind = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in sorted_ind]
        pointsList = [pointsList[i] for i in sorted_ind]
        transcriptionsList = [transcriptionsList[i] for i in sorted_ind]

    return pointsList, confidencesList, transcriptionsList


def get_tl_dict_values_from_array(array, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, sort_by_confidences=True, validNumPoints=[], validate_cw=True):
    """
    Returns all points, confidences and transcriptions of a file in lists. Valid dict formats:
    {"points":[[x1,y1],[x2,y2],[x3,y3],..,[xn,yn]],"transcription":"###","confidence":0.4}
    """
    pointsList = []
    transcriptionsList = []
    confidencesList = []

    for n in range(len(array)):
        objectDict = array[n]
        points, confidence, transcription = get_tl_dict_values(objectDict, withTranscription, withConfidence, imWidth, imHeight, validNumPoints, validate_cw)
        pointsList.append(points)
        transcriptionsList.append(transcription)
        confidencesList.append(confidence)

    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
        import numpy as np
        sorted_ind = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in sorted_ind]
        pointsList = [pointsList[i] for i in sorted_ind]
        transcriptionsList = [transcriptionsList[i] for i in sorted_ind]

    return pointsList, confidencesList, transcriptionsList


def main_evaluation(p, default_evaluation_params_fn, validate_data_fn, evaluate_method_fn, show_result=True, per_sample=True):
    """
    This process validates a method, evaluates it and, if it succeeds, generates a ZIP file with a JSON entry for each sample.
    Params:
    p: Dictionary of parameters with the GT/submission locations. If None is passed, the parameters sent by the system are used.
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    evaluate_method_fn: points to a function that evaluates the submission and returns a dictionary with the results
    """

    if (p == None):
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        if (len(sys.argv) < 3):
            print_help()

    evalParams = default_evaluation_params_fn()
    if 'p' in p.keys():
        evalParams.update(p['p'] if isinstance(p['p'], dict) else json.loads(p['p']))

    resDict = {'calculated': True, 'Message': '', 'method': '{}', 'per_sample': '{}'}
    try:
        validate_data_fn(p['g'], p['s'], evalParams)
        evalData = evaluate_method_fn(p['g'], p['s'], evalParams)
        resDict.update(evalData)

    except Exception as e:
        resDict['Message'] = str(e)
        resDict['calculated'] = False

    if 'o' in p:
        if not os.path.exists(p['o']):
            os.makedirs(p['o'])

        resultsOutputname = p['o'] + '/results.zip'
        outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True)

        del resDict['per_sample']
        if 'output_items' in resDict.keys():
            del resDict['output_items']

        outZip.writestr('method.json', json.dumps(resDict))

    if not resDict['calculated']:
        if show_result:
            sys.stderr.write('Error!\n' + resDict['Message'] + '\n\n')
        if 'o' in p:
            outZip.close()
        return resDict

    if 'o' in p:
        if per_sample == True:
            for k, v in evalData['per_sample'].items():
                outZip.writestr(k + '.json', json.dumps(v))

            if 'output_items' in evalData.keys():
                for k, v in evalData['output_items'].items():
                    outZip.writestr(k, v)

        outZip.close()

    if show_result:
        sys.stdout.write("Calculated!")
        sys.stdout.write(json.dumps(resDict['method']))

    return resDict


def main_validation(default_evaluation_params_fn, validate_data_fn):
    """
    This process validates a method.
    Params:
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    """
    try:
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        evalParams = default_evaluation_params_fn()
        if 'p' in p.keys():
            evalParams.update(p['p'] if isinstance(p['p'], dict) else json.loads(p['p']))

        validate_data_fn(p['g'], p['s'], evalParams)
        print('SUCCESS')
        sys.exit(0)
    except Exception as e:
        print(str(e))
        sys.exit(101)
451 OCRBench_v2/eval_scripts/spotting_eval/script.py Normal file
@@ -0,0 +1,451 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: E2E_iou_1_1.py
# Version: 1.1
# Version info: changes for Python 3
# Date: 2019-12-29
# Description: Evaluation script that computes End-to-End Recognition. For Text Localization the Intersection over Union criterion is used.
# Average Precision is also calculated when the 'CONFIDENCES' parameter is True.
# There are 2 modes to determine whether a detection is correct or not:
# with Word Spotting: the detected word must coincide (ignoring case) with a filtered Ground Truth containing only dictionary words (see include_in_dictionary and include_in_dictionary_transcription functions)
# without Word Spotting: words must be equal excluding a set of special characters

from collections import namedtuple
import spotting_eval.rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs
import importlib


def evaluation_imports():
    """
    evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.
    """
    return {
        'Polygon': 'plg',
        'numpy': 'np'
    }


def default_evaluation_params():
    """
    default_evaluation_params: Default parameters to use for the validation and evaluation.
    """
    return {
        'IOU_CONSTRAINT': 0.5,
        'AREA_PRECISION_CONSTRAINT': 0.5,
        'WORD_SPOTTING': False,
        'MIN_LENGTH_CARE_WORD': 3,
        'GT_SAMPLE_NAME_2_ID': 'gt_img_([0-9]+).txt',
        'DET_SAMPLE_NAME_2_ID': 'res_img_([0-9]+).txt',
        'LTRB': False,  # LTRB: 2 points (left,top,right,bottom) or 4 points (x1,y1,x2,y2,x3,y3,x4,y4)
        'CRLF': False,  # Lines are delimited by Windows CRLF format
        'CONFIDENCES': False,  # Detections must include a confidence value. AP will be calculated
        'SPECIAL_CHARACTERS': '!?.:,*"()·[]/\'',
        'ONLY_REMOVE_FIRST_LAST_CHARACTER': True
    }


def validate_data(gtFilePath, submFilePath, evaluationParams):
    """
    Method validate_data: validates that all files in the results folder are correct (have the correct name and contents).
    Validates also that there are no missing files in the folder.
    If some error is detected, the method raises it.
    """
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])

    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    # Validate format of GroundTruth
    for k in gt:
        rrc_evaluation_funcs.validate_lines_in_file(k, gt[k], evaluationParams['CRLF'], evaluationParams['LTRB'], True)

    # Validate format of results
    for k in subm:
        if (k in gt) == False:
            raise Exception("The sample %s not present in GT" % k)

        rrc_evaluation_funcs.validate_lines_in_file(k, subm[k], evaluationParams['CRLF'], evaluationParams['LTRB'], True, evaluationParams['CONFIDENCES'])


def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluates the method and returns the results.
    Results. Dictionary with the following values:
    - method (required) Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 }
    - samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 } }
    """
    for module, alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def polygon_from_points(points, correctOffset=False):
        """
        Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
        """

        if correctOffset:  # this will subtract 1 from the coordinates that correspond to the xmax and ymax
            points[2] -= 1
            points[4] -= 1
            points[5] -= 1
            points[7] -= 1

        resBoxes = np.empty([1, 8], dtype='int32')
        resBoxes[0, 0] = int(points[0])
        resBoxes[0, 4] = int(points[1])
        resBoxes[0, 1] = int(points[2])
        resBoxes[0, 5] = int(points[3])
        resBoxes[0, 2] = int(points[4])
        resBoxes[0, 6] = int(points[5])
        resBoxes[0, 3] = int(points[6])
        resBoxes[0, 7] = int(points[7])
        pointMat = resBoxes[0].reshape([2, 4]).T
        return plg.Polygon(pointMat)

    def rectangle_to_polygon(rect):
        resBoxes = np.empty([1, 8], dtype='int32')
        resBoxes[0, 0] = int(rect.xmin)
        resBoxes[0, 4] = int(rect.ymax)
        resBoxes[0, 1] = int(rect.xmin)
        resBoxes[0, 5] = int(rect.ymin)
        resBoxes[0, 2] = int(rect.xmax)
        resBoxes[0, 6] = int(rect.ymin)
        resBoxes[0, 3] = int(rect.xmax)
        resBoxes[0, 7] = int(rect.ymax)

        pointMat = resBoxes[0].reshape([2, 4]).T

        return plg.Polygon(pointMat)

    def rectangle_to_points(rect):
        points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)]
        return points

    def get_union(pD, pG):
        areaA = pD.area()
        areaB = pG.area()
        return areaA + areaB - get_intersection(pD, pG)

    def get_intersection_over_union(pD, pG):
        try:
            return get_intersection(pD, pG) / get_union(pD, pG)
        except:
            return 0

    def get_intersection(pD, pG):
        pInt = pD & pG
        if len(pInt) == 0:
            return 0
        return pInt.area()

    def compute_ap(confList, matchList, numGtCare):
        correct = 0
        AP = 0
        if len(confList) > 0:
            confList = np.array(confList)
            matchList = np.array(matchList)
            sorted_ind = np.argsort(-confList)
            confList = confList[sorted_ind]
            matchList = matchList[sorted_ind]
            for n in range(len(confList)):
                match = matchList[n]
                if match:
                    correct += 1
                    AP += float(correct) / (n + 1)

            if numGtCare > 0:
                AP /= numGtCare

        return AP

    def transcription_match(transGt, transDet, specialCharacters='!?.:,*"()·[]/\'', onlyRemoveFirstLastCharacterGT=True):

        if onlyRemoveFirstLastCharacterGT:
            # special characters in GT are allowed only at the initial or final position
            if (transGt == transDet):
                return True

            if specialCharacters.find(transGt[0]) > -1:
                if transGt[1:] == transDet:
                    return True

            if specialCharacters.find(transGt[-1]) > -1:
                if transGt[0:len(transGt) - 1] == transDet:
                    return True

            if specialCharacters.find(transGt[0]) > -1 and specialCharacters.find(transGt[-1]) > -1:
                if transGt[1:len(transGt) - 1] == transDet:
                    return True
            return False
        else:
            # Special characters are removed from the beginning and the end of both Detection and GroundTruth
            while len(transGt) > 0 and specialCharacters.find(transGt[0]) > -1:
                transGt = transGt[1:]

            while len(transDet) > 0 and specialCharacters.find(transDet[0]) > -1:
                transDet = transDet[1:]

            while len(transGt) > 0 and specialCharacters.find(transGt[-1]) > -1:
                transGt = transGt[0:len(transGt) - 1]

            while len(transDet) > 0 and specialCharacters.find(transDet[-1]) > -1:
                transDet = transDet[0:len(transDet) - 1]

            return transGt == transDet
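
    # Illustration (comment only; transcription_match is a nested helper): with the
    # defaults, GT '"HELLO' matches detection 'HELLO' because the quote sits at the
    # edge of the GT word, whereas GT 'HE"LLO' would not match 'HELLO'.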
    def include_in_dictionary(transcription):
        """
        Function used in Word Spotting that checks whether the Ground Truth transcription meets the rules to enter into the dictionary. If not, the transcription will be treated as don't care.
        """
        # special case: 's at the end
        if transcription[len(transcription) - 2:] == "'s" or transcription[len(transcription) - 2:] == "'S":
            transcription = transcription[0:len(transcription) - 2]

        # hyphens at the start or end of the word
        transcription = transcription.strip('-')

        specialCharacters = "'!?.:,*\"()·[]/"
        for character in specialCharacters:
            transcription = transcription.replace(character, ' ')

        transcription = transcription.strip()

        if len(transcription) != len(transcription.replace(" ", "")):
            return False

        if len(transcription) < evaluationParams['MIN_LENGTH_CARE_WORD']:
            return False

        notAllowed = "×÷·"

        range1 = [ord(u'a'), ord(u'z')]
        range2 = [ord(u'A'), ord(u'Z')]
        range3 = [ord(u'À'), ord(u'ƿ')]
        range4 = [ord(u'DŽ'), ord(u'ɿ')]
        range5 = [ord(u'Ά'), ord(u'Ͽ')]
        range6 = [ord(u'-'), ord(u'-')]

        for char in transcription:
            charCode = ord(char)
            if (notAllowed.find(char) != -1):
                return False

            valid = (charCode >= range1[0] and charCode <= range1[1]) or (charCode >= range2[0] and charCode <= range2[1]) or (charCode >= range3[0] and charCode <= range3[1]) or (charCode >= range4[0] and charCode <= range4[1]) or (charCode >= range5[0] and charCode <= range5[1]) or (charCode >= range6[0] and charCode <= range6[1])
            if valid == False:
                return False

        return True

    def include_in_dictionary_transcription(transcription):
        """
        Function applied to the Ground Truth transcriptions used in Word Spotting. It removes special characters and terminations.
        """
        # special case: 's at the end
        if transcription[len(transcription) - 2:] == "'s" or transcription[len(transcription) - 2:] == "'S":
            transcription = transcription[0:len(transcription) - 2]

        # hyphens at the start or end of the word
        transcription = transcription.strip('-')

        specialCharacters = "'!?.:,*\"()·[]/"
        for character in specialCharacters:
            transcription = transcription.replace(character, ' ')

        transcription = transcription.strip()

        return transcription

    perSampleMetrics = {}

    matchedSum = 0

    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')

    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    numGlobalCareGt = 0
    numGlobalCareDet = 0

    arrGlobalConfidences = []
    arrGlobalMatches = []

    for resFile in gt:

        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        if (gtFile is None):
            raise Exception("The file %s is not UTF-8" % resFile)

        recall = 0
        precision = 0
        hmean = 0
        detCorrect = 0
        iouMat = np.empty([1, 1])
        gtPols = []
        detPols = []
        gtTrans = []
        detTrans = []
        gtPolPoints = []
        detPolPoints = []
        gtDontCarePolsNum = []  # Array of Ground Truth Polygons' keys marked as don't care
        detDontCarePolsNum = []  # Array of Detected Polygons matched with a don't care GT
        detMatchedNums = []
        pairs = []

        arrSampleConfidences = []
        arrSampleMatch = []
        sampleAP = 0

        evaluationLog = ""

        pointsList, _, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile, evaluationParams['CRLF'], evaluationParams['LTRB'], True, False)
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            dontCare = transcription == "###"
            if evaluationParams['LTRB']:
                gtRect = Rectangle(*points)
                gtPol = rectangle_to_polygon(gtRect)
            else:
                gtPol = polygon_from_points(points)
            gtPols.append(gtPol)
            gtPolPoints.append(points)

            # On word spotting we will filter some transcriptions with special characters
            if evaluationParams['WORD_SPOTTING']:
                if dontCare == False:
                    if include_in_dictionary(transcription) == False:
                        dontCare = True
                    else:
                        transcription = include_in_dictionary_transcription(transcription)

            gtTrans.append(transcription)
            if dontCare:
                gtDontCarePolsNum.append(len(gtPols) - 1)

        evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum) > 0 else "\n")

        if resFile in subm:

            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])

            pointsList, confidencesList, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile, evaluationParams['CRLF'], evaluationParams['LTRB'], True, evaluationParams['CONFIDENCES'])

            for n in range(len(pointsList)):
                points = pointsList[n]
                transcription = transcriptionsList[n]

                if evaluationParams['LTRB']:
                    detRect = Rectangle(*points)
                    detPol = rectangle_to_polygon(detRect)
                else:
                    detPol = polygon_from_points(points)
                detPols.append(detPol)
                detPolPoints.append(points)
                detTrans.append(transcription)

                if len(gtDontCarePolsNum) > 0:
                    for dontCarePol in gtDontCarePolsNum:
                        dontCarePol = gtPols[dontCarePol]
                        intersected_area = get_intersection(dontCarePol, detPol)
                        pdDimensions = detPol.area()
                        precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
                        if (precision > evaluationParams['AREA_PRECISION_CONSTRAINT']):
                            detDontCarePolsNum.append(len(detPols) - 1)
                            break

            evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum) > 0 else "\n")

            if len(gtPols) > 0 and len(detPols) > 0:
                # Calculate IoU and precision matrices
                outputShape = [len(gtPols), len(detPols)]
                iouMat = np.empty(outputShape)
                gtRectMat = np.zeros(len(gtPols), np.int8)
                detRectMat = np.zeros(len(detPols), np.int8)
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        iouMat[gtNum, detNum] = get_intersection_over_union(pD, pG)

                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum:
                            if iouMat[gtNum, detNum] > evaluationParams['IOU_CONSTRAINT']:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                # detection matched only if transcription is equal
                                if evaluationParams['WORD_SPOTTING']:
                                    correct = gtTrans[gtNum].upper() == detTrans[detNum].upper()
                                else:
                                    correct = transcription_match(gtTrans[gtNum].upper(), detTrans[detNum].upper(), evaluationParams['SPECIAL_CHARACTERS'], evaluationParams['ONLY_REMOVE_FIRST_LAST_CHARACTER']) == True
                                detCorrect += (1 if correct else 0)
                                if correct:
                                    detMatchedNums.append(detNum)
                                pairs.append({'gt': gtNum, 'det': detNum, 'correct': correct})
                                evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + " trans. correct: " + str(correct) + "\n"

            if evaluationParams['CONFIDENCES']:
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum:
                        # we exclude the don't care detections
                        match = detNum in detMatchedNums

                        arrSampleConfidences.append(confidencesList[detNum])
                        arrSampleMatch.append(match)

                        arrGlobalConfidences.append(confidencesList[detNum])
                        arrGlobalMatches.append(match)

        numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
        numDetCare = (len(detPols) - len(detDontCarePolsNum))
        if numGtCare == 0:
            recall = float(1)
            precision = float(0) if numDetCare > 0 else float(1)
            sampleAP = precision
        else:
            recall = float(detCorrect) / numGtCare
            precision = 0 if numDetCare == 0 else float(detCorrect) / numDetCare
            if evaluationParams['CONFIDENCES']:
                sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare)

        hmean = 0 if (precision + recall) == 0 else 2.0 * precision * recall / (precision + recall)

        matchedSum += detCorrect
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare

        perSampleMetrics[resFile] = {
            'precision': precision,
            'recall': recall,
            'hmean': hmean,
            'pairs': pairs,
            'AP': sampleAP,
            'iouMat': [] if len(detPols) > 100 else iouMat.tolist(),
            'gtPolPoints': gtPolPoints,
            'detPolPoints': detPolPoints,
            'gtTrans': gtTrans,
            'detTrans': detTrans,
            'gtDontCare': gtDontCarePolsNum,
            'detDontCare': detDontCarePolsNum,
            'evaluationParams': evaluationParams,
            'evaluationLog': evaluationLog
        }

    # Compute AP
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)

    methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum) / numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum) / numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * methodRecall * methodPrecision / (methodRecall + methodPrecision)

    methodMetrics = {'precision': methodPrecision, 'recall': methodRecall, 'hmean': methodHmean, 'AP': AP}

    resDict = {'calculated': True, 'Message': '', 'method': methodMetrics, 'per_sample': perSampleMetrics}

    return resDict


if __name__ == '__main__':

    rrc_evaluation_funcs.main_evaluation(None, default_evaluation_params, validate_data, evaluate_method)
BIN OCRBench_v2/eval_scripts/spotting_eval/submit.zip Normal file (binary file not shown)
1 OCRBench_v2/eval_scripts/spotting_eval/submit/res_img_0.txt Normal file
@@ -0,0 +1 @@
0,0,1000,0,1000,1000,0,1000,CHEROKEE STREET BIKES
184 OCRBench_v2/eval_scripts/spotting_metric.py Normal file
@@ -0,0 +1,184 @@
import re
import os
import ast
import ipdb
import shutil
import zipfile
import subprocess
import spotting_eval.rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs
from spotting_eval.script import default_evaluation_params, validate_data, evaluate_method


def extract_bounding_boxes_robust(predict_str):
    """
    Extract coordinates and text content from the given prediction string,
    handling potential format issues.

    Args:
        predict_str (str): Model prediction output as a string.

    Returns:
        list: Extracted data in the format [[x1, y1, x2, y2, text_content], ...].
              Returns None if no valid data is extracted.
    """
    results = []
    seen = set()

    # try parsing with ast.literal_eval
    try:
        data = ast.literal_eval(predict_str)
    except Exception:
        data = None

    if data is not None:
        if isinstance(data, (list, tuple)):
            for item in data:
                if isinstance(item, (list, tuple)) and len(item) >= 5:
                    x1_str, y1_str, x2_str, y2_str = item[:4]
                    text_content = item[4]

                    x1_str = str(x1_str).strip()
                    y1_str = str(y1_str).strip()
                    x2_str = str(x2_str).strip()
                    y2_str = str(y2_str).strip()
                    text_content = str(text_content).replace("\n", "").strip().strip('"').strip("'")

                    try:
                        x1 = int(x1_str)
                        y1 = int(y1_str)
                        x2 = int(x2_str)
                        y2 = int(y2_str)

                        # coordinates are expected in a normalized 0-1000 range
                        if not (0 <= x1 <= 1000 and 0 <= y1 <= 1000 and 0 <= x2 <= 1000 and 0 <= y2 <= 1000):
                            continue

                        key = (x1, y1, x2, y2, text_content)
                        if key in seen:
                            continue

                        seen.add(key)
                        results.append([x1, y1, x2, y2, text_content])
                    except ValueError:
                        continue
    else:
        # fall back to parsing with a regular expression
        list_content = predict_str
        items = re.findall(r'[\[\(]\s*([^\[\]\(\)]*?)\s*[\]\)]', list_content)

        if not items:
            return None

        for item in items:
            parts = item.split(',', 4)
            if len(parts) < 5:
                continue

            x1_str, y1_str, x2_str, y2_str, text_content = parts

            x1_str = x1_str.strip()
            y1_str = y1_str.strip()
            x2_str = x2_str.strip()
            y2_str = y2_str.strip()
            text_content = text_content.replace("\n", "").strip().strip('"').strip("'")

            try:
                x1 = int(x1_str)
                y1 = int(y1_str)
                x2 = int(x2_str)
                y2 = int(y2_str)

                if not (0 <= x1 <= 1000 and 0 <= y1 <= 1000 and 0 <= x2 <= 1000 and 0 <= y2 <= 1000):
                    continue

                key = (x1, y1, x2, y2, text_content)
                if key in seen:
                    continue

                seen.add(key)
                results.append([x1, y1, x2, y2, text_content])
            except ValueError:
                continue

    if not results:
        return None

    return results
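
A small usage sketch (not part of the file):

boxes = extract_bounding_boxes_robust("[[442, 380, 547, 399, 'CHEROKEE STREET'], (481, 399, 536, 417, 'BIKES')]")
# -> [[442, 380, 547, 399, 'CHEROKEE STREET'], [481, 399, 536, 417, 'BIKES']]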
def zip_folder(source_folder, destination_zip):
    abs_source = os.path.abspath(source_folder)
    abs_destination = os.path.abspath(destination_zip)

    with zipfile.ZipFile(abs_destination, 'w', zipfile.ZIP_DEFLATED) as zf:
        for root, _, files in os.walk(abs_source):
            for file in files:
                abs_file_path = os.path.join(root, file)

                relative_path = os.path.relpath(abs_file_path, abs_source)
                zf.write(abs_file_path, relative_path)


def spotting_evaluation(prediction_list, img_metas):
    score = 0

    submit_path = "./eval_scripts/spotting_eval/submit"
    gt_path = "./eval_scripts/spotting_eval/gt"
    submit_zip_path = "./eval_scripts/spotting_eval/submit.zip"
    gt_zip_path = "./eval_scripts/spotting_eval/gt.zip"
    # Recreate the working directories and remove stale ZIPs from a previous run.
    for file_path in [submit_path, gt_path, submit_zip_path, gt_zip_path]:
        if "zip" in file_path:
            if os.path.exists(file_path):
                os.remove(file_path)
        else:
            if os.path.exists(file_path):
                shutil.rmtree(file_path)
            os.makedirs(file_path)

    res_submit_list = []
    for item in prediction_list:
        if len(item) != 5:
            ipdb.set_trace()
        x1, y1, x2, y2, rec = item
        if x1 >= x2 or y1 >= y2:
            continue

        # Expand the axis-aligned box into the 4-point (8-coordinate) format.
        res_submit_list.append(",".join([str(x1), str(y1), str(x2), str(y1), str(x2), str(y2), str(x1), str(y2), rec]))

    res_gt_list = []
    for bbox, rec in zip(img_metas["bbox"], img_metas["content"]):
        x_coords = bbox[0::2]
        y_coords = bbox[1::2]

        x1, y1 = min(x_coords), min(y_coords)
        x2, y2 = max(x_coords), max(y_coords)

        res_gt_list.append(",".join([str(x1), str(y1), str(x2), str(y1), str(x2), str(y2), str(x1), str(y2), rec]))

    if len(res_submit_list) == 0 or len(res_gt_list) == 0:
        return 0

    with open(os.path.join(submit_path, "res_img_0.txt"), "w") as f:
        for item in res_submit_list[:-1]:
            f.write(item + "\n")
        f.write(res_submit_list[-1])

    with open(os.path.join(gt_path, "gt_img_0.txt"), "w") as f:
        for item in res_gt_list[:-1]:
            f.write(item + "\n")
        f.write(res_gt_list[-1])

    zip_folder(submit_path, submit_zip_path)
    zip_folder(gt_path, gt_zip_path)

    command = {
        'g': gt_zip_path,
        's': submit_zip_path,
        'o': './',
        'p': '{"IOU_CONSTRAINT":0.5}'
    }

    # run rrc_evaluation_funcs
    result = rrc_evaluation_funcs.main_evaluation(command, default_evaluation_params, validate_data, evaluate_method)
    score = result["method"]["hmean"]
    return score
282 OCRBench_v2/eval_scripts/vqa_metric.py Normal file
@@ -0,0 +1,282 @@
import re
import os
import json
import ipdb
import math
import numpy as np


def levenshtein_distance(s1, s2):
    if len(s1) > len(s2):
        s1, s2 = s2, s1

    distances = range(len(s1) + 1)
    for i2, c2 in enumerate(s2):
        distances_ = [i2 + 1]
        for i1, c1 in enumerate(s1):
            if c1 == c2:
                distances_.append(distances[i1])
            else:
                distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
        distances = distances_
    return distances[-1]


def vqa_evaluation(predict, answers):
    score = 0
    if type(answers) == list:
        for j in range(len(answers)):
            if isinstance(answers[j], (int, float)):
                answers[j] = str(answers[j])
            try:
                answer = answers[j].lower().strip().replace("\n", " ")
            except:
                ipdb.set_trace()
            if isinstance(predict, (int, float)):
                predict = str(predict)
            predict = predict.lower().strip().replace("\n", " ")
            if len(answer.split()) < 5:
                # short answers: exact containment in the prediction
                if answer in predict:
                    score = 1
            else:
                # long answers: ANLS (1 - normalized Levenshtein distance), thresholded at 0.5
                dist = levenshtein_distance(predict, answer)
                length = max(len(predict), len(answer))
                ANLS_value = 0.0 if length == 0 else float(dist) / float(length)
                ANLS_value = 1 - ANLS_value

                if ANLS_value >= 0.5 and ANLS_value > score:
                    score = ANLS_value

    else:
        answers = answers.lower().strip().replace("\n", " ")
        predict = predict.lower().strip().replace("\n", " ")
        if len(answers.split()) < 5:
            if answers in predict:
                score = 1
        else:
            dist = levenshtein_distance(predict, answers)
            length = max(len(predict), len(answers))
            ANLS_value = 0.0 if length == 0 else float(dist) / float(length)
            ANLS_value = 1 - ANLS_value

            if ANLS_value >= 0.5 and ANLS_value > score:
                score = ANLS_value

    return score
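
A quick illustration of the two branches (a sketch, not part of the file):

print(vqa_evaluation("The answer is 42.", ["42"]))  # 1: short answer contained in the prediction
print(vqa_evaluation("a red car parked on the street here",
                     ["a red car parked on the street"]))  # ANLS branch: roughly 0.86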
|
||||
|
||||
|
||||
def cn_vqa_evaluation(predict, answers):
|
||||
score = 0
|
||||
if type(answers)==list:
|
||||
for j in range(len(answers)):
|
||||
if isinstance(answers[j], (int, float)):
|
||||
answers[j] = str(answers[j])
|
||||
try:
|
||||
answer = answers[j].lower().strip().replace("\n"," ").replace(" ", "")
|
||||
except:
|
||||
ipdb.set_trace()
|
||||
if isinstance(predict, (int, float)):
|
||||
predict = str(predict)
|
||||
predict = predict.lower().strip().replace("\n"," ").replace(" ", "")
|
||||
if len(answer.split(",")) < 4:
|
||||
if answer in predict:
|
||||
score = 1
|
||||
else:
|
||||
dist = levenshtein_distance(predict, answer)
|
||||
length = max(len(predict), len(answer))
|
||||
ANLS_value = 0.0 if length == 0 else float(dist) / float(length)
|
||||
ANLS_value = 1 - ANLS_value
|
||||
|
||||
if ANLS_value >= 0.5 and ANLS_value > score:
|
||||
score = ANLS_value
|
||||
|
||||
else:
|
||||
answers = answers.lower().strip().replace("\n"," ").replace(" ", "")
|
||||
predict = predict.lower().strip().replace("\n"," ").replace(" ", "")
|
||||
if len(answers.split(",")) < 4:
|
||||
if answers in predict:
|
||||
score = 1
|
||||
else:
|
||||
dist = levenshtein_distance(predict, answers)
|
||||
length = max(len(predict), len(answers))
|
||||
ANLS_value = 0.0 if length == 0 else float(dist) / float(length)
|
||||
ANLS_value = 1 - ANLS_value
|
||||
|
||||
if ANLS_value >= 0.5 and ANLS_value > score:
|
||||
score = ANLS_value
|
||||
|
||||
return score
|
||||
|
||||
|
||||
def vqa_evaluation_case_sensitive(predict, answers):
|
||||
score = 0
|
||||
if type(answers)==list:
|
||||
for j in range(len(answers)):
|
||||
if isinstance(answers[j], (int, float)):
|
||||
answers[j] = str(answers[j])
|
||||
try:
|
||||
answer = answers[j].strip().replace("\n"," ")
|
||||
except:
|
||||
ipdb.set_trace()
|
||||
predict = predict.strip().replace("\n"," ")
|
||||
if len(answer.split()) < 5:
|
||||
if answer in predict:
|
||||
score = 1
|
||||
else:
|
||||
dist = levenshtein_distance(predict, answer)
|
||||
length = max(len(predict), len(answer))
|
||||
ANLS_value = 0.0 if length == 0 else float(dist) / float(length)
|
||||
ANLS_value = 1 - ANLS_value
|
||||
|
||||
if ANLS_value >= 0.5 and ANLS_value > score:
|
||||
score = ANLS_value
|
||||
|
||||
else:
|
||||
answers = answers.strip().replace("\n"," ")
|
||||
predict = predict.strip().replace("\n"," ")
|
||||
if len(answers.split()) < 5:
|
||||
if answers in predict:
|
||||
score = 1
|
||||
else:
|
||||
dist = levenshtein_distance(predict, answers)
|
||||
length = max(len(predict), len(answers))
|
||||
ANLS_value = 0.0 if length == 0 else float(dist) / float(length)
|
||||
ANLS_value = 1 - ANLS_value
|
||||
|
||||
if ANLS_value >= 0.5 and ANLS_value > score:
|
||||
score = ANLS_value
|
||||
|
||||
return score
|
||||
|
||||
|
||||
def extract_first_number(string):
    # return the first run of digits as an int, or None if there is none
    match = re.search(r'\d+', string)
    if match:
        return int(match.group())
    return None

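A quick sanity check of the helper on made-up strings:

```python
print(extract_first_number("Page 12 of 30"))   # 12
print(extract_first_number("no digits here"))  # None
```
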
def counting_evaluation(predict, answers, eval_method):
    score = 0

    if isinstance(predict, str):
        predict_processed = predict.lower().strip().replace("\n", " ")
    elif math.isnan(predict):
        return 0
    else:
        predict_processed = str(int(predict))  # cast numeric predictions to str so matching works
    if type(answers) == list:
        temp_score = 0
        for j in range(len(answers)):
            if isinstance(answers[j], (int, float)):
                answers[j] = str(answers[j])
            answer = answers[j].lower().strip().replace("\n", " ")
            if eval_method == "exact match":
                # use the normalized prediction (fixes matching against the raw input)
                if answer in predict_processed:
                    score = 1
                else:
                    score = 0
            elif eval_method == "regression":
                predict_number = extract_first_number(predict_processed)
                if predict_number:
                    answer = int(answer)
                    # soft relative-error score: 1 - |p - a| / a,
                    # zeroed outside (0, 2a) and when it falls below 0.5
                    if predict_number <= 0 or predict_number >= 2 * answer:
                        score = 0
                    else:
                        iou = 1 - abs(predict_number - answer) / answer
                        if iou > 0.5:
                            score = iou
                        else:
                            score = 0
                else:
                    score = 0
            if score > temp_score:
                temp_score = score
        score = temp_score

    else:
        answer = answers.lower().strip().replace("\n", " ")
        predict = predict_processed
        if eval_method == "exact match":
            if answer in predict:
                score = 1
            else:
                score = 0
        elif eval_method == "regression":
            predict = extract_first_number(predict)
            if predict:
                answer = int(answer)
                if predict <= 0 or predict >= 2 * answer:
                    score = 0
                else:
                    iou = 1 - abs(predict - answer) / answer

                    if iou > 0.5:
                        score = iou
                    else:
                        score = 0
            else:
                score = 0
    return score

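The "regression" method is a soft relative-error score: a predicted count p against ground truth a earns 1 - |p - a| / a, but only when p lies strictly inside (0, 2a) and the result clears 0.5. A worked illustration of the function above, with made-up values:

```python
# Hypothetical example for the "regression" scoring mode.
answer, prediction = 10, "I count about 8 birds"
p = extract_first_number(prediction)   # 8
# 0 < 8 < 20, so the soft score applies: 1 - |8 - 10| / 10 = 0.8
print(counting_evaluation(prediction, str(answer), "regression"))  # 0.8
```
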
def math_expression_evaluation(predict, answers):
    score = 0
    if type(answers) == list:
        for j in range(len(answers)):
            # compare with all whitespace removed
            answer = answers[j].strip().replace("\n", " ").replace(" ", "")
            predict = predict.strip().replace("\n", " ").replace(" ", "")
            if answer in predict:
                score = 1
    else:
        answers = answers.strip().replace("\n", " ").replace(" ", "")
        predict = predict.strip().replace("\n", " ").replace(" ", "")
        if answers in predict:
            score = 1
    return score

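Because all whitespace is stripped before the substring test, spacing differences in the expression do not matter. For instance, with illustrative strings:

```python
# Whitespace-insensitive containment check on expressions.
pred = "The simplified expression is x^2 + 1"
print(math_expression_evaluation(pred, ["x^2+1"]))  # 1
```
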
def remove_text_tags(latex_str):
    r"""
    Removes LaTeX \text{...} tags while keeping their content.

    :param latex_str: A string containing LaTeX expressions
    :return: The processed string with \text{...} tags removed
    """
    # match \text{...} with no nested braces inside
    pattern = r'\\text\{([^{}]*)\}'
    processed_str = re.sub(pattern, r'\1', latex_str)
    return processed_str

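The pattern only matches \text{...} groups without nested braces; anything else in the string is left untouched. For example:

```python
print(remove_text_tags(r"\text{Area} = \pi r^2"))  # Area = \pi r^2
```
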
def cn_math_expression_evaluation(predict, answers):
    score = 0

    assert len(answers) == 1
    # normalize to a single-element list with \text{...} tags stripped
    answers = [remove_text_tags(answers[0])]
    predict = remove_text_tags(predict)

    if type(answers) == list:
        for j in range(len(answers)):
            answer = answers[j].strip().replace("\n", " ").replace(" ", "")
            predict = predict.strip().replace("\n", " ").replace(" ", "")
            if answer in predict:
                score = 1
    else:
        answers = answers.strip().replace("\n", " ").replace(" ", "")
        predict = predict.strip().replace("\n", " ").replace(" ", "")
        if answers in predict:
            score = 1
    return score

if __name__ == "__main__":
    test_predict = "apple pie and banana"
    test_answers = ["apple", "banana pie", "apple pie and orange"]

    vqa_score = vqa_evaluation(test_predict, test_answers)
    print(f"VQA evaluation score for predict '{test_predict}' and answers {test_answers}: {vqa_score}")
149267
OCRBench_v2/pred_folder/internvl2_5_26b.json
Normal file
File diff suppressed because one or more lines are too long
12
OCRBench_v2/requirements.txt
Normal file
@@ -0,0 +1,12 @@
numpy
distance
apted
lxml
zss
Levenshtein
editdistance
nltk
jieba
Polygon3
tqdm
ipdb
112
README.md
@@ -1,48 +1,88 @@
# On the Hidden Mystery of OCR in Large Multimodal Models
<img src="./images/all_data.png" width="96%" height="96%">
**This is the repository of the [OCRBench](./OCRBench/README.md) & [OCRBench v2](./OCRBench_v2/README.md).**

> Large models have recently played a dominant role in natural language processing and multimodal vision-language learning. However, their effectiveness in text-related visual tasks remains relatively unexplored. In this paper, we conducted a comprehensive evaluation of Large Multimodal Models, such as GPT4V and Gemini, in various text-related visual tasks including Text Recognition, Scene Text-Centric Visual Question Answering (VQA), Document-Oriented VQA, Key Information Extraction (KIE), and Handwritten Mathematical Expression Recognition (HMER). To facilitate the assessment of Optical Character Recognition (OCR) capabilities in Large Multimodal Models, we propose OCRBench, a comprehensive evaluation benchmark. Our study encompasses 29 datasets, making it the most comprehensive OCR evaluation benchmark available. Furthermore, our study reveals both the strengths and weaknesses of these models, particularly in handling multilingual text, handwritten text, non-semantic text, and mathematical expression recognition. Most importantly, the baseline results showcased in this study could provide a foundational framework for the conception and assessment of innovative strategies targeted at enhancing zero-shot multimodal techniques.
<div align="center" xmlns="http://www.w3.org/1999/html">
<h1 align="center">
OCRBench v2: An Improved Benchmark for Evaluating Large Multimodal Models on Visual Text Localization and Reasoning
</h1>

[](https://99franklin.github.io/ocrbench_v2/)
[](https://arxiv.org/abs/2501.00321)
[](https://huggingface.co/datasets/ling99/OCRBench_v2)
[](https://github.com/Yuliang-Liu/MultimodalOCR/issues?q=is%3Aopen+is%3Aissue)
[](https://github.com/Yuliang-Liu/MultimodalOCR/issues?q=is%3Aissue+is%3Aclosed)
</div>

**[Project Page [This Page]](https://github.com/Yuliang-Liu/MultimodalOCR)** | **[Paper](https://arxiv.org/abs/2305.07895)** | **[OCRBench Leaderboard](http://27.18.7.167:7682/)**

# Data
> **OCRBench v2: An Improved Benchmark for Evaluating Large Multimodal Models on Visual Text Localization and Reasoning**<br>
> Ling Fu, Zhebin Kuang, Jiajun Song, Mingxin Huang, Biao Yang, Yuzhe Li, Linghao Zhu, Qidi Luo, Xinyu Wang, Hao Lu, Zhang Li, Guozhi Tang, Bin Shan, Chunhui Lin, Qi Liu, Binghong Wu, Hao Feng, Hao Liu, Can Huang, Jingqun Tang, Wei Chen, Lianwen Jin, Yuliang Liu, Xiang Bai <br>
[](https://arxiv.org/abs/2501.00321)
[](https://huggingface.co/datasets/ling99/OCRBench_v2)
[](https://drive.google.com/file/d/1Hk1TMu--7nr5vJ7iaNwMQZ_Iw9W_KI3C/view?usp=sharing)


**OCRBench v2** is a large-scale bilingual text-centric benchmark with currently the most comprehensive set of tasks (4× more tasks than the previous multi-scene benchmark OCRBench), the widest coverage of scenarios (31 diverse scenarios including street scene, receipt, formula, diagram, and so on), and thorough evaluation metrics, with a total of 10,000 human-verified question-answering pairs and a high proportion of difficult samples. More details can be found in [OCRBench v2 README](./OCRBench_v2/README.md).

<p align="center">
<img src="https://v1.ax1x.com/2024/12/30/7VhCnP.jpg" width="88%" height="80%">
</p>

> **OCRBench: On the Hidden Mystery of OCR in Large Multimodal Models**<br>
> Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xucheng Yin, Cheng-lin Liu, Lianwen Jin, Xiang Bai <br>
[](https://arxiv.org/abs/2305.07895)
[](https://github.com/qywh2023/OCRbench/blob/main/OCRBench/README.md)


**OCRBench** is a comprehensive evaluation benchmark designed to assess the OCR capabilities of Large Multimodal Models. It comprises five components: Text Recognition, SceneText-Centric VQA, Document-Oriented VQA, Key Information Extraction, and Handwritten Mathematical Expression Recognition. The benchmark includes 1000 question-answer pairs, and all the answers undergo manual verification and correction to ensure a more precise evaluation. More details can be found in [OCRBench README](./OCRBench/README.md).

<p align="center">
<img src="./OCRBench/images/all_data.png" width="88%" height="80%">
</p>

# News
* ```2025.6.21``` 🚀 We release the private dataset of OCRBench v2 and will update the [Leaderboard](https://99franklin.github.io/ocrbench_v2/) every quarter.
* ```2024.12.31``` 🚀 [OCRBench v2](./OCRBench_v2/README.md) is released.
* ```2024.12.11``` 🚀 OCRBench has been accepted by [Science China Information Sciences](https://link.springer.com/article/10.1007/s11432-024-4235-6).
* ```2024.5.19 ``` 🚀 We release [DTVQA](https://github.com/ShuoZhang2003/DT-VQA) to explore the capabilities of Large Multimodal Models on Dense Text.
* ```2024.5.01 ``` 🚀 Thanks to [SWHL](https://github.com/Yuliang-Liu/MultimodalOCR/issues/29) for releasing [ChineseOCRBench](https://huggingface.co/datasets/SWHL/ChineseOCRBench).
* ```2024.3.26 ``` 🚀 OCRBench is now supported in [lmms-eval](https://github.com/EvolvingLMMs-Lab/lmms-eval).
* ```2024.3.12 ``` 🚀 We plan to construct OCRBench v2 to include more OCR tasks and data. Any contribution would be appreciated.
* ```2024.2.25 ``` 🚀 OCRBench is now supported in [VLMEvalKit](https://github.com/open-compass/VLMEvalKit).

# Other Related Multilingual Datasets
| Data | Link | Description |
| --- | --- | --- |
| EST-VQA Dataset (CVPR 2020, English and Chinese) | [Link](https://github.com/xinke-wang/EST-VQA) | On the General Value of Evidence, and Bilingual Scene-Text Visual Question Answering. |
| Swahili Dataset (ICDAR 2024) | [Link](https://arxiv.org/abs/2405.11437) | The First Swahili Language Scene Text Detection and Recognition Dataset. |
| Urdu Dataset (ICDAR 2024) | [Link](https://arxiv.org/abs/2405.12533) | Dataset and Benchmark for Urdu Natural Scenes Text Detection, Recognition and Visual Question Answering. |
| MTVQA (9 languages) | [Link](https://arxiv.org/abs/2405.11985) | MTVQA: Benchmarking Multilingual Text-Centric Visual Question Answering. |
| EVOBC (Oracle Bone Script Evolution Dataset) | [Link](https://arxiv.org/abs/2401.12467) | We systematically collected ancient characters from authoritative texts and websites spanning six historical stages. |
| HUST-OBC (Oracle Bone Script Character Dataset) | [Link](https://arxiv.org/abs/2401.15365) | For deciphering oracle bone script characters. |

# OCRBench

OCRBench is a comprehensive evaluation benchmark designed to assess the OCR capabilities of Large Multimodal Models. It comprises five components: Text Recognition, SceneText-Centric VQA, Document-Oriented VQA, Key Information Extraction, and Handwritten Mathematical Expression Recognition. The benchmark includes 1000 question-answer pairs, and all the answers undergo manual verification and correction to ensure a more precise evaluation.

You can find the results of Large Multimodal Models in the **[OCRBench Leaderboard](http://27.18.7.167:7682/)**. If you would like to include your model in the OCRBench leaderboard, please follow the evaluation instructions below and feel free to contact us via email at zhangli123@hust.edu.cn. We will update the leaderboard in time.

<img src="./images/GPT4V_Gemini.png" width="96%" height="96%">

# Evaluation
The test code for evaluating the models in the paper can be found in [scripts](./scripts). If you want to evaluate other models, please edit the "TODO" sections in [example](./example.py).

Example evaluation scripts:
```bash
python ./scripts/monkey.py --image_folder ./data --OCRBench_file ./OCRBench/OCRBench.json --save_name Monkey_OCRBench --num_workers GPU_Nums # Test on OCRBench
python ./scripts/monkey.py --image_folder ./data --OCRBench_file ./OCRBench/FullTest.json --save_name Monkey_FullTest --num_workers GPU_Nums # Full Test
```

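A minimal sketch of what filling in those TODOs might look like, assuming each entry in OCRBench.json carries an image path, a question, and a prediction field; the stub model call and the field names here are placeholders, not the repository's actual API (check example.py for the real hooks):

```python
import json

def run_model(image_path, question):
    # TODO: replace this placeholder stub with your model's inference call
    return "model answer"

with open("./OCRBench/OCRBench.json") as f:
    samples = json.load(f)

for sample in samples:
    # "image_path" and "question" are assumed field names; check the json
    sample["predict"] = run_model(sample["image_path"], sample["question"])

with open("./my_model_OCRBench.json", "w") as f:
    json.dump(samples, f, ensure_ascii=False)
```
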
| Data | Link | Description |
| --- | --- | --- |
| Full Test Json | [Full Test](./OCRBench/FullTest.json) | This file contains the test data used in Table 1 and Table 2 from [Paper](https://arxiv.org/abs/2305.07895). |
| OCRBench Json | [OCRBench](./OCRBench/OCRBench.json) | This file contains the test data in OCRBench used in Table 3 from [Paper](https://arxiv.org/abs/2305.07895). |
| All Test Images | [All Images](https://drive.google.com/file/d/1U5AtLoJ7FrJe9yfcbssfeLmlKb7dTosc/view?usp=drive_link) | This file contains all the testing images used in [Paper](https://arxiv.org/abs/2305.07895), including OCRBench Images. |
| OCRBench Images | [OCRBench Images](https://drive.google.com/file/d/1a3VRJx3V3SdOmPr7499Ky0Ug8AwqGUHO/view?usp=drive_link) | This file only contains the images used in OCRBench. |
| Test Results | [Test Results](https://drive.google.com/drive/folders/15XlHCuNTavI1Ihqm4G7u3J34BHpkaqyE?usp=drive_link) | This file contains the result files for the test models. |

# Citation
If you wish to refer to the baseline results published here, please use the following BibTeX entries:
```BibTeX
@misc{liu2024hidden,
      title={On the Hidden Mystery of OCR in Large Multimodal Models},
      author={Yuliang Liu and Zhang Li and Biao Yang and Chunyuan Li and Xucheng Yin and Cheng-lin Liu and Lianwen Jin and Xiang Bai},
      year={2024},
      eprint={2305.07895},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}

@article{Liu_2024,
      title={OCRBench: on the hidden mystery of OCR in large multimodal models},
      volume={67},
      ISSN={1869-1919},
      url={http://dx.doi.org/10.1007/s11432-024-4235-6},
      DOI={10.1007/s11432-024-4235-6},
      number={12},
      journal={Science China Information Sciences},
      publisher={Springer Science and Business Media LLC},
      author={Liu, Yuliang and Li, Zhang and Huang, Mingxin and Yang, Biao and Yu, Wenwen and Li, Chunyuan and Yin, Xu-Cheng and Liu, Cheng-Lin and Jin, Lianwen and Bai, Xiang},
      year={2024},
      month=dec
}

@misc{fu2024ocrbenchv2improvedbenchmark,
      title={OCRBench v2: An Improved Benchmark for Evaluating Large Multimodal Models on Visual Text Localization and Reasoning},
      author={Ling Fu and Biao Yang and Zhebin Kuang and Jiajun Song and Yuzhe Li and Linghao Zhu and Qidi Luo and Xinyu Wang and Hao Lu and Mingxin Huang and Zhang Li and Guozhi Tang and Bin Shan and Chunhui Lin and Qi Liu and Binghong Wu and Hao Feng and Hao Liu and Can Huang and Jingqun Tang and Wei Chen and Lianwen Jin and Yuliang Liu and Xiang Bai},
      year={2024},
      eprint={2501.00321},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2501.00321},
}
```

21
pyproject.toml
Normal file
@@ -0,0 +1,21 @@
[project]
name = "multimodalocr"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.13"
dependencies = [
    "apted>=1.0.3",
    "distance>=0.1.3",
    "editdistance>=0.8.1",
    "gdown>=5.2.0",
    "ipdb>=0.13.13",
    "jieba>=0.42.1",
    "levenshtein>=0.27.1",
    "lxml>=6.0.1",
    "nltk>=3.9.1",
    "numpy>=2.3.2",
    "polygon3>=3.0.9.1",
    "tqdm>=4.67.1",
    "zss>=1.2.0",
]
685
uv.lock
generated
Normal file
685
uv.lock
generated
Normal file
@@ -0,0 +1,685 @@
|
||||
version = 1
|
||||
revision = 3
|
||||
requires-python = ">=3.13"
|
||||
|
||||
[[package]]
|
||||
name = "apted"
|
||||
version = "1.0.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e0/29/3a42b2fb26272a464a9fbf455928a7e4255efa2e6f56679e9c0adaaf798a/apted-1.0.3.tar.gz", hash = "sha256:befa5181e2d4457fa88e54995a82604ee048bb2fbc781ea97d8e1856b4715ce9", size = 24547, upload-time = "2017-11-08T13:03:23.294Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/b9/71/c2bcf92376d3ae65d57111d33f577aca68d343e1b7b1914a3767bfbac18e/apted-1.0.3-py3-none-any.whl", hash = "sha256:74193369d023649d335269e67c4df07f922959e5ac2597de1b79af4e694150e8", size = 40566, upload-time = "2017-11-08T13:03:21.831Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "asttokens"
|
||||
version = "3.0.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978, upload-time = "2024-11-30T04:30:14.439Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918, upload-time = "2024-11-30T04:30:10.946Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "beautifulsoup4"
|
||||
version = "4.13.5"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "soupsieve" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/85/2e/3e5079847e653b1f6dc647aa24549d68c6addb4c595cc0d902d1b19308ad/beautifulsoup4-4.13.5.tar.gz", hash = "sha256:5e70131382930e7c3de33450a2f54a63d5e4b19386eab43a5b34d594268f3695", size = 622954, upload-time = "2025-08-24T14:06:13.168Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/04/eb/f4151e0c7377a6e08a38108609ba5cede57986802757848688aeedd1b9e8/beautifulsoup4-4.13.5-py3-none-any.whl", hash = "sha256:642085eaa22233aceadff9c69651bc51e8bf3f874fb6d7104ece2beb24b47c4a", size = 105113, upload-time = "2025-08-24T14:06:14.884Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "certifi"
|
||||
version = "2025.8.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/dc/67/960ebe6bf230a96cda2e0abcf73af550ec4f090005363542f0765df162e0/certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407", size = 162386, upload-time = "2025-08-03T03:07:47.08Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/48/1549795ba7742c948d2ad169c1c8cdbae65bc450d6cd753d124b17c8cd32/certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5", size = 161216, upload-time = "2025-08-03T03:07:45.777Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "charset-normalizer"
|
||||
version = "3.4.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/83/2d/5fd176ceb9b2fc619e63405525573493ca23441330fcdaee6bef9460e924/charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14", size = 122371, upload-time = "2025-08-09T07:57:28.46Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/65/ca/2135ac97709b400c7654b4b764daf5c5567c2da45a30cdd20f9eefe2d658/charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe", size = 205326, upload-time = "2025-08-09T07:56:24.721Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/71/11/98a04c3c97dd34e49c7d247083af03645ca3730809a5509443f3c37f7c99/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8", size = 146008, upload-time = "2025-08-09T07:56:26.004Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/f5/4659a4cb3c4ec146bec80c32d8bb16033752574c20b1252ee842a95d1a1e/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9", size = 159196, upload-time = "2025-08-09T07:56:27.25Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/9e/f552f7a00611f168b9a5865a1414179b2c6de8235a4fa40189f6f79a1753/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31", size = 156819, upload-time = "2025-08-09T07:56:28.515Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/95/42aa2156235cbc8fa61208aded06ef46111c4d3f0de233107b3f38631803/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f", size = 151350, upload-time = "2025-08-09T07:56:29.716Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c2/a9/3865b02c56f300a6f94fc631ef54f0a8a29da74fb45a773dfd3dcd380af7/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927", size = 148644, upload-time = "2025-08-09T07:56:30.984Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/77/d9/cbcf1a2a5c7d7856f11e7ac2d782aec12bdfea60d104e60e0aa1c97849dc/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9", size = 160468, upload-time = "2025-08-09T07:56:32.252Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/42/6f45efee8697b89fda4d50580f292b8f7f9306cb2971d4b53f8914e4d890/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5", size = 158187, upload-time = "2025-08-09T07:56:33.481Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/70/99/f1c3bdcfaa9c45b3ce96f70b14f070411366fa19549c1d4832c935d8e2c3/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc", size = 152699, upload-time = "2025-08-09T07:56:34.739Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a3/ad/b0081f2f99a4b194bcbb1934ef3b12aa4d9702ced80a37026b7607c72e58/charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce", size = 99580, upload-time = "2025-08-09T07:56:35.981Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/8f/ae790790c7b64f925e5c953b924aaa42a243fb778fed9e41f147b2a5715a/charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef", size = 107366, upload-time = "2025-08-09T07:56:37.339Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8e/91/b5a06ad970ddc7a0e513112d40113e834638f4ca1120eb727a249fb2715e/charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15", size = 204342, upload-time = "2025-08-09T07:56:38.687Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/ec/1edc30a377f0a02689342f214455c3f6c2fbedd896a1d2f856c002fc3062/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db", size = 145995, upload-time = "2025-08-09T07:56:40.048Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/e5/5e67ab85e6d22b04641acb5399c8684f4d37caf7558a53859f0283a650e9/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d", size = 158640, upload-time = "2025-08-09T07:56:41.311Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f1/e5/38421987f6c697ee3722981289d554957c4be652f963d71c5e46a262e135/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096", size = 156636, upload-time = "2025-08-09T07:56:43.195Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a0/e4/5a075de8daa3ec0745a9a3b54467e0c2967daaaf2cec04c845f73493e9a1/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa", size = 150939, upload-time = "2025-08-09T07:56:44.819Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/f7/3611b32318b30974131db62b4043f335861d4d9b49adc6d57c1149cc49d4/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049", size = 148580, upload-time = "2025-08-09T07:56:46.684Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/61/19b36f4bd67f2793ab6a99b979b4e4f3d8fc754cbdffb805335df4337126/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0", size = 159870, upload-time = "2025-08-09T07:56:47.941Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/57/84722eefdd338c04cf3030ada66889298eaedf3e7a30a624201e0cbe424a/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92", size = 157797, upload-time = "2025-08-09T07:56:49.756Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/72/2a/aff5dd112b2f14bcc3462c312dce5445806bfc8ab3a7328555da95330e4b/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16", size = 152224, upload-time = "2025-08-09T07:56:51.369Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/8c/9839225320046ed279c6e839d51f028342eb77c91c89b8ef2549f951f3ec/charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce", size = 100086, upload-time = "2025-08-09T07:56:52.722Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/7a/36fbcf646e41f710ce0a563c1c9a343c6edf9be80786edeb15b6f62e17db/charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c", size = 107400, upload-time = "2025-08-09T07:56:55.172Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/1f/f041989e93b001bc4e44bb1669ccdcf54d3f00e628229a85b08d330615c5/charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a", size = 53175, upload-time = "2025-08-09T07:57:26.864Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "click"
|
||||
version = "8.2.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "colorama"
|
||||
version = "0.4.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "decorator"
|
||||
version = "5.2.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "distance"
|
||||
version = "0.1.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/5c/1a/883e47df323437aefa0d0a92ccfb38895d9416bd0b56262c2e46a47767b8/Distance-0.1.3.tar.gz", hash = "sha256:60807584f5b6003f5c521aa73f39f51f631de3be5cccc5a1d67166fcbf0d4551", size = 180271, upload-time = "2013-11-21T00:14:34.152Z" }
|
||||
|
||||
[[package]]
|
||||
name = "editdistance"
|
||||
version = "0.8.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d5/18/9f4f975ca87a390832b1c22478f3702fcdf739f83211e24d054b7551270d/editdistance-0.8.1.tar.gz", hash = "sha256:d1cdf80a5d5014b0c9126a69a42ce55a457b457f6986ff69ca98e4fe4d2d8fed", size = 50006, upload-time = "2024-02-10T07:44:53.914Z" }
|
||||
|
||||
[[package]]
|
||||
name = "executing"
|
||||
version = "2.2.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693, upload-time = "2025-01-22T15:41:29.403Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702, upload-time = "2025-01-22T15:41:25.929Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "filelock"
|
||||
version = "3.19.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687, upload-time = "2025-08-14T16:56:03.016Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload-time = "2025-08-14T16:56:01.633Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gdown"
|
||||
version = "5.2.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "beautifulsoup4" },
|
||||
{ name = "filelock" },
|
||||
{ name = "requests", extra = ["socks"] },
|
||||
{ name = "tqdm" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/09/6a/37e6b70c5bda3161e40265861e63b64a86bfc6ca6a8f1c35328a675c84fd/gdown-5.2.0.tar.gz", hash = "sha256:2145165062d85520a3cd98b356c9ed522c5e7984d408535409fd46f94defc787", size = 284647, upload-time = "2024-05-12T06:45:12.725Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/54/70/e07c381e6488a77094f04c85c9caf1c8008cdc30778f7019bc52e5285ef0/gdown-5.2.0-py3-none-any.whl", hash = "sha256:33083832d82b1101bdd0e9df3edd0fbc0e1c5f14c9d8c38d2a35bf1683b526d6", size = 18235, upload-time = "2024-05-12T06:45:10.017Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "idna"
|
||||
version = "3.10"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ipdb"
|
||||
version = "0.13.13"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "decorator" },
|
||||
{ name = "ipython" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/3d/1b/7e07e7b752017f7693a0f4d41c13e5ca29ce8cbcfdcc1fd6c4ad8c0a27a0/ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726", size = 17042, upload-time = "2023-03-09T15:40:57.487Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/4c/b075da0092003d9a55cf2ecc1cae9384a1ca4f650d51b00fc59875fe76f6/ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", size = 12130, upload-time = "2023-03-09T15:40:55.021Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ipython"
|
||||
version = "9.5.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||
{ name = "decorator" },
|
||||
{ name = "ipython-pygments-lexers" },
|
||||
{ name = "jedi" },
|
||||
{ name = "matplotlib-inline" },
|
||||
{ name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" },
|
||||
{ name = "prompt-toolkit" },
|
||||
{ name = "pygments" },
|
||||
{ name = "stack-data" },
|
||||
{ name = "traitlets" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/6e/71/a86262bf5a68bf211bcc71fe302af7e05f18a2852fdc610a854d20d085e6/ipython-9.5.0.tar.gz", hash = "sha256:129c44b941fe6d9b82d36fc7a7c18127ddb1d6f02f78f867f402e2e3adde3113", size = 4389137, upload-time = "2025-08-29T12:15:21.519Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/08/2a/5628a99d04acb2d2f2e749cdf4ea571d2575e898df0528a090948018b726/ipython-9.5.0-py3-none-any.whl", hash = "sha256:88369ffa1d5817d609120daa523a6da06d02518e582347c29f8451732a9c5e72", size = 612426, upload-time = "2025-08-29T12:15:18.866Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ipython-pygments-lexers"
|
||||
version = "1.1.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pygments" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jedi"
|
||||
version = "0.19.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "parso" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jieba"
|
||||
version = "0.42.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c6/cb/18eeb235f833b726522d7ebed54f2278ce28ba9438e3135ab0278d9792a2/jieba-0.42.1.tar.gz", hash = "sha256:055ca12f62674fafed09427f176506079bc135638a14e23e25be909131928db2", size = 19214172, upload-time = "2020-01-20T14:27:23.5Z" }
|
||||
|
||||
[[package]]
|
||||
name = "joblib"
|
||||
version = "1.5.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e8/5d/447af5ea094b9e4c4054f82e223ada074c552335b9b4b2d14bd9b35a67c4/joblib-1.5.2.tar.gz", hash = "sha256:3faa5c39054b2f03ca547da9b2f52fde67c06240c31853f306aea97f13647b55", size = 331077, upload-time = "2025-08-27T12:15:46.575Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/e8/685f47e0d754320684db4425a0967f7d3fa70126bffd76110b7009a0090f/joblib-1.5.2-py3-none-any.whl", hash = "sha256:4e1f0bdbb987e6d843c70cf43714cb276623def372df3c22fe5266b2670bc241", size = 308396, upload-time = "2025-08-27T12:15:45.188Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "levenshtein"
|
||||
version = "0.27.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "rapidfuzz" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/7e/b3/b5f8011483ba9083a0bc74c4d58705e9cf465fbe55c948a1b1357d0a2aa8/levenshtein-0.27.1.tar.gz", hash = "sha256:3e18b73564cfc846eec94dd13fab6cb006b5d2e0cc56bad1fd7d5585881302e3", size = 382571, upload-time = "2025-03-02T19:44:56.148Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c6/d3/30485fb9aee848542ee2d01aba85106a7f5da982ebeeffc619f70ea593c7/levenshtein-0.27.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ab00c2cae2889166afb7e1af64af2d4e8c1b126f3902d13ef3740df00e54032d", size = 173397, upload-time = "2025-03-02T19:43:42.553Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/9f/40a81c54cfe74b22737710e654bd25ad934a675f737b60b24f84099540e0/levenshtein-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c27e00bc7527e282f7c437817081df8da4eb7054e7ef9055b851fa3947896560", size = 155787, upload-time = "2025-03-02T19:43:43.864Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/98/915f4e24e21982b6eca2c0203546c160f4a83853fa6a2ac6e2b208a54afc/levenshtein-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5b07de42bfc051136cc8e7f1e7ba2cb73666aa0429930f4218efabfdc5837ad", size = 150013, upload-time = "2025-03-02T19:43:45.134Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/80/93/9b0773107580416b9de14bf6a12bd1dd2b2964f7a9f6fb0e40723e1f0572/levenshtein-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb11ad3c9dae3063405aa50d9c96923722ab17bb606c776b6817d70b51fd7e07", size = 181234, upload-time = "2025-03-02T19:43:47.125Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/91/b1/3cd4f69af32d40de14808142cc743af3a1b737b25571bd5e8d2f46b885e0/levenshtein-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c5986fb46cb0c063305fd45b0a79924abf2959a6d984bbac2b511d3ab259f3f", size = 183697, upload-time = "2025-03-02T19:43:48.412Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bb/65/b691e502c6463f6965b7e0d8d84224c188aa35b53fbc85853c72a0e436c9/levenshtein-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75191e469269ddef2859bc64c4a8cfd6c9e063302766b5cb7e1e67f38cc7051a", size = 159964, upload-time = "2025-03-02T19:43:49.704Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/c0/89a922a47306a475fb6d8f2ab08668f143d3dc7dea4c39d09e46746e031c/levenshtein-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b3a7b2266933babc04e4d9821a495142eebd6ef709f90e24bc532b52b81385", size = 244759, upload-time = "2025-03-02T19:43:51.733Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/93/30283c6e69a6556b02e0507c88535df9613179f7b44bc49cdb4bc5e889a3/levenshtein-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbac509794afc3e2a9e73284c9e3d0aab5b1d928643f42b172969c3eefa1f2a3", size = 1115955, upload-time = "2025-03-02T19:43:53.739Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/cf/7e19ea2c23671db02fbbe5a5a4aeafd1d471ee573a6251ae17008458c434/levenshtein-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8d68714785178347ecb272b94e85cbf7e638165895c4dd17ab57e7742d8872ec", size = 1400921, upload-time = "2025-03-02T19:43:55.146Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e3/f7/fb42bfe2f3b46ef91f0fc6fa217b44dbeb4ef8c72a9c1917bbbe1cafc0f8/levenshtein-0.27.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8ee74ee31a5ab8f61cd6c6c6e9ade4488dde1285f3c12207afc018393c9b8d14", size = 1225037, upload-time = "2025-03-02T19:43:56.7Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/74/25/c86f8874ac7b0632b172d0d1622ed3ab9608a7f8fe85d41d632b16f5948e/levenshtein-0.27.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f2441b6365453ec89640b85344afd3d602b0d9972840b693508074c613486ce7", size = 1420601, upload-time = "2025-03-02T19:43:58.383Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/20/fe/ebfbaadcd90ea7dfde987ae95b5c11dc27c2c5d55a2c4ccbbe4e18a8af7b/levenshtein-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a9be39640a46d8a0f9be729e641651d16a62b2c07d3f4468c36e1cc66b0183b9", size = 1188241, upload-time = "2025-03-02T19:44:00.976Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2e/1a/aa6b07316e10781a6c5a5a8308f9bdc22213dc3911b959daa6d7ff654fc6/levenshtein-0.27.1-cp313-cp313-win32.whl", hash = "sha256:a520af67d976761eb6580e7c026a07eb8f74f910f17ce60e98d6e492a1f126c7", size = 88103, upload-time = "2025-03-02T19:44:02.42Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9d/7b/9bbfd417f80f1047a28d0ea56a9b38b9853ba913b84dd5998785c5f98541/levenshtein-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:7dd60aa49c2d8d23e0ef6452c8329029f5d092f386a177e3385d315cabb78f2a", size = 100579, upload-time = "2025-03-02T19:44:04.142Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/01/5f3ff775db7340aa378b250e2a31e6b4b038809a24ff0a3636ef20c7ca31/levenshtein-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:149cd4f0baf5884ac5df625b7b0d281721b15de00f447080e38f5188106e1167", size = 87933, upload-time = "2025-03-02T19:44:05.364Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lxml"
|
||||
version = "6.0.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8f/bd/f9d01fd4132d81c6f43ab01983caea69ec9614b913c290a26738431a015d/lxml-6.0.1.tar.gz", hash = "sha256:2b3a882ebf27dd026df3801a87cf49ff791336e0f94b0fad195db77e01240690", size = 4070214, upload-time = "2025-08-22T10:37:53.525Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/43/c4/cd757eeec4548e6652eff50b944079d18ce5f8182d2b2cf514e125e8fbcb/lxml-6.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:485eda5d81bb7358db96a83546949c5fe7474bec6c68ef3fa1fb61a584b00eea", size = 8405139, upload-time = "2025-08-22T10:33:34.09Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/99/0290bb86a7403893f5e9658490c705fcea103b9191f2039752b071b4ef07/lxml-6.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d12160adea318ce3d118f0b4fbdff7d1225c75fb7749429541b4d217b85c3f76", size = 4585954, upload-time = "2025-08-22T10:33:36.294Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/88/a7/4bb54dd1e626342a0f7df6ec6ca44fdd5d0e100ace53acc00e9a689ead04/lxml-6.0.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:48c8d335d8ab72f9265e7ba598ae5105a8272437403f4032107dbcb96d3f0b29", size = 4944052, upload-time = "2025-08-22T10:33:38.19Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/71/8d/20f51cd07a7cbef6214675a8a5c62b2559a36d9303fe511645108887c458/lxml-6.0.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:405e7cf9dbdbb52722c231e0f1257214202dfa192327fab3de45fd62e0554082", size = 5098885, upload-time = "2025-08-22T10:33:40.035Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/63/efceeee7245d45f97d548e48132258a36244d3c13c6e3ddbd04db95ff496/lxml-6.0.1-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:299a790d403335a6a057ade46f92612ebab87b223e4e8c5308059f2dc36f45ed", size = 5017542, upload-time = "2025-08-22T10:33:41.896Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/5d/92cb3d3499f5caba17f7933e6be3b6c7de767b715081863337ced42eb5f2/lxml-6.0.1-cp313-cp313-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:48da704672f6f9c461e9a73250440c647638cc6ff9567ead4c3b1f189a604ee8", size = 5347303, upload-time = "2025-08-22T10:33:43.868Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/69/f8/606fa16a05d7ef5e916c6481c634f40870db605caffed9d08b1a4fb6b989/lxml-6.0.1-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:21e364e1bb731489e3f4d51db416f991a5d5da5d88184728d80ecfb0904b1d68", size = 5641055, upload-time = "2025-08-22T10:33:45.784Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b3/01/15d5fc74ebb49eac4e5df031fbc50713dcc081f4e0068ed963a510b7d457/lxml-6.0.1-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1bce45a2c32032afddbd84ed8ab092130649acb935536ef7a9559636ce7ffd4a", size = 5242719, upload-time = "2025-08-22T10:33:48.089Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/a5/1b85e2aaaf8deaa67e04c33bddb41f8e73d07a077bf9db677cec7128bfb4/lxml-6.0.1-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:fa164387ff20ab0e575fa909b11b92ff1481e6876835014e70280769920c4433", size = 4717310, upload-time = "2025-08-22T10:33:49.852Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/23/f3bb1292f55a725814317172eeb296615db3becac8f1a059b53c51fc1da8/lxml-6.0.1-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7587ac5e000e1594e62278422c5783b34a82b22f27688b1074d71376424b73e8", size = 5254024, upload-time = "2025-08-22T10:33:52.22Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/be/4d768f581ccd0386d424bac615d9002d805df7cc8482ae07d529f60a3c1e/lxml-6.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:57478424ac4c9170eabf540237125e8d30fad1940648924c058e7bc9fb9cf6dd", size = 5055335, upload-time = "2025-08-22T10:33:54.041Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/40/07/ed61d1a3e77d1a9f856c4fab15ee5c09a2853fb7af13b866bb469a3a6d42/lxml-6.0.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:09c74afc7786c10dd6afaa0be2e4805866beadc18f1d843cf517a7851151b499", size = 4784864, upload-time = "2025-08-22T10:33:56.382Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/37/77e7971212e5c38a55431744f79dff27fd751771775165caea096d055ca4/lxml-6.0.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7fd70681aeed83b196482d42a9b0dc5b13bab55668d09ad75ed26dff3be5a2f5", size = 5657173, upload-time = "2025-08-22T10:33:58.698Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/32/a3/e98806d483941cd9061cc838b1169626acef7b2807261fbe5e382fcef881/lxml-6.0.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:10a72e456319b030b3dd900df6b1f19d89adf06ebb688821636dc406788cf6ac", size = 5245896, upload-time = "2025-08-22T10:34:00.586Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/07/de/9bb5a05e42e8623bf06b4638931ea8c8f5eb5a020fe31703abdbd2e83547/lxml-6.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b0fa45fb5f55111ce75b56c703843b36baaf65908f8b8d2fbbc0e249dbc127ed", size = 5267417, upload-time = "2025-08-22T10:34:02.719Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/43/c1cb2a7c67226266c463ef8a53b82d42607228beb763b5fbf4867e88a21f/lxml-6.0.1-cp313-cp313-win32.whl", hash = "sha256:01dab65641201e00c69338c9c2b8a0f2f484b6b3a22d10779bb417599fae32b5", size = 3610051, upload-time = "2025-08-22T10:34:04.553Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/34/96/6a6c3b8aa480639c1a0b9b6faf2a63fb73ab79ffcd2a91cf28745faa22de/lxml-6.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:bdf8f7c8502552d7bff9e4c98971910a0a59f60f88b5048f608d0a1a75e94d1c", size = 4009325, upload-time = "2025-08-22T10:34:06.24Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8c/66/622e8515121e1fd773e3738dae71b8df14b12006d9fb554ce90886689fd0/lxml-6.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:a6aeca75959426b9fd8d4782c28723ba224fe07cfa9f26a141004210528dcbe2", size = 3670443, upload-time = "2025-08-22T10:34:07.974Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/e3/b7eb612ce07abe766918a7e581ec6a0e5212352194001fd287c3ace945f0/lxml-6.0.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:29b0e849ec7030e3ecb6112564c9f7ad6881e3b2375dd4a0c486c5c1f3a33859", size = 8426160, upload-time = "2025-08-22T10:34:10.154Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/35/8f/ab3639a33595cf284fe733c6526da2ca3afbc5fd7f244ae67f3303cec654/lxml-6.0.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:02a0f7e629f73cc0be598c8b0611bf28ec3b948c549578a26111b01307fd4051", size = 4589288, upload-time = "2025-08-22T10:34:12.972Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/65/819d54f2e94d5c4458c1db8c1ccac9d05230b27c1038937d3d788eb406f9/lxml-6.0.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:beab5e54de016e730875f612ba51e54c331e2fa6dc78ecf9a5415fc90d619348", size = 4964523, upload-time = "2025-08-22T10:34:15.474Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5b/4a/d4a74ce942e60025cdaa883c5a4478921a99ce8607fc3130f1e349a83b28/lxml-6.0.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:92a08aefecd19ecc4ebf053c27789dd92c87821df2583a4337131cf181a1dffa", size = 5101108, upload-time = "2025-08-22T10:34:17.348Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cb/48/67f15461884074edd58af17b1827b983644d1fae83b3d909e9045a08b61e/lxml-6.0.1-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36c8fa7e177649470bc3dcf7eae6bee1e4984aaee496b9ccbf30e97ac4127fa2", size = 5053498, upload-time = "2025-08-22T10:34:19.232Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b6/d4/ec1bf1614828a5492f4af0b6a9ee2eb3e92440aea3ac4fa158e5228b772b/lxml-6.0.1-cp314-cp314-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:5d08e0f1af6916267bb7eff21c09fa105620f07712424aaae09e8cb5dd4164d1", size = 5351057, upload-time = "2025-08-22T10:34:21.143Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/65/2b/c85929dacac08821f2100cea3eb258ce5c8804a4e32b774f50ebd7592850/lxml-6.0.1-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9705cdfc05142f8c38c97a61bd3a29581ceceb973a014e302ee4a73cc6632476", size = 5671579, upload-time = "2025-08-22T10:34:23.528Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d0/36/cf544d75c269b9aad16752fd9f02d8e171c5a493ca225cb46bb7ba72868c/lxml-6.0.1-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74555e2da7c1636e30bff4e6e38d862a634cf020ffa591f1f63da96bf8b34772", size = 5250403, upload-time = "2025-08-22T10:34:25.642Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c2/e8/83dbc946ee598fd75fdeae6151a725ddeaab39bb321354a9468d4c9f44f3/lxml-6.0.1-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:e38b5f94c5a2a5dadaddd50084098dfd005e5a2a56cd200aaf5e0a20e8941782", size = 4696712, upload-time = "2025-08-22T10:34:27.753Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f4/72/889c633b47c06205743ba935f4d1f5aa4eb7f0325d701ed2b0540df1b004/lxml-6.0.1-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a5ec101a92ddacb4791977acfc86c1afd624c032974bfb6a21269d1083c9bc49", size = 5268177, upload-time = "2025-08-22T10:34:29.804Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b0/b6/f42a21a1428479b66ea0da7bd13e370436aecaff0cfe93270c7e165bd2a4/lxml-6.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:5c17e70c82fd777df586c12114bbe56e4e6f823a971814fd40dec9c0de518772", size = 5094648, upload-time = "2025-08-22T10:34:31.703Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/b0/5f8c1e8890e2ee1c2053c2eadd1cb0e4b79e2304e2912385f6ca666f48b1/lxml-6.0.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:45fdd0415a0c3d91640b5d7a650a8f37410966a2e9afebb35979d06166fd010e", size = 4745220, upload-time = "2025-08-22T10:34:33.595Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/f9/820b5125660dae489ca3a21a36d9da2e75dd6b5ffe922088f94bbff3b8a0/lxml-6.0.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:d417eba28981e720a14fcb98f95e44e7a772fe25982e584db38e5d3b6ee02e79", size = 5692913, upload-time = "2025-08-22T10:34:35.482Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/23/8e/a557fae9eec236618aecf9ff35fec18df41b6556d825f3ad6017d9f6e878/lxml-6.0.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:8e5d116b9e59be7934febb12c41cce2038491ec8fdb743aeacaaf36d6e7597e4", size = 5259816, upload-time = "2025-08-22T10:34:37.482Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fa/fd/b266cfaab81d93a539040be699b5854dd24c84e523a1711ee5f615aa7000/lxml-6.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c238f0d0d40fdcb695c439fe5787fa69d40f45789326b3bb6ef0d61c4b588d6e", size = 5276162, upload-time = "2025-08-22T10:34:39.507Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/6c/6f9610fbf1de002048e80585ea4719591921a0316a8565968737d9f125ca/lxml-6.0.1-cp314-cp314-win32.whl", hash = "sha256:537b6cf1c5ab88cfd159195d412edb3e434fee880f206cbe68dff9c40e17a68a", size = 3669595, upload-time = "2025-08-22T10:34:41.783Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/72/a5/506775e3988677db24dc75a7b03e04038e0b3d114ccd4bccea4ce0116c15/lxml-6.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:911d0a2bb3ef3df55b3d97ab325a9ca7e438d5112c102b8495321105d25a441b", size = 4079818, upload-time = "2025-08-22T10:34:44.04Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0a/44/9613f300201b8700215856e5edd056d4e58dd23368699196b58877d4408b/lxml-6.0.1-cp314-cp314-win_arm64.whl", hash = "sha256:2834377b0145a471a654d699bdb3a2155312de492142ef5a1d426af2c60a0a31", size = 3753901, upload-time = "2025-08-22T10:34:45.799Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "matplotlib-inline"
|
||||
version = "0.1.7"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "traitlets" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159, upload-time = "2024-04-15T13:44:44.803Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899, upload-time = "2024-04-15T13:44:43.265Z" },
]

[[package]]
name = "multimodalocr"
version = "0.1.0"
source = { virtual = "." }
dependencies = [
    { name = "apted" },
    { name = "distance" },
    { name = "editdistance" },
    { name = "gdown" },
    { name = "ipdb" },
    { name = "jieba" },
    { name = "levenshtein" },
    { name = "lxml" },
    { name = "nltk" },
    { name = "numpy" },
    { name = "polygon3" },
    { name = "tqdm" },
    { name = "zss" },
]

[package.metadata]
requires-dist = [
    { name = "apted", specifier = ">=1.0.3" },
    { name = "distance", specifier = ">=0.1.3" },
    { name = "editdistance", specifier = ">=0.8.1" },
    { name = "gdown", specifier = ">=5.2.0" },
    { name = "ipdb", specifier = ">=0.13.13" },
    { name = "jieba", specifier = ">=0.42.1" },
    { name = "levenshtein", specifier = ">=0.27.1" },
    { name = "lxml", specifier = ">=6.0.1" },
    { name = "nltk", specifier = ">=3.9.1" },
    { name = "numpy", specifier = ">=2.3.2" },
    { name = "polygon3", specifier = ">=3.0.9.1" },
    { name = "tqdm", specifier = ">=4.67.1" },
    { name = "zss", specifier = ">=1.2.0" },
]

[[package]]
name = "nltk"
version = "3.9.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "click" },
    { name = "joblib" },
    { name = "regex" },
    { name = "tqdm" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3c/87/db8be88ad32c2d042420b6fd9ffd4a149f9a0d7f0e86b3f543be2eeeedd2/nltk-3.9.1.tar.gz", hash = "sha256:87d127bd3de4bd89a4f81265e5fa59cb1b199b27440175370f7417d2bc7ae868", size = 2904691, upload-time = "2024-08-18T19:48:37.769Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/4d/66/7d9e26593edda06e8cb531874633f7c2372279c3b0f46235539fe546df8b/nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1", size = 1505442, upload-time = "2024-08-18T19:48:21.909Z" },
]

[[package]]
name = "numpy"
version = "2.3.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/37/7d/3fec4199c5ffb892bed55cff901e4f39a58c81df9c44c280499e92cad264/numpy-2.3.2.tar.gz", hash = "sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48", size = 20489306, upload-time = "2025-07-24T21:32:07.553Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1c/c0/c6bb172c916b00700ed3bf71cb56175fd1f7dbecebf8353545d0b5519f6c/numpy-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c8d9727f5316a256425892b043736d63e89ed15bbfe6556c5ff4d9d4448ff3b3", size = 20949074, upload-time = "2025-07-24T20:43:07.813Z" },
{ url = "https://files.pythonhosted.org/packages/20/4e/c116466d22acaf4573e58421c956c6076dc526e24a6be0903219775d862e/numpy-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:efc81393f25f14d11c9d161e46e6ee348637c0a1e8a54bf9dedc472a3fae993b", size = 14177311, upload-time = "2025-07-24T20:43:29.335Z" },
{ url = "https://files.pythonhosted.org/packages/78/45/d4698c182895af189c463fc91d70805d455a227261d950e4e0f1310c2550/numpy-2.3.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dd937f088a2df683cbb79dda9a772b62a3e5a8a7e76690612c2737f38c6ef1b6", size = 5106022, upload-time = "2025-07-24T20:43:37.999Z" },
{ url = "https://files.pythonhosted.org/packages/9f/76/3e6880fef4420179309dba72a8c11f6166c431cf6dee54c577af8906f914/numpy-2.3.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:11e58218c0c46c80509186e460d79fbdc9ca1eb8d8aee39d8f2dc768eb781089", size = 6640135, upload-time = "2025-07-24T20:43:49.28Z" },
{ url = "https://files.pythonhosted.org/packages/34/fa/87ff7f25b3c4ce9085a62554460b7db686fef1e0207e8977795c7b7d7ba1/numpy-2.3.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5ad4ebcb683a1f99f4f392cc522ee20a18b2bb12a2c1c42c3d48d5a1adc9d3d2", size = 14278147, upload-time = "2025-07-24T20:44:10.328Z" },
{ url = "https://files.pythonhosted.org/packages/1d/0f/571b2c7a3833ae419fe69ff7b479a78d313581785203cc70a8db90121b9a/numpy-2.3.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:938065908d1d869c7d75d8ec45f735a034771c6ea07088867f713d1cd3bbbe4f", size = 16635989, upload-time = "2025-07-24T20:44:34.88Z" },
{ url = "https://files.pythonhosted.org/packages/24/5a/84ae8dca9c9a4c592fe11340b36a86ffa9fd3e40513198daf8a97839345c/numpy-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:66459dccc65d8ec98cc7df61307b64bf9e08101f9598755d42d8ae65d9a7a6ee", size = 16053052, upload-time = "2025-07-24T20:44:58.872Z" },
{ url = "https://files.pythonhosted.org/packages/57/7c/e5725d99a9133b9813fcf148d3f858df98511686e853169dbaf63aec6097/numpy-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7af9ed2aa9ec5950daf05bb11abc4076a108bd3c7db9aa7251d5f107079b6a6", size = 18577955, upload-time = "2025-07-24T20:45:26.714Z" },
{ url = "https://files.pythonhosted.org/packages/ae/11/7c546fcf42145f29b71e4d6f429e96d8d68e5a7ba1830b2e68d7418f0bbd/numpy-2.3.2-cp313-cp313-win32.whl", hash = "sha256:906a30249315f9c8e17b085cc5f87d3f369b35fedd0051d4a84686967bdbbd0b", size = 6311843, upload-time = "2025-07-24T20:49:24.444Z" },
{ url = "https://files.pythonhosted.org/packages/aa/6f/a428fd1cb7ed39b4280d057720fed5121b0d7754fd2a9768640160f5517b/numpy-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:c63d95dc9d67b676e9108fe0d2182987ccb0f11933c1e8959f42fa0da8d4fa56", size = 12782876, upload-time = "2025-07-24T20:49:43.227Z" },
{ url = "https://files.pythonhosted.org/packages/65/85/4ea455c9040a12595fb6c43f2c217257c7b52dd0ba332c6a6c1d28b289fe/numpy-2.3.2-cp313-cp313-win_arm64.whl", hash = "sha256:b05a89f2fb84d21235f93de47129dd4f11c16f64c87c33f5e284e6a3a54e43f2", size = 10192786, upload-time = "2025-07-24T20:49:59.443Z" },
{ url = "https://files.pythonhosted.org/packages/80/23/8278f40282d10c3f258ec3ff1b103d4994bcad78b0cba9208317f6bb73da/numpy-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4e6ecfeddfa83b02318f4d84acf15fbdbf9ded18e46989a15a8b6995dfbf85ab", size = 21047395, upload-time = "2025-07-24T20:45:58.821Z" },
{ url = "https://files.pythonhosted.org/packages/1f/2d/624f2ce4a5df52628b4ccd16a4f9437b37c35f4f8a50d00e962aae6efd7a/numpy-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:508b0eada3eded10a3b55725b40806a4b855961040180028f52580c4729916a2", size = 14300374, upload-time = "2025-07-24T20:46:20.207Z" },
{ url = "https://files.pythonhosted.org/packages/f6/62/ff1e512cdbb829b80a6bd08318a58698867bca0ca2499d101b4af063ee97/numpy-2.3.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:754d6755d9a7588bdc6ac47dc4ee97867271b17cee39cb87aef079574366db0a", size = 5228864, upload-time = "2025-07-24T20:46:30.58Z" },
{ url = "https://files.pythonhosted.org/packages/7d/8e/74bc18078fff03192d4032cfa99d5a5ca937807136d6f5790ce07ca53515/numpy-2.3.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f66e7d2b2d7712410d3bc5684149040ef5f19856f20277cd17ea83e5006286", size = 6737533, upload-time = "2025-07-24T20:46:46.111Z" },
{ url = "https://files.pythonhosted.org/packages/19/ea/0731efe2c9073ccca5698ef6a8c3667c4cf4eea53fcdcd0b50140aba03bc/numpy-2.3.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de6ea4e5a65d5a90c7d286ddff2b87f3f4ad61faa3db8dabe936b34c2275b6f8", size = 14352007, upload-time = "2025-07-24T20:47:07.1Z" },
{ url = "https://files.pythonhosted.org/packages/cf/90/36be0865f16dfed20f4bc7f75235b963d5939707d4b591f086777412ff7b/numpy-2.3.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3ef07ec8cbc8fc9e369c8dcd52019510c12da4de81367d8b20bc692aa07573a", size = 16701914, upload-time = "2025-07-24T20:47:32.459Z" },
{ url = "https://files.pythonhosted.org/packages/94/30/06cd055e24cb6c38e5989a9e747042b4e723535758e6153f11afea88c01b/numpy-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:27c9f90e7481275c7800dc9c24b7cc40ace3fdb970ae4d21eaff983a32f70c91", size = 16132708, upload-time = "2025-07-24T20:47:58.129Z" },
{ url = "https://files.pythonhosted.org/packages/9a/14/ecede608ea73e58267fd7cb78f42341b3b37ba576e778a1a06baffbe585c/numpy-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:07b62978075b67eee4065b166d000d457c82a1efe726cce608b9db9dd66a73a5", size = 18651678, upload-time = "2025-07-24T20:48:25.402Z" },
{ url = "https://files.pythonhosted.org/packages/40/f3/2fe6066b8d07c3685509bc24d56386534c008b462a488b7f503ba82b8923/numpy-2.3.2-cp313-cp313t-win32.whl", hash = "sha256:c771cfac34a4f2c0de8e8c97312d07d64fd8f8ed45bc9f5726a7e947270152b5", size = 6441832, upload-time = "2025-07-24T20:48:37.181Z" },
{ url = "https://files.pythonhosted.org/packages/0b/ba/0937d66d05204d8f28630c9c60bc3eda68824abde4cf756c4d6aad03b0c6/numpy-2.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:72dbebb2dcc8305c431b2836bcc66af967df91be793d63a24e3d9b741374c450", size = 12927049, upload-time = "2025-07-24T20:48:56.24Z" },
{ url = "https://files.pythonhosted.org/packages/e9/ed/13542dd59c104d5e654dfa2ac282c199ba64846a74c2c4bcdbc3a0f75df1/numpy-2.3.2-cp313-cp313t-win_arm64.whl", hash = "sha256:72c6df2267e926a6d5286b0a6d556ebe49eae261062059317837fda12ddf0c1a", size = 10262935, upload-time = "2025-07-24T20:49:13.136Z" },
{ url = "https://files.pythonhosted.org/packages/c9/7c/7659048aaf498f7611b783e000c7268fcc4dcf0ce21cd10aad7b2e8f9591/numpy-2.3.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:448a66d052d0cf14ce9865d159bfc403282c9bc7bb2a31b03cc18b651eca8b1a", size = 20950906, upload-time = "2025-07-24T20:50:30.346Z" },
{ url = "https://files.pythonhosted.org/packages/80/db/984bea9d4ddf7112a04cfdfb22b1050af5757864cfffe8e09e44b7f11a10/numpy-2.3.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:546aaf78e81b4081b2eba1d105c3b34064783027a06b3ab20b6eba21fb64132b", size = 14185607, upload-time = "2025-07-24T20:50:51.923Z" },
{ url = "https://files.pythonhosted.org/packages/e4/76/b3d6f414f4eca568f469ac112a3b510938d892bc5a6c190cb883af080b77/numpy-2.3.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:87c930d52f45df092f7578889711a0768094debf73cfcde105e2d66954358125", size = 5114110, upload-time = "2025-07-24T20:51:01.041Z" },
{ url = "https://files.pythonhosted.org/packages/9e/d2/6f5e6826abd6bca52392ed88fe44a4b52aacb60567ac3bc86c67834c3a56/numpy-2.3.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:8dc082ea901a62edb8f59713c6a7e28a85daddcb67454c839de57656478f5b19", size = 6642050, upload-time = "2025-07-24T20:51:11.64Z" },
{ url = "https://files.pythonhosted.org/packages/c4/43/f12b2ade99199e39c73ad182f103f9d9791f48d885c600c8e05927865baf/numpy-2.3.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:af58de8745f7fa9ca1c0c7c943616c6fe28e75d0c81f5c295810e3c83b5be92f", size = 14296292, upload-time = "2025-07-24T20:51:33.488Z" },
{ url = "https://files.pythonhosted.org/packages/5d/f9/77c07d94bf110a916b17210fac38680ed8734c236bfed9982fd8524a7b47/numpy-2.3.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed5527c4cf10f16c6d0b6bee1f89958bccb0ad2522c8cadc2efd318bcd545f5", size = 16638913, upload-time = "2025-07-24T20:51:58.517Z" },
{ url = "https://files.pythonhosted.org/packages/9b/d1/9d9f2c8ea399cc05cfff8a7437453bd4e7d894373a93cdc46361bbb49a7d/numpy-2.3.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:095737ed986e00393ec18ec0b21b47c22889ae4b0cd2d5e88342e08b01141f58", size = 16071180, upload-time = "2025-07-24T20:52:22.827Z" },
{ url = "https://files.pythonhosted.org/packages/4c/41/82e2c68aff2a0c9bf315e47d61951099fed65d8cb2c8d9dc388cb87e947e/numpy-2.3.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5e40e80299607f597e1a8a247ff8d71d79c5b52baa11cc1cce30aa92d2da6e0", size = 18576809, upload-time = "2025-07-24T20:52:51.015Z" },
{ url = "https://files.pythonhosted.org/packages/14/14/4b4fd3efb0837ed252d0f583c5c35a75121038a8c4e065f2c259be06d2d8/numpy-2.3.2-cp314-cp314-win32.whl", hash = "sha256:7d6e390423cc1f76e1b8108c9b6889d20a7a1f59d9a60cac4a050fa734d6c1e2", size = 6366410, upload-time = "2025-07-24T20:56:44.949Z" },
{ url = "https://files.pythonhosted.org/packages/11/9e/b4c24a6b8467b61aced5c8dc7dcfce23621baa2e17f661edb2444a418040/numpy-2.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:b9d0878b21e3918d76d2209c924ebb272340da1fb51abc00f986c258cd5e957b", size = 12918821, upload-time = "2025-07-24T20:57:06.479Z" },
{ url = "https://files.pythonhosted.org/packages/0e/0f/0dc44007c70b1007c1cef86b06986a3812dd7106d8f946c09cfa75782556/numpy-2.3.2-cp314-cp314-win_arm64.whl", hash = "sha256:2738534837c6a1d0c39340a190177d7d66fdf432894f469728da901f8f6dc910", size = 10477303, upload-time = "2025-07-24T20:57:22.879Z" },
{ url = "https://files.pythonhosted.org/packages/8b/3e/075752b79140b78ddfc9c0a1634d234cfdbc6f9bbbfa6b7504e445ad7d19/numpy-2.3.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:4d002ecf7c9b53240be3bb69d80f86ddbd34078bae04d87be81c1f58466f264e", size = 21047524, upload-time = "2025-07-24T20:53:22.086Z" },
{ url = "https://files.pythonhosted.org/packages/fe/6d/60e8247564a72426570d0e0ea1151b95ce5bd2f1597bb878a18d32aec855/numpy-2.3.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:293b2192c6bcce487dbc6326de5853787f870aeb6c43f8f9c6496db5b1781e45", size = 14300519, upload-time = "2025-07-24T20:53:44.053Z" },
{ url = "https://files.pythonhosted.org/packages/4d/73/d8326c442cd428d47a067070c3ac6cc3b651a6e53613a1668342a12d4479/numpy-2.3.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0a4f2021a6da53a0d580d6ef5db29947025ae8b35b3250141805ea9a32bbe86b", size = 5228972, upload-time = "2025-07-24T20:53:53.81Z" },
{ url = "https://files.pythonhosted.org/packages/34/2e/e71b2d6dad075271e7079db776196829019b90ce3ece5c69639e4f6fdc44/numpy-2.3.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:9c144440db4bf3bb6372d2c3e49834cc0ff7bb4c24975ab33e01199e645416f2", size = 6737439, upload-time = "2025-07-24T20:54:04.742Z" },
{ url = "https://files.pythonhosted.org/packages/15/b0/d004bcd56c2c5e0500ffc65385eb6d569ffd3363cb5e593ae742749b2daa/numpy-2.3.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f92d6c2a8535dc4fe4419562294ff957f83a16ebdec66df0805e473ffaad8bd0", size = 14352479, upload-time = "2025-07-24T20:54:25.819Z" },
{ url = "https://files.pythonhosted.org/packages/11/e3/285142fcff8721e0c99b51686426165059874c150ea9ab898e12a492e291/numpy-2.3.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cefc2219baa48e468e3db7e706305fcd0c095534a192a08f31e98d83a7d45fb0", size = 16702805, upload-time = "2025-07-24T20:54:50.814Z" },
{ url = "https://files.pythonhosted.org/packages/33/c3/33b56b0e47e604af2c7cd065edca892d180f5899599b76830652875249a3/numpy-2.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:76c3e9501ceb50b2ff3824c3589d5d1ab4ac857b0ee3f8f49629d0de55ecf7c2", size = 16133830, upload-time = "2025-07-24T20:55:17.306Z" },
{ url = "https://files.pythonhosted.org/packages/6e/ae/7b1476a1f4d6a48bc669b8deb09939c56dd2a439db1ab03017844374fb67/numpy-2.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:122bf5ed9a0221b3419672493878ba4967121514b1d7d4656a7580cd11dddcbf", size = 18652665, upload-time = "2025-07-24T20:55:46.665Z" },
{ url = "https://files.pythonhosted.org/packages/14/ba/5b5c9978c4bb161034148ade2de9db44ec316fab89ce8c400db0e0c81f86/numpy-2.3.2-cp314-cp314t-win32.whl", hash = "sha256:6f1ae3dcb840edccc45af496f312528c15b1f79ac318169d094e85e4bb35fdf1", size = 6514777, upload-time = "2025-07-24T20:55:57.66Z" },
{ url = "https://files.pythonhosted.org/packages/eb/46/3dbaf0ae7c17cdc46b9f662c56da2054887b8d9e737c1476f335c83d33db/numpy-2.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:087ffc25890d89a43536f75c5fe8770922008758e8eeeef61733957041ed2f9b", size = 13111856, upload-time = "2025-07-24T20:56:17.318Z" },
{ url = "https://files.pythonhosted.org/packages/c1/9e/1652778bce745a67b5fe05adde60ed362d38eb17d919a540e813d30f6874/numpy-2.3.2-cp314-cp314t-win_arm64.whl", hash = "sha256:092aeb3449833ea9c0bf0089d70c29ae480685dd2377ec9cdbbb620257f84631", size = 10544226, upload-time = "2025-07-24T20:56:34.509Z" },
]

[[package]]
name = "parso"
version = "0.8.5"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d4/de/53e0bcf53d13e005bd8c92e7855142494f41171b34c2536b86187474184d/parso-0.8.5.tar.gz", hash = "sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a", size = 401205, upload-time = "2025-08-23T15:15:28.028Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/16/32/f8e3c85d1d5250232a5d3477a2a28cc291968ff175caeadaf3cc19ce0e4a/parso-0.8.5-py2.py3-none-any.whl", hash = "sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887", size = 106668, upload-time = "2025-08-23T15:15:25.663Z" },
]

[[package]]
name = "pexpect"
version = "4.9.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "ptyprocess" },
]
sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" },
]

[[package]]
name = "polygon3"
version = "3.0.9.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/1f/26/eea4112be43c8b7345477ad9150d499303494f32fb5951cb0f6e9104045b/Polygon3-3.0.9.1.tar.gz", hash = "sha256:2ddf8d06975f728d5b40786136c82e5b9d38a846bce236b7e6587bbd6a5e9b49", size = 39121, upload-time = "2021-03-09T16:04:50.975Z" }

[[package]]
name = "prompt-toolkit"
version = "3.0.52"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "wcwidth" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" },
]

[[package]]
name = "ptyprocess"
version = "0.7.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" },
]

[[package]]
name = "pure-eval"
version = "0.2.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" },
]

[[package]]
name = "pygments"
version = "2.19.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
]

[[package]]
name = "pysocks"
version = "1.7.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/bd/11/293dd436aea955d45fc4e8a35b6ae7270f5b8e00b53cf6c024c83b657a11/PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0", size = 284429, upload-time = "2019-09-20T02:07:35.714Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8d/59/b4572118e098ac8e46e399a1dd0f2d85403ce8bbaad9ec79373ed6badaf9/PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5", size = 16725, upload-time = "2019-09-20T02:06:22.938Z" },
]

[[package]]
name = "rapidfuzz"
version = "3.14.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d4/11/0de727b336f28e25101d923c9feeeb64adcf231607fe7e1b083795fa149a/rapidfuzz-3.14.0.tar.gz", hash = "sha256:672b6ba06150e53d7baf4e3d5f12ffe8c213d5088239a15b5ae586ab245ac8b2", size = 58073448, upload-time = "2025-08-27T13:41:31.541Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/04/b1/e6875e32209b28a581d3b8ec1ffded8f674de4a27f4540ec312d0ecf4b83/rapidfuzz-3.14.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5cf3828b8cbac02686e1d5c499c58e43c5f613ad936fe19a2d092e53f3308ccd", size = 2015663, upload-time = "2025-08-27T13:39:55.815Z" },
{ url = "https://files.pythonhosted.org/packages/f1/c7/702472c4f3c4e5f9985bb5143405a5c4aadf3b439193f4174944880c50a3/rapidfuzz-3.14.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68c3931c19c51c11654cf75f663f34c0c7ea04c456c84ccebfd52b2047121dba", size = 1472180, upload-time = "2025-08-27T13:39:57.663Z" },
{ url = "https://files.pythonhosted.org/packages/49/e1/c22fc941b8e506db9a6f051298e17edbae76e1be63e258e51f13791d5eb2/rapidfuzz-3.14.0-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9b4232168959af46f2c0770769e7986ff6084d97bc4b6b2b16b2bfa34164421b", size = 1461676, upload-time = "2025-08-27T13:39:59.409Z" },
{ url = "https://files.pythonhosted.org/packages/97/4c/9dd58e4b4d2b1b7497c35c5280b4fa064bd6e6e3ed5fcf67513faaa2d4f4/rapidfuzz-3.14.0-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:174c784cecfafe22d783b5124ebffa2e02cc01e49ffe60a28ad86d217977f478", size = 1774563, upload-time = "2025-08-27T13:40:01.284Z" },
{ url = "https://files.pythonhosted.org/packages/96/8f/89a39ab5fbd971e6a25431edbbf66e255d271a0b67aadc340b8e8bf573e7/rapidfuzz-3.14.0-cp313-cp313-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0b2dedf216f43a50f227eee841ef0480e29e26b2ce2d7ee680b28354ede18627", size = 2332659, upload-time = "2025-08-27T13:40:03.04Z" },
{ url = "https://files.pythonhosted.org/packages/34/b0/f30f9bae81a472182787641c9c2430da79431c260f7620899a105ee959d0/rapidfuzz-3.14.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5698239eecf5b759630450ef59521ad3637e5bd4afc2b124ae8af2ff73309c41", size = 3289626, upload-time = "2025-08-27T13:40:04.77Z" },
{ url = "https://files.pythonhosted.org/packages/d2/b9/c9eb0bfb62972123a23b31811d4d345e8dd46cb3083d131dd3c1c97b70af/rapidfuzz-3.14.0-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:0acc9553fc26f1c291c381a6aa8d3c5625be23b5721f139528af40cc4119ae1d", size = 1324164, upload-time = "2025-08-27T13:40:06.642Z" },
{ url = "https://files.pythonhosted.org/packages/7f/a1/91bf79a76626bd0dae694ad9c57afdad2ca275f9808f69e570be39a99e71/rapidfuzz-3.14.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:00141dfd3b8c9ae15fbb5fbd191a08bde63cdfb1f63095d8f5faf1698e30da93", size = 2480695, upload-time = "2025-08-27T13:40:08.459Z" },
{ url = "https://files.pythonhosted.org/packages/2f/6a/bfab3575842d8ccc406c3fa8c618b476363e4218a0d01394543c741ef1bd/rapidfuzz-3.14.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:67f725c3f5713da6e0750dc23f65f0f822c6937c25e3fc9ee797aa6783bef8c1", size = 2628236, upload-time = "2025-08-27T13:40:10.27Z" },
{ url = "https://files.pythonhosted.org/packages/5d/10/e7e99ca1a6546645aa21d1b426f728edbfb7a3abcb1a7b7642353b79ae57/rapidfuzz-3.14.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ba351cf2678d40a23fb4cbfe82cc45ea338a57518dca62a823c5b6381aa20c68", size = 2893483, upload-time = "2025-08-27T13:40:12.079Z" },
{ url = "https://files.pythonhosted.org/packages/00/11/fb46a86659e2bb304764478a28810f36bb56f794087f34a5bd1b81dd0be5/rapidfuzz-3.14.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:558323dcd5fb38737226be84c78cafbe427706e47379f02c57c3e35ac3745061", size = 3411761, upload-time = "2025-08-27T13:40:14.051Z" },
{ url = "https://files.pythonhosted.org/packages/fc/76/89eabf1e7523f6dc996ea6b2bfcfd22565cdfa830c7c3af0ebc5b17e9ce7/rapidfuzz-3.14.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cb4e4ea174add5183c707d890a816a85e9330f93e5ded139dab182adc727930c", size = 4404126, upload-time = "2025-08-27T13:40:16.39Z" },
{ url = "https://files.pythonhosted.org/packages/c8/6c/ddc7ee86d392908efdf95a1242b87b94523f6feaa368b7a24efa39ecd9d9/rapidfuzz-3.14.0-cp313-cp313-win32.whl", hash = "sha256:ec379e1b407935d729c08da9641cfc5dfb2a7796f74cdd82158ce5986bb8ff88", size = 1828545, upload-time = "2025-08-27T13:40:19.069Z" },
{ url = "https://files.pythonhosted.org/packages/95/47/2a271455b602eef360cd5cc716d370d7ab47b9d57f00263821a217fd30f4/rapidfuzz-3.14.0-cp313-cp313-win_amd64.whl", hash = "sha256:4b59ba48a909bdf7ec5dad6e3a5a0004aeec141ae5ddb205d0c5bd4389894cf9", size = 1658600, upload-time = "2025-08-27T13:40:21.278Z" },
{ url = "https://files.pythonhosted.org/packages/86/47/5acb5d160a091c3175c6f5e3f227ccdf03b201b05ceaad2b8b7f5009ebe9/rapidfuzz-3.14.0-cp313-cp313-win_arm64.whl", hash = "sha256:e688b0a98edea42da450fa6ba41736203ead652a78b558839916c10df855f545", size = 885686, upload-time = "2025-08-27T13:40:23.254Z" },
{ url = "https://files.pythonhosted.org/packages/dc/f2/203c44a06dfefbb580ad7b743333880d600d7bdff693af9d290bd2b09742/rapidfuzz-3.14.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:cb6c5a46444a2787e466acd77e162049f061304025ab24da02b59caedea66064", size = 2041214, upload-time = "2025-08-27T13:40:25.051Z" },
{ url = "https://files.pythonhosted.org/packages/ec/db/6571a5bbba38255ede8098b3b45c007242788e5a5c3cdbe7f6f03dd6daed/rapidfuzz-3.14.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:99ed7a9e9ff798157caf3c3d96ca7da6560878902d8f70fa7731acc94e0d293c", size = 1501621, upload-time = "2025-08-27T13:40:26.881Z" },
{ url = "https://files.pythonhosted.org/packages/0b/85/efbae42fe8ca2bdb967751da1df2e3ebb5be9ea68f22f980731e5c18ce25/rapidfuzz-3.14.0-cp313-cp313t-win32.whl", hash = "sha256:c8e954dd59291ff0cd51b9c0f425e5dc84731bb006dbd5b7846746fe873a0452", size = 1887956, upload-time = "2025-08-27T13:40:29.143Z" },
{ url = "https://files.pythonhosted.org/packages/c8/60/2bb44b5ecb7151093ed7e2020156f260bdd9a221837f57a0bc5938b2b6d1/rapidfuzz-3.14.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5754e3ca259667c46a2b58ca7d7568251d6e23d2f0e354ac1cc5564557f4a32d", size = 1702542, upload-time = "2025-08-27T13:40:31.103Z" },
{ url = "https://files.pythonhosted.org/packages/6f/b7/688e9ab091545ff8eed564994a01309d8a52718211f27af94743d55b3c80/rapidfuzz-3.14.0-cp313-cp313t-win_arm64.whl", hash = "sha256:558865f6825d27006e6ae2e1635cfe236d736c8f2c5c82db6db4b1b6df4478bc", size = 912891, upload-time = "2025-08-27T13:40:33.263Z" },
{ url = "https://files.pythonhosted.org/packages/a5/12/9c29b975f742db04da5017640dbc2dcfaaf0d6336598071cd2ca8b0dc783/rapidfuzz-3.14.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:3cc4bd8de6643258c5899f21414f9d45d7589d158eee8d438ea069ead624823b", size = 2015534, upload-time = "2025-08-27T13:40:35.1Z" },
{ url = "https://files.pythonhosted.org/packages/6a/09/ff3a79a6d5f532e7f30569ded892e28c462c0808f01b155509adbcc001e7/rapidfuzz-3.14.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:081aac1acb4ab449f8ea7d4e5ea268227295503e1287f56f0b56c7fc3452da1e", size = 1473359, upload-time = "2025-08-27T13:40:36.991Z" },
{ url = "https://files.pythonhosted.org/packages/fe/e9/000792dff6ad6ccc52880bc21d29cf05fabef3004261039ba31965310130/rapidfuzz-3.14.0-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3e0209c6ef7f2c732e10ce4fccafcf7d9e79eb8660a81179aa307c7bd09fafcd", size = 1469241, upload-time = "2025-08-27T13:40:38.82Z" },
{ url = "https://files.pythonhosted.org/packages/6e/5d/1556dc5fbd91d4c27708272692361970d167f8142642052c8e874fcfd9a9/rapidfuzz-3.14.0-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6e4610997e9de08395e8632b605488a9efc859fe0516b6993b3925f3057f9da7", size = 1779910, upload-time = "2025-08-27T13:40:40.598Z" },
{ url = "https://files.pythonhosted.org/packages/52/fb/6c11600aa5eec998c27c53a617820bb3cdfa0603c164b9e8028f7e715b9e/rapidfuzz-3.14.0-cp314-cp314-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd0095cde6d0179c92c997ede4b85158bf3c7386043e2fadbee291018b29300", size = 2340555, upload-time = "2025-08-27T13:40:42.641Z" },
{ url = "https://files.pythonhosted.org/packages/62/46/63746cb12724ea819ee469f2aed4c4c0be4a5bbb2f9174b29298a14def16/rapidfuzz-3.14.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0a141c07f9e97c45e67aeed677bac92c08f228c556a80750ea3e191e82d54034", size = 3295540, upload-time = "2025-08-27T13:40:45.721Z" },
{ url = "https://files.pythonhosted.org/packages/33/23/1be0841eed0f196772f2d4fd7b21cfa73501ce96b44125726c4c739df5ae/rapidfuzz-3.14.0-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:5a9de40fa6be7809fd2579c8020b9edaf6f50ffc43082b14e95ad3928a254f22", size = 1318384, upload-time = "2025-08-27T13:40:47.814Z" },
{ url = "https://files.pythonhosted.org/packages/0d/aa/457c11d0495ab75de7a9b5b61bce041f5dd5a9c39d2d297a73be124518fd/rapidfuzz-3.14.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20f510dae17bad8f4909ab32b40617f964af55131e630de7ebc0ffa7f00fe634", size = 2487028, upload-time = "2025-08-27T13:40:49.784Z" },
{ url = "https://files.pythonhosted.org/packages/73/fc/d8e4b7163064019de5f4c8c3e4af95331208c67738c024214f408b480018/rapidfuzz-3.14.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:79c3fd17a432c3f74de94782d7139f9a22e948cec31659a1a05d67b5c0f4290e", size = 2622505, upload-time = "2025-08-27T13:40:52.077Z" },
{ url = "https://files.pythonhosted.org/packages/27/91/0cb2cdbc4b223187e6269002ad73f49f6312844ecbdcd061c2770cf01539/rapidfuzz-3.14.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:8cde9ffb86ea33d67cce9b26b513a177038be48ee2eb4d856cc60a75cb698db7", size = 2898844, upload-time = "2025-08-27T13:40:54.285Z" },
{ url = "https://files.pythonhosted.org/packages/d8/73/dc997aaa88d6850938c73bda3f6185d77800bc04a26c084a3a3b95e139ed/rapidfuzz-3.14.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:cafb657c8f2959761bca40c0da66f29d111e2c40d91f8ed4a75cc486c99b33ae", size = 3419941, upload-time = "2025-08-27T13:40:56.35Z" },
{ url = "https://files.pythonhosted.org/packages/fb/c0/b02d5bd8effd7dedb2c65cbdd85579ba42b21fb9579f833bca9252f2fe02/rapidfuzz-3.14.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4d80a9f673c534800d73f164ed59620e2ba820ed3840abb67c56022ad043564b", size = 4408912, upload-time = "2025-08-27T13:40:58.465Z" },
{ url = "https://files.pythonhosted.org/packages/b0/38/68f0f8a03fde87a8905a029a0dcdb716a2faf15c8e8895ef4a7f26b085e6/rapidfuzz-3.14.0-cp314-cp314-win32.whl", hash = "sha256:da9878a01357c7906fb16359b3622ce256933a3286058ee503358859e1442f68", size = 1862571, upload-time = "2025-08-27T13:41:00.581Z" },
{ url = "https://files.pythonhosted.org/packages/43/5e/98ba43b2660c83b683221706f1cca1409c99eafd458e028142ef32d21baa/rapidfuzz-3.14.0-cp314-cp314-win_amd64.whl", hash = "sha256:09af941076ef18f6c2b35acfd5004c60d03414414058e98ece6ca9096f454870", size = 1706951, upload-time = "2025-08-27T13:41:02.63Z" },
{ url = "https://files.pythonhosted.org/packages/65/eb/60ac6b461dc71be3405ce469e7aee56adbe121666ed5326dce6bd579fa52/rapidfuzz-3.14.0-cp314-cp314-win_arm64.whl", hash = "sha256:1a878eb065ce6061038dd1c0b9e8eb7477f7d05d5c5161a1d2a5fa630818f938", size = 912456, upload-time = "2025-08-27T13:41:04.971Z" },
{ url = "https://files.pythonhosted.org/packages/00/7f/a4325050d6cfb89c2fde4fe6e918820b941c3dc0cbbd08b697b66d9e0a06/rapidfuzz-3.14.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:33ce0326e6feb0d2207a7ca866a5aa6a2ac2361f1ca43ca32aca505268c18ec9", size = 2041108, upload-time = "2025-08-27T13:41:06.953Z" },
{ url = "https://files.pythonhosted.org/packages/c9/77/b4965b3a8ec7b30515bc184a95c75ae9406c95ad0cfa61f32bee366e1859/rapidfuzz-3.14.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e8056d10e99dedf110e929fdff4de6272057115b28eeef4fb6f0d99fd73c026f", size = 1501577, upload-time = "2025-08-27T13:41:08.963Z" },
{ url = "https://files.pythonhosted.org/packages/4a/5e/0886bd2f525d6e5011378b8eb51a29137df3dec55fafa39ffb77823771bf/rapidfuzz-3.14.0-cp314-cp314t-win32.whl", hash = "sha256:ddde238b7076e49c2c21a477ee4b67143e1beaf7a3185388fe0b852e64c6ef52", size = 1925406, upload-time = "2025-08-27T13:41:11.207Z" },
{ url = "https://files.pythonhosted.org/packages/2a/56/8ddf6d8cf4b7e04c49861a38b791b4f0d5b3f1270ff3ade1aabdf6b19b7a/rapidfuzz-3.14.0-cp314-cp314t-win_amd64.whl", hash = "sha256:ef24464be04a7da1adea741376ddd2b092e0de53c9b500fd3c2e38e071295c9e", size = 1751584, upload-time = "2025-08-27T13:41:13.628Z" },
{ url = "https://files.pythonhosted.org/packages/b0/0c/825f6055e49d7ee943be95ca0d62bb6e5fbfd7b7c30bbfca7d00ac5670e7/rapidfuzz-3.14.0-cp314-cp314t-win_arm64.whl", hash = "sha256:fd4a27654f51bed3518bc5bbf166627caf3ddd858b12485380685777421f8933", size = 936661, upload-time = "2025-08-27T13:41:15.566Z" },
]

[[package]]
name = "regex"
version = "2025.7.34"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/0b/de/e13fa6dc61d78b30ba47481f99933a3b49a57779d625c392d8036770a60d/regex-2025.7.34.tar.gz", hash = "sha256:9ead9765217afd04a86822dfcd4ed2747dfe426e887da413b15ff0ac2457e21a", size = 400714, upload-time = "2025-07-31T00:21:16.262Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/15/16/b709b2119975035169a25aa8e4940ca177b1a2e25e14f8d996d09130368e/regex-2025.7.34-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c3c9740a77aeef3f5e3aaab92403946a8d34437db930a0280e7e81ddcada61f5", size = 485334, upload-time = "2025-07-31T00:19:56.58Z" },
{ url = "https://files.pythonhosted.org/packages/94/a6/c09136046be0595f0331bc58a0e5f89c2d324cf734e0b0ec53cf4b12a636/regex-2025.7.34-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:69ed3bc611540f2ea70a4080f853741ec698be556b1df404599f8724690edbcd", size = 289942, upload-time = "2025-07-31T00:19:57.943Z" },
{ url = "https://files.pythonhosted.org/packages/36/91/08fc0fd0f40bdfb0e0df4134ee37cfb16e66a1044ac56d36911fd01c69d2/regex-2025.7.34-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d03c6f9dcd562c56527c42b8530aad93193e0b3254a588be1f2ed378cdfdea1b", size = 285991, upload-time = "2025-07-31T00:19:59.837Z" },
{ url = "https://files.pythonhosted.org/packages/be/2f/99dc8f6f756606f0c214d14c7b6c17270b6bbe26d5c1f05cde9dbb1c551f/regex-2025.7.34-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6164b1d99dee1dfad33f301f174d8139d4368a9fb50bf0a3603b2eaf579963ad", size = 797415, upload-time = "2025-07-31T00:20:01.668Z" },
{ url = "https://files.pythonhosted.org/packages/62/cf/2fcdca1110495458ba4e95c52ce73b361cf1cafd8a53b5c31542cde9a15b/regex-2025.7.34-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1e4f4f62599b8142362f164ce776f19d79bdd21273e86920a7b604a4275b4f59", size = 862487, upload-time = "2025-07-31T00:20:03.142Z" },
{ url = "https://files.pythonhosted.org/packages/90/38/899105dd27fed394e3fae45607c1983e138273ec167e47882fc401f112b9/regex-2025.7.34-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:72a26dcc6a59c057b292f39d41465d8233a10fd69121fa24f8f43ec6294e5415", size = 910717, upload-time = "2025-07-31T00:20:04.727Z" },
{ url = "https://files.pythonhosted.org/packages/ee/f6/4716198dbd0bcc9c45625ac4c81a435d1c4d8ad662e8576dac06bab35b17/regex-2025.7.34-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5273fddf7a3e602695c92716c420c377599ed3c853ea669c1fe26218867002f", size = 801943, upload-time = "2025-07-31T00:20:07.1Z" },
{ url = "https://files.pythonhosted.org/packages/40/5d/cff8896d27e4e3dd11dd72ac78797c7987eb50fe4debc2c0f2f1682eb06d/regex-2025.7.34-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c1844be23cd40135b3a5a4dd298e1e0c0cb36757364dd6cdc6025770363e06c1", size = 786664, upload-time = "2025-07-31T00:20:08.818Z" },
{ url = "https://files.pythonhosted.org/packages/10/29/758bf83cf7b4c34f07ac3423ea03cee3eb3176941641e4ccc05620f6c0b8/regex-2025.7.34-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dde35e2afbbe2272f8abee3b9fe6772d9b5a07d82607b5788e8508974059925c", size = 856457, upload-time = "2025-07-31T00:20:10.328Z" },
{ url = "https://files.pythonhosted.org/packages/d7/30/c19d212b619963c5b460bfed0ea69a092c6a43cba52a973d46c27b3e2975/regex-2025.7.34-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f6e8e7af516a7549412ce57613e859c3be27d55341a894aacaa11703a4c31a", size = 849008, upload-time = "2025-07-31T00:20:11.823Z" },
{ url = "https://files.pythonhosted.org/packages/9e/b8/3c35da3b12c87e3cc00010ef6c3a4ae787cff0bc381aa3d251def219969a/regex-2025.7.34-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:469142fb94a869beb25b5f18ea87646d21def10fbacb0bcb749224f3509476f0", size = 788101, upload-time = "2025-07-31T00:20:13.729Z" },
{ url = "https://files.pythonhosted.org/packages/47/80/2f46677c0b3c2b723b2c358d19f9346e714113865da0f5f736ca1a883bde/regex-2025.7.34-cp313-cp313-win32.whl", hash = "sha256:da7507d083ee33ccea1310447410c27ca11fb9ef18c95899ca57ff60a7e4d8f1", size = 264401, upload-time = "2025-07-31T00:20:15.233Z" },
{ url = "https://files.pythonhosted.org/packages/be/fa/917d64dd074682606a003cba33585c28138c77d848ef72fc77cbb1183849/regex-2025.7.34-cp313-cp313-win_amd64.whl", hash = "sha256:9d644de5520441e5f7e2db63aec2748948cc39ed4d7a87fd5db578ea4043d997", size = 275368, upload-time = "2025-07-31T00:20:16.711Z" },
{ url = "https://files.pythonhosted.org/packages/65/cd/f94383666704170a2154a5df7b16be28f0c27a266bffcd843e58bc84120f/regex-2025.7.34-cp313-cp313-win_arm64.whl", hash = "sha256:7bf1c5503a9f2cbd2f52d7e260acb3131b07b6273c470abb78568174fe6bde3f", size = 268482, upload-time = "2025-07-31T00:20:18.189Z" },
{ url = "https://files.pythonhosted.org/packages/ac/23/6376f3a23cf2f3c00514b1cdd8c990afb4dfbac3cb4a68b633c6b7e2e307/regex-2025.7.34-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:8283afe7042d8270cecf27cca558873168e771183d4d593e3c5fe5f12402212a", size = 485385, upload-time = "2025-07-31T00:20:19.692Z" },
{ url = "https://files.pythonhosted.org/packages/73/5b/6d4d3a0b4d312adbfd6d5694c8dddcf1396708976dd87e4d00af439d962b/regex-2025.7.34-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6c053f9647e3421dd2f5dff8172eb7b4eec129df9d1d2f7133a4386319b47435", size = 289788, upload-time = "2025-07-31T00:20:21.941Z" },
{ url = "https://files.pythonhosted.org/packages/92/71/5862ac9913746e5054d01cb9fb8125b3d0802c0706ef547cae1e7f4428fa/regex-2025.7.34-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a16dd56bbcb7d10e62861c3cd000290ddff28ea142ffb5eb3470f183628011ac", size = 286136, upload-time = "2025-07-31T00:20:26.146Z" },
{ url = "https://files.pythonhosted.org/packages/27/df/5b505dc447eb71278eba10d5ec940769ca89c1af70f0468bfbcb98035dc2/regex-2025.7.34-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:69c593ff5a24c0d5c1112b0df9b09eae42b33c014bdca7022d6523b210b69f72", size = 797753, upload-time = "2025-07-31T00:20:27.919Z" },
{ url = "https://files.pythonhosted.org/packages/86/38/3e3dc953d13998fa047e9a2414b556201dbd7147034fbac129392363253b/regex-2025.7.34-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:98d0ce170fcde1a03b5df19c5650db22ab58af375aaa6ff07978a85c9f250f0e", size = 863263, upload-time = "2025-07-31T00:20:29.803Z" },
{ url = "https://files.pythonhosted.org/packages/68/e5/3ff66b29dde12f5b874dda2d9dec7245c2051f2528d8c2a797901497f140/regex-2025.7.34-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d72765a4bff8c43711d5b0f5b452991a9947853dfa471972169b3cc0ba1d0751", size = 910103, upload-time = "2025-07-31T00:20:31.313Z" },
{ url = "https://files.pythonhosted.org/packages/9e/fe/14176f2182125977fba3711adea73f472a11f3f9288c1317c59cd16ad5e6/regex-2025.7.34-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4494f8fd95a77eb434039ad8460e64d57baa0434f1395b7da44015bef650d0e4", size = 801709, upload-time = "2025-07-31T00:20:33.323Z" },
{ url = "https://files.pythonhosted.org/packages/5a/0d/80d4e66ed24f1ba876a9e8e31b709f9fd22d5c266bf5f3ab3c1afe683d7d/regex-2025.7.34-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4f42b522259c66e918a0121a12429b2abcf696c6f967fa37bdc7b72e61469f98", size = 786726, upload-time = "2025-07-31T00:20:35.252Z" },
{ url = "https://files.pythonhosted.org/packages/12/75/c3ebb30e04a56c046f5c85179dc173818551037daae2c0c940c7b19152cb/regex-2025.7.34-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:aaef1f056d96a0a5d53ad47d019d5b4c66fe4be2da87016e0d43b7242599ffc7", size = 857306, upload-time = "2025-07-31T00:20:37.12Z" },
{ url = "https://files.pythonhosted.org/packages/b1/b2/a4dc5d8b14f90924f27f0ac4c4c4f5e195b723be98adecc884f6716614b6/regex-2025.7.34-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:656433e5b7dccc9bc0da6312da8eb897b81f5e560321ec413500e5367fcd5d47", size = 848494, upload-time = "2025-07-31T00:20:38.818Z" },
{ url = "https://files.pythonhosted.org/packages/0d/21/9ac6e07a4c5e8646a90b56b61f7e9dac11ae0747c857f91d3d2bc7c241d9/regex-2025.7.34-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e91eb2c62c39705e17b4d42d4b86c4e86c884c0d15d9c5a47d0835f8387add8e", size = 787850, upload-time = "2025-07-31T00:20:40.478Z" },
{ url = "https://files.pythonhosted.org/packages/be/6c/d51204e28e7bc54f9a03bb799b04730d7e54ff2718862b8d4e09e7110a6a/regex-2025.7.34-cp314-cp314-win32.whl", hash = "sha256:f978ddfb6216028c8f1d6b0f7ef779949498b64117fc35a939022f67f810bdcb", size = 269730, upload-time = "2025-07-31T00:20:42.253Z" },
{ url = "https://files.pythonhosted.org/packages/74/52/a7e92d02fa1fdef59d113098cb9f02c5d03289a0e9f9e5d4d6acccd10677/regex-2025.7.34-cp314-cp314-win_amd64.whl", hash = "sha256:4b7dc33b9b48fb37ead12ffc7bdb846ac72f99a80373c4da48f64b373a7abeae", size = 278640, upload-time = "2025-07-31T00:20:44.42Z" },
{ url = "https://files.pythonhosted.org/packages/d1/78/a815529b559b1771080faa90c3ab401730661f99d495ab0071649f139ebd/regex-2025.7.34-cp314-cp314-win_arm64.whl", hash = "sha256:4b8c4d39f451e64809912c82392933d80fe2e4a87eeef8859fcc5380d0173c64", size = 271757, upload-time = "2025-07-31T00:20:46.355Z" },
]

[[package]]
name = "requests"
version = "2.32.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "certifi" },
    { name = "charset-normalizer" },
    { name = "idna" },
    { name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
]

[package.optional-dependencies]
socks = [
    { name = "pysocks" },
]

[[package]]
name = "six"
version = "1.17.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
]

[[package]]
name = "soupsieve"
version = "2.8"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" },
]

[[package]]
name = "stack-data"
version = "0.6.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "asttokens" },
    { name = "executing" },
    { name = "pure-eval" },
]
sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" },
]

[[package]]
name = "tqdm"
version = "4.67.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
]

[[package]]
name = "traitlets"
version = "5.14.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" },
]

[[package]]
name = "typing-extensions"
version = "4.15.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
]

[[package]]
name = "urllib3"
version = "2.5.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
]

[[package]]
name = "wcwidth"
version = "0.2.13"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301, upload-time = "2024-01-06T02:10:57.829Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166, upload-time = "2024-01-06T02:10:55.763Z" },
]

[[package]]
name = "zss"
version = "1.2.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "six" },
]
sdist = { url = "https://files.pythonhosted.org/packages/1e/d1/ed34d12f55d07cc1efb61d74fb2f64f46a705557f5bdd1ef1b810f0e2ec5/zss-1.2.0.tar.gz", hash = "sha256:07bb937441929ccb82961f4f7b80fbce9e2b20d0e46ddcbcbc1fcb094f585b50", size = 9790, upload-time = "2018-03-12T15:02:20.208Z" }