Upload get_bigdocs_75m.py

get_bigdocs_75m.py  ADDED  (+128 -0)
@@ -0,0 +1,128 @@
from typing import Optional
import datasets
import io
import PIL.Image
import PIL.PngImagePlugin
import os
import hashlib
import warnings


ASSEMBLED_COLUMNS = (
    'sample_id',
    'dataset_name',
    'task_name',
    'query',
    'annotations',
    'image',
    'query_info',
    'annotations_info',
    'image_info',
    'image_sha256'
)


def _hash_bytes(b: bytes) -> str:
    m = hashlib.sha256()
    m.update(b)
    return m.hexdigest()
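
# Note: on_disk_processor (defined inside get_bigdocs_75m below) hashes the
# PNG-encoded bytes of each reloaded image, not its raw pixels, so the
# image_sha256 values recorded in the dataset are presumably digests of the
# same PNG encoding.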


def get_bigdocs_75m(
    formal_name: str,
    user_local_path: Optional[str],
    load_from_cache_file: Optional[bool] = None,
    num_proc: Optional[int] = None,
    raise_on_missing: Optional[bool] = None,
    skip_bad_sha256: Optional[bool] = None,
    bigdocs_load_dataset_kwargs: Optional[dict] = None
) -> datasets.DatasetDict:
    """
    Get a subset of BigDocs-7.5M.

    Some parts of BigDocs-7.5M are distributed without their "image" column
    and instead have an "img_id" column. The present function substitutes
    such images back in.

    For the following `formal_name` values, the user is responsible for
    downloading the corresponding dataset and for specifying its location
    through `user_local_path`:

    - COCOtext: http://images.cocodataset.org/zips/train2014.zip
    - pubtables-1m: https://www.microsoft.com/en-us/research/publication/pubtables-1m
    - TextOCR: https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip

    Args:
        formal_name (`str`): The name of the BigDocs-7.5M subset to augment with local images.
        user_local_path (`Optional[str]`): The local path containing the images to be linked.
        load_from_cache_file (`Optional[bool]`, defaults to `None`): Passed to `map`, `filter` and the like.
        num_proc (`Optional[int]`, defaults to `None`): Passed to `map`, `filter` and the like.
        raise_on_missing (`Optional[bool]`, defaults to `None`):
            Determines what to do when there is an error loading an image.
            - `True`: raise an error.
            - `None`: print a warning and skip the sample (default).
            - `False`: silently skip the sample.
        skip_bad_sha256 (`Optional[bool]`, defaults to `None`):
            Determines what to do when the sha256 integrity test fails.
            - `True`: skip the sha256 integrity test entirely.
            - `None`: print a warning and skip samples with a bad sha256 (default).
            - `False`: silently skip samples with a bad sha256.
        bigdocs_load_dataset_kwargs (`Optional[dict]`, defaults to `None`): Arguments passed to `datasets.load_dataset`.
    """
    if bigdocs_load_dataset_kwargs is None:
        bigdocs_load_dataset_kwargs = {}
    unprocessed = datasets.load_dataset("ServiceNow/BigDocs-7.5M", formal_name, **bigdocs_load_dataset_kwargs)

    def on_disk_processor(sample):
        img_path = os.path.join(user_local_path, sample['img_id'])
        # Load the image from the user-provided local directory.
        try:
            image = PIL.Image.open(img_path)
        except Exception as e:
            if raise_on_missing:
                raise RuntimeError(f"Error loading image at {img_path}\n{e}") from e
            if raise_on_missing is None:
                warnings.warn(f"Skipping due to error loading image {img_path}", RuntimeWarning)
            image = None  # Sample will be filtered out.
        if image is not None:
            # Re-encode into `buffer` using the PNG image format.
            buffer = io.BytesIO()
            image.save(buffer, "png")
            # Reload the image with guaranteed PNG format.
            image = PIL.Image.open(buffer)
            # Check the sha256 of the PNG-encoded bytes.
            if not skip_bad_sha256:
                sha256 = _hash_bytes(buffer.getvalue())
                if sha256 != sample["image_sha256"]:
                    image = None  # Sample will be filtered out.
                    if skip_bad_sha256 is None:
                        warnings.warn(f"Skipping due to bad sha256 for {img_path}", RuntimeWarning)
        return {"image": image}

    # Get the correct processor.
    try:
        processor = {
            "COCOtext": on_disk_processor,
            "pubtables-1m": on_disk_processor,
            "TextOCR": on_disk_processor,
        }[formal_name]
    except KeyError:
        raise ValueError(f"Unknown formal_name: {formal_name}")
    if processor is on_disk_processor:
        assert user_local_path is not None, f"user_local_path is mandatory for formal_name={formal_name}"

    if processor is None:
        processed = unprocessed
    else:
        processed = unprocessed.map(
            processor,
            remove_columns="img_id",
            load_from_cache_file=load_from_cache_file,
            num_proc=num_proc
        )
    # Drop samples whose image is missing or failed the integrity test.
    if not raise_on_missing:
        processed = processed.filter((lambda image: image is not None), input_columns="image", num_proc=num_proc)
    # Enforce a consistent column order.
    processed = processed.select_columns(list(ASSEMBLED_COLUMNS))

    return processed
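

# Example usage (a sketch): assumes the TextOCR archive listed in the
# docstring has already been downloaded and extracted; the path below is a
# placeholder for that local directory.
if __name__ == "__main__":
    bigdocs_textocr = get_bigdocs_75m(
        "TextOCR",
        user_local_path="/path/to/textocr/train_images",
        num_proc=4,
    )
    print(bigdocs_textocr)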