| """ |
| Inspired from |
| https://huggingface.co/datasets/ydshieh/coco_dataset_script/blob/main/coco_dataset_script.py |
| """ |

import json
import os

import datasets


class COCOBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for DocLayNet; ``splits`` lists the dataset splits to generate."""

    def __init__(self, name, splits, **kwargs):
        super().__init__(name, **kwargs)
        self.splits = splits


_CITATION = """\
@article{doclaynet2022,
  title = {DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis},
  doi = {10.1145/3534678.3539043},
  url = {https://arxiv.org/abs/2206.01062},
  author = {Pfitzmann, Birgit and Auer, Christoph and Dolfi, Michele and Nassar, Ahmed S and Staar, Peter W J},
  year = {2022}
}
"""

_DESCRIPTION = """\
DocLayNet is a human-annotated document layout segmentation dataset from a broad variety of document sources.
"""

_HOMEPAGE = "https://developer.ibm.com/exchanges/data/all/doclaynet/"

_LICENSE = "CDLA-Permissive-1.0"

_URLs = {
    "core": "https://codait-cos-dax.s3.us.cloud-object-storage.appdomain.cloud/dax-doclaynet/1.0.0/DocLayNet_core.zip",
}
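
# Note: ``dl_manager.download_and_extract(_URLs)`` in ``_split_generators`` below
# returns a dict with the same keys as ``_URLs`` ("core"), each mapped to the
# local path of the extracted archive.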


class COCODataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for DocLayNet, which is distributed with COCO-style annotation files."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = COCOBuilderConfig
    BUILDER_CONFIGS = [
        COCOBuilderConfig(name='2022.08', splits=['train', 'val', 'test']),
    ]
    DEFAULT_CONFIG_NAME = "2022.08"

    def _info(self):
        feature_dict = {
            "id": datasets.Value("int64"),
            "height": datasets.Value("int64"),
            "width": datasets.Value("int64"),
            "file_name": datasets.Value("string"),
            # These two mirror the extra keys yielded by `_generate_examples` below.
            "image_id": datasets.Value("int64"),
            "image_path": datasets.Value("string"),
            "doc_category": datasets.Value("string"),
            "collection": datasets.Value("string"),
            "doc_name": datasets.Value("string"),
            "page_no": datasets.Value("int64"),
        }

        features = datasets.Features(feature_dict)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the DocLayNet core archive and get the local extraction path.
        archive_path = dl_manager.download_and_extract(_URLs)

        splits = []
        for split in self.config.splits:
            if split == 'train':
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "json_path": os.path.join(archive_path["core"], "COCO", "train.json"),
                        "image_dir": os.path.join(archive_path["core"], "PNG"),
                        "split": "train",
                    },
                )
            elif split in ['val', 'valid', 'validation', 'dev']:
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "json_path": os.path.join(archive_path["core"], "COCO", "val.json"),
                        "image_dir": os.path.join(archive_path["core"], "PNG"),
                        "split": "val",
                    },
                )
            elif split == 'test':
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "json_path": os.path.join(archive_path["core"], "COCO", "test.json"),
                        "image_dir": os.path.join(archive_path["core"], "PNG"),
                        "split": "test",
                    },
                )
            else:
                continue

            splits.append(dataset)

        return splits

    def _generate_examples(self, json_path, image_dir, split):
        """Yields examples as (key, example) tuples."""
        _features = ["image_id", "image_path", "doc_category", "collection", "height", "width", "file_name", "doc_name", "page_no", "id"]
        features = list(_features)

        with open(json_path, 'r', encoding='UTF-8') as fp:
            data = json.load(fp)

        images = data["images"]
        entries = images

        # Index image records by id so annotations can be joined with image metadata.
        d = {image["id"]: image for image in images}

        if split in ["train", "val"]:
            annotations = data["annotations"]

            # Merge each annotation with its image record, preserving the annotation id.
            for annotation in annotations:
                _id = annotation["id"]
                image_info = d[annotation["image_id"]]
                annotation.update(image_info)
                annotation["id"] = _id

            entries = annotations

        for id_, entry in enumerate(entries):
            # Keep only the declared feature keys.
            entry = {k: v for k, v in entry.items() if k in features}

            # Test entries come from the "images" list, so their "id" is the image id.
            if split == "test":
                entry["image_id"] = entry["id"]
                entry["id"] = -1

            entry["image_path"] = os.path.join(image_dir, entry["file_name"])

            # Re-order keys to match ``_features``.
            entry = {k: entry[k] for k in _features if k in entry}

            yield str((entry["image_id"], entry["id"])), entry
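

# Each yielded example stores ``image_path`` rather than a decoded image; a short
# post-processing sketch (assumes Pillow is installed and the dataset was loaded
# as ``ds`` via ``load_dataset`` as in the header comment):
#
#     from PIL import Image
#     example = ds["train"][0]
#     page = Image.open(example["image_path"])
#     print(example["doc_category"], page.size)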
|