From a1c7744a0597e0b54a92c66af88993c913c50b98 Mon Sep 17 00:00:00 2001
From: Nicolas Hug
Date: Mon, 17 Jan 2022 14:15:39 +0000
Subject: [PATCH 1/5] Add support for PCAM dataset

---
 .../unittest/linux/scripts/environment.yml   |   1 +
 .../unittest/windows/scripts/environment.yml |   1 +
 docs/source/datasets.rst                     |   1 +
 test/datasets_utils.py                       |   1 +
 test/test_datasets.py                        |  23 +++
 torchvision/datasets/__init__.py             |   1 +
 torchvision/datasets/oxford_iiit_pet.py      |   4 +-
 torchvision/datasets/pcam.py                 | 136 ++++++++++++++++++
 8 files changed, 166 insertions(+), 2 deletions(-)
 create mode 100644 torchvision/datasets/pcam.py

diff --git a/.circleci/unittest/linux/scripts/environment.yml b/.circleci/unittest/linux/scripts/environment.yml
index df65f034076..3c8fd4d966c 100644
--- a/.circleci/unittest/linux/scripts/environment.yml
+++ b/.circleci/unittest/linux/scripts/environment.yml
@@ -9,6 +9,7 @@ dependencies:
   - libpng
   - jpeg
   - ca-certificates
+  - h5py
   - pip:
     - future
     - pillow >=5.3.0, !=8.3.*
diff --git a/.circleci/unittest/windows/scripts/environment.yml b/.circleci/unittest/windows/scripts/environment.yml
index 43a33588995..1a274bfc41d 100644
--- a/.circleci/unittest/windows/scripts/environment.yml
+++ b/.circleci/unittest/windows/scripts/environment.yml
@@ -9,6 +9,7 @@ dependencies:
   - libpng
   - jpeg
   - ca-certificates
+  - h5py
   - pip:
     - future
     - pillow >=5.3.0, !=8.3.*
diff --git a/docs/source/datasets.rst b/docs/source/datasets.rst
index c7bdff274f0..4feac3abfdf 100644
--- a/docs/source/datasets.rst
+++ b/docs/source/datasets.rst
@@ -66,6 +66,7 @@ You can also create your own datasets using the provided :ref:`base classes `_.
+
+    The PatchCamelyon dataset is a binray classification dataset with 327,680
+    color images (96 x 96px), extracted from histopathologic scans of lymph node
+    sections. Each image is annoted with a binary label indicating presence of
+    metastatic tissue.
+
+    This dataset requires the ``h5py`` package which you can install with ``pip install h5py``.
+
+    Args:
+        root (string): Root directory of the dataset.
+        split (string, optional): The dataset split, supports ``"trai"`` (default), ``"test"`` or ``"val"``.
+        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
+            version. E.g, ``transforms.RandomCrop``.
+        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
+        download (bool, optional): If True, downloads the dataset from the internet and puts it into ``root/pcam``. If
+            dataset is already downloaded, it is not downloaded again.
+ """ + + _FILES = { + "train": { + "images": ( + "camelyonpatch_level_2_split_train_x.h5", # Data file name + "1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2", # Google Drive ID + "1571f514728f59376b705fc836ff4b63", # md5 hash + ), + "targets": ( + "camelyonpatch_level_2_split_train_y.h5", + "1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG", + "35c2d7259d906cfc8143347bb8e05be7", + ), + }, + "test": { + "images": ( + "camelyonpatch_level_2_split_test_x.h5", + "1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_", + "d5b63470df7cfa627aeec8b9dc0c066e", + ), + "targets": ( + "camelyonpatch_level_2_split_test_y.h5", + "17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP", + "2b85f58b927af9964a4c15b8f7e8f179", + ), + }, + "val": { + "images": ( + "camelyonpatch_level_2_split_valid_x.h5", + "1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3", + "d8c2d60d490dbd479f8199bdfa0cf6ec", + ), + "targets": ( + "camelyonpatch_level_2_split_valid_y.h5", + "1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO", + "60a7035772fbdb7f34eb86d4420cf66a", + ), + }, + } + + def __init__( + self, + root: str, + split: str = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = True, + ): + try: + import h5py + + self.h5py = h5py + except ImportError: + raise RuntimeError( + "h5py is not found. This dataset needs to have h5py installed: please run pip install h5py" + ) + + self._split = verify_str_arg(split, "split", ("train", "test", "val")) + + super().__init__(root, transform=transform, target_transform=target_transform) + self._base_folder = pathlib.Path(self.root) / "pcam" + + if download: + self._download() + + if not self._check_exists(): + raise RuntimeError("Dataset not found. You can use download=True to download it") + + self.classes = ["neg", "pos"] + self.classes_to_idx = {"neg": 0, "pos": 1} + + def __len__(self) -> int: + images_file = self._FILES[self._split]["images"][0] + with self.h5py.File(self._base_folder / images_file) as images_data: + print(images_data.keys()) + return images_data["x"].shape[0] + + def __getitem__(self, idx: int) -> Tuple[Any, Any]: + images_file = self._FILES[self._split]["images"][0] + with self.h5py.File(self._base_folder / images_file) as images_data: + image = Image.fromarray(images_data["x"][idx]).convert("RGB") + + targets_file = self._FILES[self._split]["targets"][0] + with self.h5py.File(self._base_folder / targets_file) as targets_data: + target = int(targets_data["y"][idx, 0, 0, 0]) # shape is [num_images, 1, 1, 1] + + if self.transform: + image = self.transform(image) + if self.target_transform: + target = self.target_transform(target) + + return image, target + + def _check_exists(self) -> bool: + images_file = self._FILES[self._split]["images"][0] + targets_file = self._FILES[self._split]["targets"][0] + return all((os.path.exists(str(self._base_folder / h5_file)) for h5_file in (images_file, targets_file))) + + def _download(self) -> None: + if self._check_exists(): + return + + for file_name, file_id, md5 in self._FILES[self._split].values(): + archive_name = file_name + ".gz" + download_file_from_google_drive(file_id, str(self._base_folder), filename=archive_name, md5=md5) + extract_archive(str(self._base_folder / archive_name)) From 8a0dfb4980e331f5d3e7637bc05cc58df4e00f2f Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Mon, 17 Jan 2022 14:38:24 +0000 Subject: [PATCH 2/5] mypy --- torchvision/datasets/pcam.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/datasets/pcam.py b/torchvision/datasets/pcam.py index 7befed770d5..e3afc68ebf7 100644 --- 
a/torchvision/datasets/pcam.py +++ b/torchvision/datasets/pcam.py @@ -77,7 +77,7 @@ def __init__( download: bool = True, ): try: - import h5py + import h5py # type: ignore[import] self.h5py = h5py except ImportError: From 3ba4d82128a42fd6d19a5001f7240f323f53e85e Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Mon, 17 Jan 2022 15:15:39 +0000 Subject: [PATCH 3/5] Apply suggestions from code review Co-authored-by: Philip Meier --- torchvision/datasets/pcam.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/torchvision/datasets/pcam.py b/torchvision/datasets/pcam.py index e3afc68ebf7..a39ccd789d4 100644 --- a/torchvision/datasets/pcam.py +++ b/torchvision/datasets/pcam.py @@ -12,16 +12,16 @@ class PCAM(VisionDataset): """`PCAM Dataset `_. - The PatchCamelyon dataset is a binray classification dataset with 327,680 - color images (96 x 96px), extracted from histopathologic scans of lymph node - sections. Each image is annoted with a binary label indicating presence of + The PatchCamelyon dataset is a binary classification dataset with 327,680 + color images (96px x 96px), extracted from histopathologic scans of lymph node + sections. Each image is annotated with a binary label indicating presence of metastatic tissue. This dataset requires the ``h5py`` package which you can install with ``pip install h5py``. Args: root (string): Root directory of the dataset. - split (string, optional): The dataset split, supports ``"trai"`` (default), ``"test"`` or ``"val"``. + split (string, optional): The dataset split, supports ``"train"`` (default), ``"test"`` or ``"val"``. transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``. target_transform (callable, optional): A function/transform that takes in the target and transforms it. @@ -102,7 +102,6 @@ def __init__( def __len__(self) -> int: images_file = self._FILES[self._split]["images"][0] with self.h5py.File(self._base_folder / images_file) as images_data: - print(images_data.keys()) return images_data["x"].shape[0] def __getitem__(self, idx: int) -> Tuple[Any, Any]: @@ -124,7 +123,7 @@ def __getitem__(self, idx: int) -> Tuple[Any, Any]: def _check_exists(self) -> bool: images_file = self._FILES[self._split]["images"][0] targets_file = self._FILES[self._split]["targets"][0] - return all((os.path.exists(str(self._base_folder / h5_file)) for h5_file in (images_file, targets_file))) + return all(self._base_folder.joinpath(h5_file).exists() for h5_file in (images_file, targets_file))) def _download(self) -> None: if self._check_exists(): From 95044d632ab6ca5f8cc623b60f85f496c641de57 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Mon, 17 Jan 2022 15:17:29 +0000 Subject: [PATCH 4/5] Remove classes and class_to_idx attributes --- torchvision/datasets/pcam.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/torchvision/datasets/pcam.py b/torchvision/datasets/pcam.py index a39ccd789d4..dd530b38df3 100644 --- a/torchvision/datasets/pcam.py +++ b/torchvision/datasets/pcam.py @@ -1,5 +1,3 @@ -import os -import os.path import pathlib from typing import Any, Callable, Optional, Tuple @@ -19,7 +17,7 @@ class PCAM(VisionDataset): This dataset requires the ``h5py`` package which you can install with ``pip install h5py``. - Args: + Args: root (string): Root directory of the dataset. split (string, optional): The dataset split, supports ``"train"`` (default), ``"test"`` or ``"val"``. 
         transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
@@ -96,9 +94,6 @@ def __init__(
         if not self._check_exists():
             raise RuntimeError("Dataset not found. You can use download=True to download it")

-        self.classes = ["neg", "pos"]
-        self.classes_to_idx = {"neg": 0, "pos": 1}
-
     def __len__(self) -> int:
         images_file = self._FILES[self._split]["images"][0]
         with self.h5py.File(self._base_folder / images_file) as images_data:
@@ -123,7 +118,7 @@ def __getitem__(self, idx: int) -> Tuple[Any, Any]:
     def _check_exists(self) -> bool:
         images_file = self._FILES[self._split]["images"][0]
         targets_file = self._FILES[self._split]["targets"][0]
-        return all(self._base_folder.joinpath(h5_file).exists() for h5_file in (images_file, targets_file)))
+        return all(self._base_folder.joinpath(h5_file).exists() for h5_file in (images_file, targets_file))

     def _download(self) -> None:
         if self._check_exists():

From f95f64ec5455ff851f7805582b24c8576dcd24e0 Mon Sep 17 00:00:00 2001
From: Nicolas Hug
Date: Mon, 17 Jan 2022 15:21:30 +0000
Subject: [PATCH 5/5] Use _decompress

---
 torchvision/datasets/pcam.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/torchvision/datasets/pcam.py b/torchvision/datasets/pcam.py
index dd530b38df3..f9b9b6817bf 100644
--- a/torchvision/datasets/pcam.py
+++ b/torchvision/datasets/pcam.py
@@ -3,7 +3,7 @@

 from PIL import Image

-from .utils import download_file_from_google_drive, extract_archive, verify_str_arg
+from .utils import download_file_from_google_drive, _decompress, verify_str_arg
 from .vision import VisionDataset
@@ -127,4 +127,4 @@ def _download(self) -> None:
         for file_name, file_id, md5 in self._FILES[self._split].values():
             archive_name = file_name + ".gz"
             download_file_from_google_drive(file_id, str(self._base_folder), filename=archive_name, md5=md5)
-            extract_archive(str(self._base_folder / archive_name))
+            _decompress(str(self._base_folder / archive_name))
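For readers who want to try the dataset class introduced by this series, here is a minimal usage sketch. It assumes the class ends up exported as torchvision.datasets.PCAM (the torchvision/datasets/__init__.py change is listed in the diffstat but not shown above), that h5py is installed, and that root="data" is a writable local directory; the root path and the transform are illustrative placeholders, not part of the patches.

    # Minimal PCAM usage sketch (illustrative; not part of the patch series).
    from torchvision import datasets, transforms

    # Any transform accepted by the `transform` argument works; ToTensor() simply
    # converts the 96 x 96 RGB PIL image into a float tensor.
    transform = transforms.ToTensor()

    # download=True (also the default in the patch) fetches the split's HDF5 files
    # from Google Drive into root/pcam and decompresses them.
    train_set = datasets.PCAM(root="data", split="train", transform=transform, download=True)

    image, target = train_set[0]  # image: 3 x 96 x 96 tensor; target: 0 or 1 (presence of metastatic tissue)
    print(len(train_set), image.shape, target)

Note that, as written in the patch, __len__ and __getitem__ each reopen the split's HDF5 file inside a with block, so the example above works without holding any file handle open between accesses.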