
Commit db0fd27

datumbox, pmeier, and thommaa (Anton Thomma) authored
Update CI on Multiweight branch to use the new weight download approach (#5628)
* port Pad to prototype transforms (#5621)
  * port Pad to prototype transforms
  * use literal
* Bump up LibTorchvision version number for Podspec to release Cocoapods (#5624)
  Co-authored-by: Anton Thomma <anton@pri.co.nz>
  Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
* pre-download model weights in CI docs build (#5625)
  * pre-download model weights in CI docs build
  * move changes into template
  * change docs image
  * Regenerated config.yml
  Co-authored-by: Philip Meier <github.pmeier@posteo.de>
Co-authored-by: Anton Thomma <11010310+thommaa@users.noreply.github.com>
Co-authored-by: Anton Thomma <anton@pri.co.nz>
1 parent 189cbc8 commit db0fd27

File tree: 7 files changed (+120 −59 lines)


.circleci/config.yml

Lines changed: 23 additions & 9 deletions
Some generated files are not rendered by default.

.circleci/config.yml.in

Lines changed: 23 additions & 9 deletions
@@ -174,6 +174,26 @@ commands:
       - store_test_results:
           path: test-results
 
+  download_model_weights:
+    parameters:
+      extract_roots:
+        type: string
+        default: "torchvision/models"
+      background:
+        type: boolean
+        default: true
+    steps:
+      - apt_install:
+          args: parallel wget
+          descr: Install download utilities
+      - run:
+          name: Download model weights
+          background: << parameters.background >>
+          command: |
+            mkdir -p ~/.cache/torch/hub/checkpoints
+            python scripts/collect_model_urls.py << parameters.extract_roots >> \
+              | parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci'
+
 binary_common: &binary_common
   parameters:
     # Edit these defaults to do a release

@@ -354,14 +374,7 @@ jobs:
     resource_class: xlarge
     steps:
       - checkout
-      - run:
-          name: Download model weights
-          background: true
-          command: |
-            sudo apt update -qy && sudo apt install -qy parallel wget
-            mkdir -p ~/.cache/torch/hub/checkpoints
-            python scripts/collect_model_urls.py torchvision/models \
-              | parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci'
+      - download_model_weights
       - install_torchvision
       - run:
           name: Enable extended tests

@@ -1021,12 +1034,13 @@ jobs:
   build_docs:
     <<: *binary_common
     docker:
-      - image: "pytorch/manylinux-cuda100"
+      - image: circleci/python:3.7
     resource_class: 2xlarge+
     steps:
       - attach_workspace:
           at: ~/workspace
       - checkout
+      - download_model_weights
       - run:
           name: Setup
           command: .circleci/unittest/linux/scripts/setup_env.sh
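
The new download_model_weights command is what the unittest and docs jobs now share: it scrapes every download.pytorch.org/models/*.pth URL out of the given source roots and pre-fills torch hub's checkpoint cache, so models built later in the job find their weights locally instead of fetching them one by one. For readers who don't speak CircleCI, here is a rough Python equivalent of the wget/GNU-parallel pipeline (a sketch only: the thread pool stands in for parallel -j0, and the ?source=ci query string is kept so that, presumably, CI downloads can be told apart from user traffic):

import os
import subprocess
import urllib.request
from concurrent.futures import ThreadPoolExecutor


def download_model_weights(extract_roots: str = "torchvision/models") -> None:
    # Same cache directory that torch.hub checks before downloading weights.
    cache = os.path.expanduser("~/.cache/torch/hub/checkpoints")
    os.makedirs(cache, exist_ok=True)

    # scripts/collect_model_urls.py prints one weight URL per line.
    urls = subprocess.run(
        ["python", "scripts/collect_model_urls.py", *extract_roots.split()],
        capture_output=True,
        text=True,
        check=True,
    ).stdout.split()

    def fetch(url: str) -> None:
        target = os.path.join(cache, os.path.basename(url))
        urllib.request.urlretrieve(f"{url}?source=ci", target)

    # parallel -j0 runs all downloads concurrently; a thread pool approximates that.
    with ThreadPoolExecutor(max_workers=8) as pool:
        list(pool.map(fetch, urls))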

ios/LibTorchvision.podspec

Lines changed: 2 additions & 2 deletions
@@ -1,8 +1,8 @@
-pytorch_version = '1.10.0'
+pytorch_version = '1.11.0'
 
 Pod::Spec.new do |s|
     s.name = 'LibTorchvision'
-    s.version = '0.11.1'
+    s.version = '0.12.0'
     s.authors = 'PyTorch Team'
     s.license = { :type => 'BSD' }
     s.homepage = 'https://github.com/pytorch/vision'

scripts/collect_model_urls.py

Lines changed: 8 additions & 10 deletions
@@ -2,21 +2,19 @@
 import re
 import sys
 
-MODEL_URL_PATTERN = re.compile(r"https://download[.]pytorch[.]org/models/.*?[.]pth")
+MODEL_URL_PATTERN = re.compile(r"https://download[.]pytorch[.]org/models/.+?[.]pth")
 
 
-def main(root):
+def main(*roots):
     model_urls = set()
-    for path in pathlib.Path(root).glob("**/*"):
-        if path.name.startswith("_") or not path.suffix == ".py":
-            continue
-
-        with open(path, "r") as file:
-            for line in file:
-                model_urls.update(MODEL_URL_PATTERN.findall(line))
+    for root in roots:
+        for path in pathlib.Path(root).rglob("*.py"):
+            with open(path, "r") as file:
+                for line in file:
+                    model_urls.update(MODEL_URL_PATTERN.findall(line))
 
     print("\n".join(sorted(model_urls)))
 
 
 if __name__ == "__main__":
-    main(sys.argv[1])
+    main(*sys.argv[1:])
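
Two behavioral changes hide in this small diff: the URL pattern now uses .+? instead of .*?, so a degenerate URL with an empty file name (models/.pth) no longer matches, and main() accepts any number of roots from the command line, which is what lets the CI command pass a space-separated extract_roots parameter. Note also that rglob("*.py") drops the old exclusion of _-prefixed files, so private modules are scanned too. A standalone check of the tightened pattern (the second URL is a made-up degenerate case for illustration):

import re

MODEL_URL_PATTERN = re.compile(r"https://download[.]pytorch[.]org/models/.+?[.]pth")

text = """
model_urls = {"resnet18": "https://download.pytorch.org/models/resnet18-f37072fd.pth"}
broken = "https://download.pytorch.org/models/.pth"
"""

# Only the URL with a non-empty file name matches; under the old .*?
# pattern, the empty-name URL would have matched as well.
print(MODEL_URL_PATTERN.findall(text))
# ['https://download.pytorch.org/models/resnet18-f37072fd.pth']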

test/test_prototype_transforms.py

Lines changed: 1 addition & 0 deletions
@@ -71,6 +71,7 @@ class TestSmoke:
         transforms.CenterCrop([16, 16]),
         transforms.ConvertImageDtype(),
         transforms.RandomHorizontalFlip(),
+        transforms.Pad(5),
     )
     def test_common(self, transform, input):
         transform(input)

torchvision/prototype/transforms/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -13,6 +13,7 @@
     TenCrop,
     BatchMultiCrop,
     RandomHorizontalFlip,
+    Pad,
     RandomZoomOut,
 )
 from ._meta import ConvertBoundingBoxFormat, ConvertImageDtype, ConvertImageColorSpace

torchvision/prototype/transforms/_geometry.py

Lines changed: 62 additions & 29 deletions
@@ -1,5 +1,6 @@
 import collections.abc
 import math
+import numbers
 import warnings
 from typing import Any, Dict, List, Union, Sequence, Tuple, cast
 
@@ -9,6 +10,7 @@
 from torchvision.prototype.transforms import Transform, functional as F
 from torchvision.transforms.functional import pil_to_tensor, InterpolationMode
 from torchvision.transforms.transforms import _setup_size, _interpolation_modes_from_int
+from typing_extensions import Literal
 
 from ._utils import query_image, get_image_dimensions, has_any, is_simple_tensor
 
@@ -272,42 +274,31 @@ def apply_recursively(obj: Any) -> Any:
     return apply_recursively(inputs if len(inputs) > 1 else inputs[0])
 
 
-class RandomZoomOut(Transform):
+class Pad(Transform):
     def __init__(
-        self, fill: Union[float, Sequence[float]] = 0.0, side_range: Tuple[float, float] = (1.0, 4.0), p: float = 0.5
+        self,
+        padding: Union[int, Sequence[int]],
+        fill: Union[float, Sequence[float]] = 0.0,
+        padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
     ) -> None:
         super().__init__()
+        if not isinstance(padding, (numbers.Number, tuple, list)):
+            raise TypeError("Got inappropriate padding arg")
 
-        if fill is None:
-            fill = 0.0
-        self.fill = fill
-
-        self.side_range = side_range
-        if side_range[0] < 1.0 or side_range[0] > side_range[1]:
-            raise ValueError(f"Invalid canvas side range provided {side_range}.")
-
-        self.p = p
-
-    def _get_params(self, sample: Any) -> Dict[str, Any]:
-        image = query_image(sample)
-        orig_c, orig_h, orig_w = get_image_dimensions(image)
-
-        r = self.side_range[0] + torch.rand(1) * (self.side_range[1] - self.side_range[0])
-        canvas_width = int(orig_w * r)
-        canvas_height = int(orig_h * r)
+        if not isinstance(fill, (numbers.Number, str, tuple, list)):
+            raise TypeError("Got inappropriate fill arg")
 
-        r = torch.rand(2)
-        left = int((canvas_width - orig_w) * r[0])
-        top = int((canvas_height - orig_h) * r[1])
-        right = canvas_width - (left + orig_w)
-        bottom = canvas_height - (top + orig_h)
-        padding = [left, top, right, bottom]
+        if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
+            raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
 
-        fill = self.fill
-        if not isinstance(fill, collections.abc.Sequence):
-            fill = [fill] * orig_c
+        if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:
+            raise ValueError(
+                f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple"
+            )
 
-        return dict(padding=padding, fill=fill)
+        self.padding = padding
+        self.fill = fill
+        self.padding_mode = padding_mode
 
     def _transform(self, input: Any, params: Dict[str, Any]) -> Any:
         if isinstance(input, features.Image) or is_simple_tensor(input):
 
@@ -349,6 +340,48 @@ def _transform(self, input: Any, params: Dict[str, Any]) -> Any:
         else:
             return input
 
+
+class RandomZoomOut(Transform):
+    def __init__(
+        self, fill: Union[float, Sequence[float]] = 0.0, side_range: Tuple[float, float] = (1.0, 4.0), p: float = 0.5
+    ) -> None:
+        super().__init__()
+
+        if fill is None:
+            fill = 0.0
+        self.fill = fill
+
+        self.side_range = side_range
+        if side_range[0] < 1.0 or side_range[0] > side_range[1]:
+            raise ValueError(f"Invalid canvas side range provided {side_range}.")
+
+        self.p = p
+
+    def _get_params(self, sample: Any) -> Dict[str, Any]:
+        image = query_image(sample)
+        orig_c, orig_h, orig_w = get_image_dimensions(image)
+
+        r = self.side_range[0] + torch.rand(1) * (self.side_range[1] - self.side_range[0])
+        canvas_width = int(orig_w * r)
+        canvas_height = int(orig_h * r)
+
+        r = torch.rand(2)
+        left = int((canvas_width - orig_w) * r[0])
+        top = int((canvas_height - orig_h) * r[1])
+        right = canvas_width - (left + orig_w)
+        bottom = canvas_height - (top + orig_h)
+        padding = [left, top, right, bottom]
+
+        fill = self.fill
+        if not isinstance(fill, collections.abc.Sequence):
+            fill = [fill] * orig_c
+
+        return dict(padding=padding, fill=fill)
+
+    def _transform(self, input: Any, params: Dict[str, Any]) -> Any:
+        transform = Pad(**params, padding_mode="constant")
+        return transform(input)
+
     def forward(self, *inputs: Any) -> Any:
         sample = inputs if len(inputs) > 1 else inputs[0]
         if torch.rand(1) >= self.p:
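
Net effect of the reorganization: Pad is now a standalone prototype transform with the same argument validation as the stable transforms.Pad, and RandomZoomOut delegates to it: _get_params samples a canvas size and offset and returns padding/fill, and _transform simply applies Pad(**params, padding_mode="constant"). A minimal usage sketch against the prototype namespace (shapes and values are illustrative, and the prototype API at this commit may differ from later releases):

import torch
from torchvision.prototype import transforms

image = torch.rand(3, 32, 32)  # plain CHW tensor, a "simple tensor" input

# Pad 5 pixels on every side: 32x32 -> 42x42.
padded = transforms.Pad(5, fill=0.0, padding_mode="constant")(image)
print(padded.shape)  # torch.Size([3, 42, 42])

# RandomZoomOut places the image on a larger canvas (with probability p),
# padding the borders with `fill` -- visually, a zoom-out.
zoomed = transforms.RandomZoomOut(fill=0.0, side_range=(1.0, 4.0), p=0.5)(image)
print(zoomed.shape)  # each side between the original size and 4x it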
