diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py
index 02705a7ebdb..ba1cc8499d8 100644
--- a/torchvision/models/optical_flow/raft.py
+++ b/torchvision/models/optical_flow/raft.py
@@ -8,6 +8,7 @@
 from torch.nn.modules.instancenorm import InstanceNorm2d
 from torchvision.ops import ConvNormActivation
 
+from ...utils import _log_api_usage_once
 from ._utils import grid_sample, make_coords_grid, upsample_flow
 
 
@@ -432,6 +433,7 @@ def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block
             If ``None`` (default), the flow is upsampled using interpolation.
         """
         super().__init__()
+        _log_api_usage_once(self)
 
         self.feature_encoder = feature_encoder
         self.context_encoder = context_encoder
diff --git a/torchvision/prototype/models/vision_transformer.py b/torchvision/prototype/models/vision_transformer.py
index 9794559745d..ae8eee45539 100644
--- a/torchvision/prototype/models/vision_transformer.py
+++ b/torchvision/prototype/models/vision_transformer.py
@@ -11,6 +11,7 @@
 import torch.nn as nn
 from torch import Tensor
 
+from ...utils import _log_api_usage_once
 from ._api import WeightsEnum
 from ._utils import handle_legacy_interface
 
@@ -139,6 +140,7 @@ def __init__(
         norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
     ):
         super().__init__()
+        _log_api_usage_once(self)
         torch._assert(image_size % patch_size == 0, "Input shape indivisible by patch size!")
         self.image_size = image_size
         self.patch_size = patch_size
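
Both hunks apply the same pattern: import the internal `_log_api_usage_once` helper and call it as the first statement after `super().__init__()` in the model's constructor, so instantiating the model records one API-usage event. A minimal sketch of that pattern in isolation is below; it assumes the helper is importable as `torchvision.utils._log_api_usage_once` (a private utility, subject to change), and `MyModel` is a hypothetical module used only for illustration, not part of this diff.

```python
import torch.nn as nn

# Private torchvision helper; it forwards a "<module>.<ClassName>" key to
# torch's API-usage logging hook. Not a public, stable API.
from torchvision.utils import _log_api_usage_once


class MyModel(nn.Module):
    """Hypothetical model showing where the logging call is placed."""

    def __init__(self) -> None:
        super().__init__()
        # Record an API-usage event for this class, mirroring the diff above.
        _log_api_usage_once(self)
        self.linear = nn.Linear(8, 2)

    def forward(self, x):
        return self.linear(x)


# Constructing the model is enough to trigger the logging hook (a no-op unless
# the embedding application registers a handler via torch's C++ logging API).
model = MyModel()
```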