
Commit 8fbf214

mthrok authored and facebook-github-bot committed
Static analysis improvements python (#3263)
Summary:
* Simplify code and remove unused vars.
* Simplify expressions and remove redundant parentheses.
* JIT fixes.
* Make checks more readable.
* Fix styles.

Reviewed By: datumbox

Differential Revision: D25954566

fbshipit-source-id: 7ae7966e3c9b91e1f93b42e1c1a8f93cfef19643
1 parent 869b85b commit 8fbf214

9 files changed, +12 -18 lines

torchvision/datasets/mnist.py

Lines changed: 2 additions & 2 deletions
@@ -471,8 +471,8 @@ def read_sn3_pascalvincent_tensor(path: Union[str, IO], strict: bool = True) ->
     magic = get_int(data[0:4])
     nd = magic % 256
     ty = magic // 256
-    assert nd >= 1 and nd <= 3
-    assert ty >= 8 and ty <= 14
+    assert 1 <= nd <= 3
+    assert 8 <= ty <= 14
     m = SN3_PASCALVINCENT_TYPEMAP[ty]
     s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
     parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
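As a side note on the chained-comparison rewrite above: Python evaluates 1 <= nd <= 3 as (1 <= nd) and (nd <= 3), with nd evaluated once, so the new asserts are equivalent to the old ones. A minimal standalone check (not part of the commit):

# Verify the chained form matches the original and/or form over a range of values.
for nd in range(-1, 6):
    assert (1 <= nd <= 3) == (nd >= 1 and nd <= 3)
for ty in range(0, 20):
    assert (8 <= ty <= 14) == (ty >= 8 and ty <= 14)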

torchvision/datasets/phototour.py

Lines changed: 0 additions & 1 deletion
@@ -192,7 +192,6 @@ def read_info_file(data_dir: str, info_file: str) -> torch.Tensor:
     """Return a Tensor containing the list of labels
        Read the file and keep only the ID of the 3D point.
     """
-    labels = []
     with open(os.path.join(data_dir, info_file), 'r') as f:
         labels = [int(line.split()[0]) for line in f]
     return torch.LongTensor(labels)
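The deleted labels = [] was dead code: the list comprehension rebinds labels unconditionally, and if open() fails the function raises before labels is ever read. A minimal standalone sketch (using an in-memory fake info file, not the dataset's real files):

import io
import torch

# Simulate an info file where each line starts with a 3D point ID.
fake_info_file = io.StringIO("7 0.1 0.2\n7 0.3 0.4\n12 0.5 0.6\n")
labels = [int(line.split()[0]) for line in fake_info_file]
print(torch.LongTensor(labels))  # tensor([ 7,  7, 12])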

torchvision/io/_video_opt.py

Lines changed: 1 addition & 1 deletion
@@ -303,7 +303,7 @@ def _read_video_timestamps_from_file(filename):
         1,  # audio_timebase_den
     )
     _vframes, vframe_pts, vtimebase, vfps, vduration, \
-        _aframes, aframe_pts, atimebase, asample_rate, aduration = (result)
+        _aframes, aframe_pts, atimebase, asample_rate, aduration = result
     info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)

     vframe_pts = vframe_pts.numpy().tolist()
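The parentheses removed here were purely syntactic: (result) is just result, not a one-element tuple, so the multi-target unpacking behaves identically either way. A quick standalone illustration (not torchvision code):

# The comma, not the parentheses, is what creates a tuple; wrapping the
# right-hand side of an unpacking in parentheses changes nothing.
result = (1, 2, 3)
a, b, c = (result)
x, y, z = result
assert (a, b, c) == (x, y, z) == (1, 2, 3)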

torchvision/models/detection/backbone_utils.py

Lines changed: 1 addition & 1 deletion
@@ -87,7 +87,7 @@ def resnet_fpn_backbone(
         norm_layer=norm_layer)

     # select layers that wont be frozen
-    assert trainable_layers <= 5 and trainable_layers >= 0
+    assert 0 <= trainable_layers <= 5
     layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]
     # freeze layers only if pretrained backbone is used
     for name, parameter in backbone.named_parameters():

torchvision/models/detection/generalized_rcnn.py

Lines changed: 1 addition & 1 deletion
@@ -106,6 +106,6 @@ def forward(self, images, targets=None):
             if not self._has_warned:
                 warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")
                 self._has_warned = True
-            return (losses, detections)
+            return losses, detections
         else:
             return self.eager_outputs(losses, detections)
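Same idea for the return statement: return losses, detections builds exactly the same tuple as return (losses, detections), so scripted callers still receive the (Losses, Detections) pair. A small standalone sketch (hypothetical functions, not the RCNN code):

def with_parens():
    return ({'loss': 0.0}, ['det'])

def without_parens():
    return {'loss': 0.0}, ['det']

# Both return the identical 2-tuple.
assert with_parens() == without_parens()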

torchvision/models/detection/roi_heads.py

Lines changed: 1 addition & 2 deletions
@@ -190,7 +190,7 @@ def _onnx_heatmaps_to_keypoints(maps, maps_i, roi_map_width, roi_map_height,

     xy_preds_i_0 = x + offset_x_i.to(dtype=torch.float32)
     xy_preds_i_1 = y + offset_y_i.to(dtype=torch.float32)
-    xy_preds_i_2 = torch.ones((xy_preds_i_1.shape), dtype=torch.float32)
+    xy_preds_i_2 = torch.ones(xy_preds_i_1.shape, dtype=torch.float32)
     xy_preds_i = torch.stack([xy_preds_i_0.to(dtype=torch.float32),
                               xy_preds_i_1.to(dtype=torch.float32),
                               xy_preds_i_2.to(dtype=torch.float32)], 0)
@@ -795,7 +795,6 @@ def forward(self,
                 mask_features = self.mask_head(mask_features)
                 mask_logits = self.mask_predictor(mask_features)
             else:
-                mask_logits = torch.tensor(0)
                 raise Exception("Expected mask_roi_pool to be not None")

             loss_mask = {}
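Two things are happening in this file: xy_preds_i_1.shape is already a torch.Size (a tuple of ints), so the extra parentheses passed to torch.ones added nothing; and the assignment to mask_logits immediately before an unconditional raise could never be observed, hence its removal. A standalone check of the torch.ones part (assuming a recent PyTorch; not the ONNX export path itself):

import torch

t = torch.zeros(3, 4)
# torch.Size is accepted directly by torch.ones; the redundant parentheses
# around t.shape produced exactly the same tensor.
a = torch.ones(t.shape, dtype=torch.float32)
b = torch.ones((t.shape), dtype=torch.float32)
assert torch.equal(a, b) and a.shape == (3, 4)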

torchvision/models/video/resnet.py

Lines changed: 3 additions & 3 deletions
@@ -30,7 +30,7 @@ def __init__(self,

     @staticmethod
     def get_downsample_stride(stride):
-        return (stride, stride, stride)
+        return stride, stride, stride


 class Conv2Plus1D(nn.Sequential):
@@ -53,7 +53,7 @@ def __init__(self,

     @staticmethod
     def get_downsample_stride(stride):
-        return (stride, stride, stride)
+        return stride, stride, stride


 class Conv3DNoTemporal(nn.Conv3d):
@@ -75,7 +75,7 @@ def __init__(self,

     @staticmethod
     def get_downsample_stride(stride):
-        return (1, stride, stride)
+        return 1, stride, stride


 class BasicBlock(nn.Module):

torchvision/ops/feature_pyramid_network.py

Lines changed: 2 additions & 6 deletions
@@ -99,9 +99,7 @@ def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor:
         This is equivalent to self.inner_blocks[idx](x),
         but torchscript doesn't support this yet
         """
-        num_blocks = 0
-        for m in self.inner_blocks:
-            num_blocks += 1
+        num_blocks = len(self.inner_blocks)
         if idx < 0:
             idx += num_blocks
         i = 0
@@ -117,9 +115,7 @@ def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor:
         This is equivalent to self.layer_blocks[idx](x),
         but torchscript doesn't support this yet
         """
-        num_blocks = 0
-        for m in self.layer_blocks:
-            num_blocks += 1
+        num_blocks = len(self.layer_blocks)
         if idx < 0:
             idx += num_blocks
         i = 0
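The counting loops were a workaround from when TorchScript could not take len() of an nn.ModuleList; the replacement assumes a PyTorch version where that is scriptable. A hedged standalone sketch of the same pattern (hypothetical Blocks module, not the FPN class itself):

import torch
import torch.nn as nn

class Blocks(nn.Module):
    def __init__(self):
        super().__init__()
        self.inner_blocks = nn.ModuleList([nn.Conv2d(8, 8, 1) for _ in range(4)])

    def forward(self, x: torch.Tensor, idx: int) -> torch.Tensor:
        num_blocks = len(self.inner_blocks)  # replaces the manual counting loop
        if idx < 0:
            idx += num_blocks
        out = x
        i = 0
        # ModuleList still cannot be indexed with a runtime int under
        # TorchScript, so the lookup loop stays as in the original code.
        for block in self.inner_blocks:
            if i == idx:
                out = block(x)
            i += 1
        return out

scripted = torch.jit.script(Blocks())  # len() on ModuleList scripts here
print(scripted(torch.randn(1, 8, 4, 4), -1).shape)  # torch.Size([1, 8, 4, 4])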

torchvision/transforms/functional_tensor.py

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ def _max_value(dtype: torch.dtype) -> float:
             max_value = next_value
             bits *= 2
         else:
-            return max_value.item()
+            break
     return max_value.item()


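Replacing the in-loop return with break leaves the loop a single exit point: control falls through to the one return max_value.item() after the while. A standalone illustration of the same control-flow change (a simple doubling loop, not torchvision's actual _max_value logic):

def largest_doubling_below(limit: int) -> int:
    # Same shape as the change above: break out of the loop instead of
    # returning from inside it, so there is exactly one return at the end.
    value = 1
    while True:
        if value * 2 <= limit:
            value *= 2
        else:
            break  # previously this branch would have returned directly
    return value

assert largest_doubling_below(100) == 64
assert largest_doubling_below(255) == 128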