diff --git a/tests/test_models.py b/tests/test_models.py
index de6640688f9ec7638701ae06d3c3e847bc401734..44cb3ba2e297f05b0e784b00112a87ecebd9dc91 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -190,7 +190,7 @@ EXCLUDE_JIT_FILTERS = [
 def test_model_forward_torchscript(model_name, batch_size):
     """Run a single forward pass with each model"""
     input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE)
-    if max(input_size) > MAX_JIT_SIZE:  # NOTE using MAX_FWD_SIZE as the final limit is intentional
+    if max(input_size) > MAX_JIT_SIZE:
         pytest.skip("Fixed input size model > limit.")

     with set_scriptable(True):
diff --git a/timm/models/visformer.py b/timm/models/visformer.py
index 936f1ddf561c7a2f01a4d86e220309c04689ad71..33a2fe872a41326b16657db27995a8b550ffa71d 100644
--- a/timm/models/visformer.py
+++ b/timm/models/visformer.py
@@ -26,7 +26,7 @@ def _cfg(url='', **kwargs):
         'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
         'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
         'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
-        'first_conv': 'patch_embed.proj', 'classifier': 'head',
+        'first_conv': 'stem.0', 'classifier': 'head',
         **kwargs
     }

@@ -183,7 +183,7 @@ class Visformer(nn.Module):
             img_size //= 8
         else:
             self.stem = nn.Sequential(
-                nn.Conv2d(3, self.init_channels, 7, stride=2, padding=3, bias=False),
+                nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False),
                 nn.BatchNorm2d(self.init_channels),
                 nn.ReLU(inplace=True)
             )
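
A minimal sanity check, not part of the patch, of what the two visformer.py changes enable, assuming a timm checkout with this diff applied; the model name 'visformer_small' and the 1-channel use case are only illustrative:

import torch
import timm

# 'visformer_small' uses the 7x7 conv stem (init_channels is set), so before
# this patch the hard-coded Conv2d(3, ...) ignored in_chans and a 1-channel
# input failed with a channel mismatch. The corrected 'first_conv': 'stem.0'
# metadata also points timm at the right weights to adapt when loading
# pretrained checkpoints with in_chans != 3.
model = timm.create_model('visformer_small', pretrained=False, in_chans=1)
out = model(torch.randn(1, 1, 224, 224))
print(out.shape)  # expected: torch.Size([1, 1000])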