From c6ef54eefa7038e7e42a181ce516c6c802ad905c Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Fri, 23 Aug 2024 10:14:06 -0700 Subject: [PATCH 1/9] Initial mambaout work --- timm/models/__init__.py | 1 + timm/models/mambaout.py | 480 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 481 insertions(+) create mode 100644 timm/models/mambaout.py diff --git a/timm/models/__init__.py b/timm/models/__init__.py index 60fd483c44..c5b1984f20 100644 --- a/timm/models/__init__.py +++ b/timm/models/__init__.py @@ -35,6 +35,7 @@ from .inception_v4 import * from .levit import * from .maxxvit import * +from .mambaout import * from .metaformer import * from .mlp_mixer import * from .mobilenetv3 import * diff --git a/timm/models/mambaout.py b/timm/models/mambaout.py new file mode 100644 index 0000000000..3acd1d6f4c --- /dev/null +++ b/timm/models/mambaout.py @@ -0,0 +1,480 @@ +""" +MambaOut models for image classification. +Some implementations are modified from: +timm (https://github.com/rwightman/pytorch-image-models), +MetaFormer (https://github.com/sail-sg/metaformer), +InceptionNeXt (https://github.com/sail-sg/inceptionnext) +""" +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from timm.models.layers import trunc_normal_, DropPath, LayerNorm +from .vision_transformer import LayerScale +from ._manipulate import checkpoint_seq +from timm.models.registry import register_model +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +class Stem(nn.Module): + r""" Code modified from InternImage: + https://github.com/OpenGVLab/InternImage + """ + + def __init__( + self, + in_chs=3, + out_chs=96, + mid_norm: bool = True, + act_layer=nn.GELU, + norm_layer=LayerNorm, + ): + super().__init__() + self.conv1 = nn.Conv2d( + in_chs, + out_chs // 2, + kernel_size=3, + stride=2, + padding=1 + ) + self.norm1 = norm_layer(out_chs // 2) if mid_norm else None + self.act = act_layer() + self.conv2 = nn.Conv2d( + out_chs // 2, + out_chs, + kernel_size=3, + stride=2, + padding=1 + ) + self.norm2 = norm_layer(out_chs) + + def forward(self, x): + x = self.conv1(x) + if self.norm1 is not None: + x = x.permute(0, 2, 3, 1) + x = self.norm1(x) + x = x.permute(0, 3, 1, 2) + x = self.act(x) + x = self.conv2(x) + x = x.permute(0, 2, 3, 1) + x = self.norm2(x) + return x + + +class DownsampleNormFirst(nn.Module): + + def __init__( + self, + in_chs=96, + out_chs=198, + norm_layer=LayerNorm, + ): + super().__init__() + self.norm = norm_layer(in_chs) + self.conv = nn.Conv2d( + in_chs, + out_chs, + kernel_size=3, + stride=2, + padding=1 + ) + + def forward(self, x): + x = self.norm(x) + x = x.permute(0, 3, 1, 2) + x = self.conv(x) + x = x.permute(0, 2, 3, 1) + return x + + +class Downsample(nn.Module): + + def __init__( + self, + in_chs=96, + out_chs=198, + norm_layer=LayerNorm, + ): + super().__init__() + self.conv = nn.Conv2d( + in_chs, + out_chs, + kernel_size=3, + stride=2, + padding=1 + ) + self.norm = norm_layer(out_chs) + + def forward(self, x): + x = x.permute(0, 3, 1, 2) + x = self.conv(x) + x = x.permute(0, 2, 3, 1) + x = self.norm(x) + return x + + +class MlpHead(nn.Module): + """ MLP classification head + """ + + def __init__( + self, + dim, + num_classes=1000, + act_layer=nn.GELU, + mlp_ratio=4, + norm_layer=LayerNorm, + drop_rate=0., + bias=True, + ): + super().__init__() + hidden_features = int(mlp_ratio * dim) + self.fc1 = nn.Linear(dim, hidden_features, bias=bias) + self.act = act_layer() + self.norm = 
norm_layer(hidden_features) + self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias) + self.head_dropout = nn.Dropout(drop_rate) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.norm(x) + x = self.head_dropout(x) + x = self.fc2(x) + return x + + +class GatedConvBlock(nn.Module): + r""" Our implementation of Gated CNN Block: https://arxiv.org/pdf/1612.08083 + Args: + conv_ratio: control the number of channels to conduct depthwise convolution. + Conduct convolution on partial channels can improve paraitcal efficiency. + The idea of partial channels is from ShuffleNet V2 (https://arxiv.org/abs/1807.11164) and + also used by InceptionNeXt (https://arxiv.org/abs/2303.16900) and FasterNet (https://arxiv.org/abs/2303.03667) + """ + + def __init__( + self, + dim, + expansion_ratio=8 / 3, + kernel_size=7, + conv_ratio=1.0, + ls_init_value=None, + norm_layer=LayerNorm, + act_layer=nn.GELU, + drop_path=0., + **kwargs + ): + super().__init__() + self.norm = norm_layer(dim) + hidden = int(expansion_ratio * dim) + self.fc1 = nn.Linear(dim, hidden * 2) + self.act = act_layer() + conv_channels = int(conv_ratio * dim) + self.split_indices = (hidden, hidden - conv_channels, conv_channels) + self.conv = nn.Conv2d( + conv_channels, + conv_channels, + kernel_size=kernel_size, + padding=kernel_size // 2, + groups=conv_channels + ) + self.fc2 = nn.Linear(hidden, dim) + self.ls = LayerScale(dim) if ls_init_value is not None else nn.Identity() + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x # [B, H, W, C] + x = self.norm(x) + x = self.fc1(x) + g, i, c = torch.split(x, self.split_indices, dim=-1) + c = c.permute(0, 3, 1, 2) # [B, H, W, C] -> [B, C, H, W] + c = self.conv(c) + c = c.permute(0, 2, 3, 1) # [B, C, H, W] -> [B, H, W, C] + x = self.fc2(self.act(g) * torch.cat((i, c), dim=-1)) + x = self.ls(x) + x = self.drop_path(x) + return x + shortcut + + +class MambaOutStage(nn.Module): + + def __init__( + self, + dim, + dim_out: Optional[int] = None, + depth: int = 4, + expansion_ratio=8 / 3, + kernel_size=7, + conv_ratio=1.0, + downsample: bool = False, + ls_init_value: Optional[float] = None, + norm_layer=LayerNorm, + act_layer=nn.GELU, + drop_path=0., + ): + super().__init__() + dim_out = dim_out or dim + self.grad_checkpointing = False + + if downsample: + self.downsample = Downsample(dim, dim_out, norm_layer=norm_layer) + else: + assert dim == dim_out + self.downsample = nn.Identity() + + self.blocks = nn.Sequential(*[ + GatedConvBlock( + dim=dim_out, + expansion_ratio=expansion_ratio, + kernel_size=kernel_size, + conv_ratio=conv_ratio, + ls_init_value=ls_init_value, + norm_layer=norm_layer, + act_layer=act_layer, + drop_path=drop_path[j] if isinstance(drop_path, (list, tuple)) else drop_path, + ) + for j in range(depth) + ]) + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class MambaOut(nn.Module): + r""" MetaFormer + A PyTorch impl of : `MetaFormer Baselines for Vision` - + https://arxiv.org/abs/2210.13452 + + Args: + in_chans (int): Number of input image channels. Default: 3. + num_classes (int): Number of classes for classification head. Default: 1000. + depths (list or tuple): Number of blocks at each stage. Default: [3, 3, 9, 3]. + dims (int): Feature dimension at each stage. Default: [96, 192, 384, 576]. 
+ downsample_layers: (list or tuple): Downsampling layers before each stage. + drop_path_rate (float): Stochastic depth rate. Default: 0. + output_norm: norm before classifier head. Default: partial(nn.LayerNorm, eps=1e-6). + head_fn: classification head. Default: nn.Linear. + head_dropout (float): dropout for MLP classifier. Default: 0. + """ + + def __init__( + self, + in_chans=3, + num_classes=1000, + depths=(3, 3, 9, 3), + dims=(96, 192, 384, 576), + norm_layer=LayerNorm, + act_layer=nn.GELU, + conv_ratio=1.0, + kernel_size=7, + ls_init_value=None, + drop_path_rate=0., + drop_rate=0., + output_norm=LayerNorm, + head_fn=MlpHead, + **kwargs, + ): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + if not isinstance(depths, (list, tuple)): + depths = [depths] # it means the model has only one stage + if not isinstance(dims, (list, tuple)): + dims = [dims] + + num_stage = len(depths) + self.num_stage = num_stage + + self.stem = Stem(in_chans, dims[0], act_layer=act_layer, norm_layer=norm_layer) + prev_dim = dims[0] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + self.stages = nn.ModuleList() + cur = 0 + for i in range(num_stage): + dim = dims[i] + stage = MambaOutStage( + dim=prev_dim, + dim_out=dim, + depth=depths[i], + kernel_size=kernel_size, + conv_ratio=conv_ratio, + downsample=i > 0, + ls_init_value=ls_init_value, + norm_layer=norm_layer, + act_layer=act_layer, + drop_path=dp_rates[i], + ) + self.stages.append(stage) + prev_dim = dim + cur += depths[i] + + self.norm = output_norm(prev_dim) + + self.head = head_fn(prev_dim, num_classes, drop_rate=drop_rate) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, (nn.Conv2d, nn.Linear)): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'norm'} + + def forward_features(self, x): + x = self.stem(x) + for s in self.stages: + x = s(x) + return x + + def forward_head(self, x): + x = x.mean((1, 2)) + x = self.norm(x) + x = self.head(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'mambaout_femto': _cfg( + url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_femto.pth'), + 'mambaout_kobe': _cfg( + url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_kobe.pth'), + 'mambaout_tiny': _cfg( + url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_tiny.pth'), + 'mambaout_small': _cfg( + url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_small.pth'), + 'mambaout_base': _cfg( + url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_base.pth'), + 'mambaout_small_rw': _cfg(), + 'mambaout_base_rw': _cfg(), +} + + +# a series of MambaOut models +@register_model +def mambaout_femto(pretrained=False, **kwargs): + model = MambaOut( + depths=[3, 3, 9, 3], + dims=[48, 96, 192, 288], + **kwargs) + model.default_cfg = default_cfgs['mambaout_femto'] + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url=model.default_cfg['url'], map_location="cpu", check_hash=True) + 
model.load_state_dict(state_dict) + return model + + +# Kobe Memorial Version with 24 Gated CNN blocks +@register_model +def mambaout_kobe(pretrained=False, **kwargs): + model = MambaOut( + depths=[3, 3, 15, 3], + dims=[48, 96, 192, 288], + **kwargs) + model.default_cfg = default_cfgs['mambaout_kobe'] + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url=model.default_cfg['url'], map_location="cpu", check_hash=True) + model.load_state_dict(state_dict) + return model + + +@register_model +def mambaout_tiny(pretrained=False, **kwargs): + model = MambaOut( + depths=[3, 3, 9, 3], + dims=[96, 192, 384, 576], + **kwargs) + model.default_cfg = default_cfgs['mambaout_tiny'] + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url=model.default_cfg['url'], map_location="cpu", check_hash=True) + model.load_state_dict(state_dict) + return model + + +@register_model +def mambaout_small(pretrained=False, **kwargs): + model = MambaOut( + depths=[3, 4, 27, 3], + dims=[96, 192, 384, 576], + **kwargs) + model.default_cfg = default_cfgs['mambaout_small'] + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url=model.default_cfg['url'], map_location="cpu", check_hash=True) + model.load_state_dict(state_dict) + return model + + +@register_model +def mambaout_base(pretrained=False, **kwargs): + model = MambaOut( + depths=[3, 4, 27, 3], + dims=[128, 256, 512, 768], + **kwargs) + model.default_cfg = default_cfgs['mambaout_base'] + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url=model.default_cfg['url'], map_location="cpu", check_hash=True) + model.load_state_dict(state_dict) + return model + + +@register_model +def mambaout_small_rw(pretrained=False, **kwargs): + model = MambaOut( + depths=[3, 4, 27, 3], + dims=[96, 192, 384, 576], + ls_init_value=1e-6, + **kwargs, + ) + model.default_cfg = default_cfgs['mambaout_small'] + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url=model.default_cfg['url'], map_location="cpu", check_hash=True) + model.load_state_dict(state_dict) + return model + + +@register_model +def mambaout_base_rw(pretrained=False, **kwargs): + model = MambaOut( + depths=(3, 4, 27, 3), + dims=(128, 256, 512, 768), + ls_init_value=1e-6, + **kwargs + ) + model.default_cfg = default_cfgs['mambaout_base'] + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url=model.default_cfg['url'], map_location="cpu", check_hash=True) + model.load_state_dict(state_dict) + return model From f2086f51a03fac2bffffd472ce805bb203920b7f Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Fri, 23 Aug 2024 10:39:01 -0700 Subject: [PATCH 2/9] Add mambaout builder support, pretrained weight remap --- timm/models/mambaout.py | 131 +++++++++++++++++----------------------- 1 file changed, 55 insertions(+), 76 deletions(-) diff --git a/timm/models/mambaout.py b/timm/models/mambaout.py index 3acd1d6f4c..a57ba8f3d7 100644 --- a/timm/models/mambaout.py +++ b/timm/models/mambaout.py @@ -5,17 +5,16 @@ MetaFormer (https://github.com/sail-sg/metaformer), InceptionNeXt (https://github.com/sail-sg/inceptionnext) """ -from functools import partial from typing import Optional import torch import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import trunc_normal_, DropPath, LayerNorm -from .vision_transformer import LayerScale -from ._manipulate import checkpoint_seq -from timm.models.registry import register_model + from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers 
import trunc_normal_, DropPath, LayerNorm, LayerScale +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import register_model class Stem(nn.Module): @@ -275,6 +274,7 @@ def __init__( act_layer=nn.GELU, conv_ratio=1.0, kernel_size=7, + stem_mid_norm=True, ls_init_value=None, drop_path_rate=0., drop_rate=0., @@ -293,7 +293,13 @@ def __init__( num_stage = len(depths) self.num_stage = num_stage - self.stem = Stem(in_chans, dims[0], act_layer=act_layer, norm_layer=norm_layer) + self.stem = Stem( + in_chans, + dims[0], + mid_norm=stem_mid_norm, + act_layer=act_layer, + norm_layer=norm_layer, + ) prev_dim = dims[0] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] self.stages = nn.ModuleList() @@ -338,7 +344,7 @@ def forward_features(self, x): x = s(x) return x - def forward_head(self, x): + def forward_head(self, x, pre_logits: bool = False): x = x.mean((1, 2)) x = self.norm(x) x = self.head(x) @@ -350,6 +356,21 @@ def forward(self, x): return x +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + state_dict = state_dict['model'] + + import re + out_dict = {} + for k, v in state_dict.items(): + k = k.replace('downsample_layers.0.', 'stem.') + k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) + k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k) + out_dict[k] = v + + return out_dict + + def _cfg(url='', **kwargs): return { 'url': url, @@ -376,105 +397,63 @@ def _cfg(url='', **kwargs): } +def _create_mambaout(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + MambaOut, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), + **kwargs, + ) + return model + + # a series of MambaOut models @register_model def mambaout_femto(pretrained=False, **kwargs): - model = MambaOut( - depths=[3, 3, 9, 3], - dims=[48, 96, 192, 288], - **kwargs) - model.default_cfg = default_cfgs['mambaout_femto'] - if pretrained: - state_dict = torch.hub.load_state_dict_from_url( - url=model.default_cfg['url'], map_location="cpu", check_hash=True) - model.load_state_dict(state_dict) - return model - + model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 192, 288)) + return _create_mambaout('mambaout_femto', pretrained=pretrained, **dict(model_args, **kwargs)) # Kobe Memorial Version with 24 Gated CNN blocks @register_model def mambaout_kobe(pretrained=False, **kwargs): - model = MambaOut( - depths=[3, 3, 15, 3], - dims=[48, 96, 192, 288], - **kwargs) - model.default_cfg = default_cfgs['mambaout_kobe'] - if pretrained: - state_dict = torch.hub.load_state_dict_from_url( - url=model.default_cfg['url'], map_location="cpu", check_hash=True) - model.load_state_dict(state_dict) - return model - + model_args = dict(depths=[3, 3, 15, 3], dims=[48, 96, 192, 288]) + return _create_mambaout('mambaout_kobe', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_tiny(pretrained=False, **kwargs): - model = MambaOut( - depths=[3, 3, 9, 3], - dims=[96, 192, 384, 576], - **kwargs) - model.default_cfg = default_cfgs['mambaout_tiny'] - if pretrained: - state_dict = torch.hub.load_state_dict_from_url( - url=model.default_cfg['url'], map_location="cpu", check_hash=True) - model.load_state_dict(state_dict) - return model + model_args = dict(depths=[3, 3, 9, 3], dims=[96, 192, 384, 576]) + return _create_mambaout('mambaout_tiny', pretrained=pretrained, **dict(model_args, 
**kwargs)) @register_model def mambaout_small(pretrained=False, **kwargs): - model = MambaOut( - depths=[3, 4, 27, 3], - dims=[96, 192, 384, 576], - **kwargs) - model.default_cfg = default_cfgs['mambaout_small'] - if pretrained: - state_dict = torch.hub.load_state_dict_from_url( - url=model.default_cfg['url'], map_location="cpu", check_hash=True) - model.load_state_dict(state_dict) - return model + model_args = dict(depths=[3, 4, 27, 3], dims=[96, 192, 384, 576]) + return _create_mambaout('mambaout_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_base(pretrained=False, **kwargs): - model = MambaOut( - depths=[3, 4, 27, 3], - dims=[128, 256, 512, 768], - **kwargs) - model.default_cfg = default_cfgs['mambaout_base'] - if pretrained: - state_dict = torch.hub.load_state_dict_from_url( - url=model.default_cfg['url'], map_location="cpu", check_hash=True) - model.load_state_dict(state_dict) - return model + model_args = dict(depths=[3, 4, 27, 3], dims=[128, 256, 512, 768]) + return _create_mambaout('mambaout_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_small_rw(pretrained=False, **kwargs): - model = MambaOut( + model_args = dict( depths=[3, 4, 27, 3], dims=[96, 192, 384, 576], + stem_mid_norm=False, ls_init_value=1e-6, - **kwargs, ) - model.default_cfg = default_cfgs['mambaout_small'] - if pretrained: - state_dict = torch.hub.load_state_dict_from_url( - url=model.default_cfg['url'], map_location="cpu", check_hash=True) - model.load_state_dict(state_dict) - return model + return _create_mambaout('mambaout_small_rw', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_base_rw(pretrained=False, **kwargs): - model = MambaOut( + model_args = dict( depths=(3, 4, 27, 3), dims=(128, 256, 512, 768), + stem_mid_norm=False, ls_init_value=1e-6, - **kwargs ) - model.default_cfg = default_cfgs['mambaout_base'] - if pretrained: - state_dict = torch.hub.load_state_dict_from_url( - url=model.default_cfg['url'], map_location="cpu", check_hash=True) - model.load_state_dict(state_dict) - return model + return _create_mambaout('mambaout_base_rw', pretrained=pretrained, **dict(model_args, **kwargs)) From c2da12c7e1d67e4cea8ee496a4f0b8251b484919 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Tue, 27 Aug 2024 14:03:59 -0700 Subject: [PATCH 3/9] Update rw models, fix heads --- timm/models/mambaout.py | 66 ++++++++++++++++++++++++++++++----------- 1 file changed, 49 insertions(+), 17 deletions(-) diff --git a/timm/models/mambaout.py b/timm/models/mambaout.py index a57ba8f3d7..3c9900a02d 100644 --- a/timm/models/mambaout.py +++ b/timm/models/mambaout.py @@ -8,10 +8,10 @@ from typing import Optional import torch -import torch.nn as nn +from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from timm.layers import trunc_normal_, DropPath, LayerNorm, LayerScale +from timm.layers import trunc_normal_, DropPath, LayerNorm, LayerScale, ClNormMlpClassifierHead from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model @@ -122,6 +122,7 @@ def __init__( self, dim, num_classes=1000, + pool_type='avg', act_layer=nn.GELU, mlp_ratio=4, norm_layer=LayerNorm, @@ -130,17 +131,25 @@ def __init__( ): super().__init__() hidden_features = int(mlp_ratio * dim) + self.pool_type = pool_type + + self.norm1 = norm_layer(dim) self.fc1 = nn.Linear(dim, hidden_features, bias=bias) self.act = act_layer() - self.norm = 
norm_layer(hidden_features) + self.norm2 = norm_layer(hidden_features) self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias) self.head_dropout = nn.Dropout(drop_rate) - def forward(self, x): + def forward(self, x, pre_logits: bool = False): + if self.pool_type == 'avg': + x = x.mean((1, 2)) + x = self.norm1(x) x = self.fc1(x) x = self.act(x) - x = self.norm(x) + x = self.norm2(x) x = self.head_dropout(x) + if pre_logits: + return x x = self.fc2(x) return x @@ -208,7 +217,7 @@ def __init__( expansion_ratio=8 / 3, kernel_size=7, conv_ratio=1.0, - downsample: bool = False, + downsample: str = '', ls_init_value: Optional[float] = None, norm_layer=LayerNorm, act_layer=nn.GELU, @@ -218,8 +227,10 @@ def __init__( dim_out = dim_out or dim self.grad_checkpointing = False - if downsample: + if downsample == 'conv': self.downsample = Downsample(dim, dim_out, norm_layer=norm_layer) + elif downsample == 'conv_nf': + self.downsample = DownsampleNormFirst(dim, dim_out, norm_layer=norm_layer) else: assert dim == dim_out self.downsample = nn.Identity() @@ -276,10 +287,10 @@ def __init__( kernel_size=7, stem_mid_norm=True, ls_init_value=None, + downsample='conv', drop_path_rate=0., drop_rate=0., - output_norm=LayerNorm, - head_fn=MlpHead, + head_fn='default', **kwargs, ): super().__init__() @@ -312,7 +323,7 @@ def __init__( depth=depths[i], kernel_size=kernel_size, conv_ratio=conv_ratio, - downsample=i > 0, + downsample=downsample if i > 0 else '', ls_init_value=ls_init_value, norm_layer=norm_layer, act_layer=act_layer, @@ -322,9 +333,25 @@ def __init__( prev_dim = dim cur += depths[i] - self.norm = output_norm(prev_dim) - - self.head = head_fn(prev_dim, num_classes, drop_rate=drop_rate) + if head_fn == 'default': + # specific to this model, unusual norm -> pool -> fc -> act -> norm -> fc combo + self.head = MlpHead( + prev_dim, + num_classes, + pool_type='avg', + drop_rate=drop_rate, + norm_layer=norm_layer, + ) + else: + # more typical norm -> pool -> fc -> act -> fc + self.head = ClNormMlpClassifierHead( + prev_dim, + num_classes, + hidden_size=int(prev_dim * 4), + pool_type='avg', + norm_layer=norm_layer, + drop_rate=drop_rate, + ) self.apply(self._init_weights) @@ -336,7 +363,7 @@ def _init_weights(self, m): @torch.jit.ignore def no_weight_decay(self): - return {'norm'} + return {} def forward_features(self, x): x = self.stem(x) @@ -345,9 +372,7 @@ def forward_features(self, x): return x def forward_head(self, x, pre_logits: bool = False): - x = x.mean((1, 2)) - x = self.norm(x) - x = self.head(x) + x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) return x def forward(self, x): @@ -366,6 +391,10 @@ def checkpoint_filter_fn(state_dict, model): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k) + if k.startswith('norm.'): + k = k.replace('norm.', 'head.norm1.') + elif k.startswith('head.norm.'): + k = k.replace('head.norm.', 'head.norm2.') out_dict[k] = v return out_dict @@ -443,7 +472,9 @@ def mambaout_small_rw(pretrained=False, **kwargs): depths=[3, 4, 27, 3], dims=[96, 192, 384, 576], stem_mid_norm=False, + downsample='conv_nf', ls_init_value=1e-6, + head_fn='norm_mlp', ) return _create_mambaout('mambaout_small_rw', pretrained=pretrained, **dict(model_args, **kwargs)) @@ -455,5 +486,6 @@ def mambaout_base_rw(pretrained=False, **kwargs): dims=(128, 256, 512, 768), stem_mid_norm=False, ls_init_value=1e-6, + head_fn='norm_mlp', ) return 
_create_mambaout('mambaout_base_rw', pretrained=pretrained, **dict(model_args, **kwargs)) From 4542cf03f9628fc5ec6292337c41cb14e07d5278 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Fri, 13 Sep 2024 11:25:04 -0700 Subject: [PATCH 4/9] Add features_only, other bits to mambaout, define different base alternatives --- timm/models/mambaout.py | 119 ++++++++++++++++++++++++++++++++-------- 1 file changed, 95 insertions(+), 24 deletions(-) diff --git a/timm/models/mambaout.py b/timm/models/mambaout.py index 3c9900a02d..5c47223787 100644 --- a/timm/models/mambaout.py +++ b/timm/models/mambaout.py @@ -5,6 +5,7 @@ MetaFormer (https://github.com/sail-sg/metaformer), InceptionNeXt (https://github.com/sail-sg/inceptionnext) """ +from collections import OrderedDict from typing import Optional import torch @@ -120,7 +121,7 @@ class MlpHead(nn.Module): def __init__( self, - dim, + in_features, num_classes=1000, pool_type='avg', act_layer=nn.GELU, @@ -130,27 +131,47 @@ def __init__( bias=True, ): super().__init__() - hidden_features = int(mlp_ratio * dim) + if mlp_ratio is not None: + hidden_size = int(mlp_ratio * in_features) + else: + hidden_size = None self.pool_type = pool_type + self.in_features = in_features + self.hidden_size = hidden_size or in_features + + self.norm = norm_layer(in_features) + if hidden_size: + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', nn.Linear(in_features, hidden_size)), + ('act', act_layer()), + ('norm', norm_layer(hidden_size)) + ])) + self.num_features = hidden_size + else: + self.num_features = in_features + self.pre_logits = nn.Identity() - self.norm1 = norm_layer(dim) - self.fc1 = nn.Linear(dim, hidden_features, bias=bias) - self.act = act_layer() - self.norm2 = norm_layer(hidden_features) - self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias) + self.fc = nn.Linear(hidden_size, num_classes, bias=bias) self.head_dropout = nn.Dropout(drop_rate) + def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False): + if pool_type is not None: + self.pool_type = pool_type + if reset_other: + self.norm = nn.Identity() + self.pre_logits = nn.Identity() + self.num_features = self.in_features + self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + def forward(self, x, pre_logits: bool = False): if self.pool_type == 'avg': x = x.mean((1, 2)) - x = self.norm1(x) - x = self.fc1(x) - x = self.act(x) - x = self.norm2(x) + x = self.norm(x) + x = self.pre_logits(x) x = self.head_dropout(x) if pre_logits: return x - x = self.fc2(x) + x = self.fc(x) return x @@ -284,6 +305,7 @@ def __init__( norm_layer=LayerNorm, act_layer=nn.GELU, conv_ratio=1.0, + expansion_ratio=8/3, kernel_size=7, stem_mid_norm=True, ls_init_value=None, @@ -303,6 +325,7 @@ def __init__( num_stage = len(depths) self.num_stage = num_stage + self.feature_info = [] self.stem = Stem( in_chans, @@ -313,16 +336,20 @@ def __init__( ) prev_dim = dims[0] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] - self.stages = nn.ModuleList() cur = 0 + curr_stride = 4 + self.stages = nn.Sequential() for i in range(num_stage): dim = dims[i] + stride = 2 if curr_stride == 2 or i > 0 else 1 + curr_stride *= stride stage = MambaOutStage( dim=prev_dim, dim_out=dim, depth=depths[i], kernel_size=kernel_size, conv_ratio=conv_ratio, + expansion_ratio=expansion_ratio, downsample=downsample if i > 0 else '', ls_init_value=ls_init_value, norm_layer=norm_layer, @@ -331,6 +358,8 @@ def __init__( ) 
self.stages.append(stage) prev_dim = dim + # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 + self.feature_info += [dict(num_chs=prev_dim, reduction=curr_stride, module=f'stages.{i}')] cur += depths[i] if head_fn == 'default': @@ -352,6 +381,8 @@ def __init__( norm_layer=norm_layer, drop_rate=drop_rate, ) + self.num_features = prev_dim + self.hidden_size = self.head.num_features self.apply(self._init_weights) @@ -362,13 +393,31 @@ def _init_weights(self, m): nn.init.constant_(m.bias, 0) @torch.jit.ignore - def no_weight_decay(self): - return {} + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.downsample', (0,)), # blocks + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) - for s in self.stages: - x = s(x) + x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): @@ -391,10 +440,14 @@ def checkpoint_filter_fn(state_dict, model): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k) + # remap head names if k.startswith('norm.'): - k = k.replace('norm.', 'head.norm1.') - elif k.startswith('head.norm.'): - k = k.replace('head.norm.', 'head.norm2.') + # this is moving to head since it's after the pooling + k = k.replace('norm.', 'head.norm.') + elif k.startswith('head.'): + k = k.replace('head.fc1.', 'head.pre_logits.fc.') + k = k.replace('head.norm.', 'head.pre_logits.norm.') + k = k.replace('head.fc2.', 'head.fc.') out_dict[k] = v return out_dict @@ -405,7 +458,7 @@ def _cfg(url='', **kwargs): 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 1.0, 'interpolation': 'bicubic', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head.fc', **kwargs } @@ -422,7 +475,8 @@ def _cfg(url='', **kwargs): 'mambaout_base': _cfg( url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_base.pth'), 'mambaout_small_rw': _cfg(), - 'mambaout_base_rw': _cfg(), + 'mambaout_base_slim_rw': _cfg(), + 'mambaout_base_plus_rw': _cfg(), } @@ -480,12 +534,29 @@ def mambaout_small_rw(pretrained=False, **kwargs): @register_model -def mambaout_base_rw(pretrained=False, **kwargs): +def mambaout_base_slim_rw(pretrained=False, **kwargs): model_args = dict( depths=(3, 4, 27, 3), dims=(128, 256, 512, 768), + expansion_ratio=2.5, + conv_ratio=1.25, stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-6, + head_fn='norm_mlp', + ) + return _create_mambaout('mambaout_base_slim_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_base_plus_rw(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 4, 27, 3), + dims=(128, 256, 512, 768), + expansion_ratio=3.0, + stem_mid_norm=False, + downsample='conv_nf', ls_init_value=1e-6, head_fn='norm_mlp', ) - return _create_mambaout('mambaout_base_rw', pretrained=pretrained, 
**dict(model_args, **kwargs)) + return _create_mambaout('mambaout_base_plus_rw', pretrained=pretrained, **dict(model_args, **kwargs)) From 91e743f2dd8d145cad65eac2a2d1b9cd0cf9a762 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Fri, 13 Sep 2024 17:08:57 -0700 Subject: [PATCH 5/9] Mambaout tweaks --- timm/models/mambaout.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/timm/models/mambaout.py b/timm/models/mambaout.py index 5c47223787..a33554a9c4 100644 --- a/timm/models/mambaout.py +++ b/timm/models/mambaout.py @@ -12,7 +12,7 @@ from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from timm.layers import trunc_normal_, DropPath, LayerNorm, LayerScale, ClNormMlpClassifierHead +from timm.layers import trunc_normal_, DropPath, LayerNorm, LayerScale, ClNormMlpClassifierHead, get_act_layer from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model @@ -318,10 +318,12 @@ def __init__( super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate + self.output_fmt = 'NHWC' if not isinstance(depths, (list, tuple)): depths = [depths] # it means the model has only one stage if not isinstance(dims, (list, tuple)): dims = [dims] + act_layer = get_act_layer(act_layer) num_stage = len(depths) self.num_stage = num_stage @@ -456,7 +458,7 @@ def checkpoint_filter_fn(state_dict, model): def _cfg(url='', **kwargs): return { 'url': url, - 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 1.0, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head.fc', **kwargs @@ -477,6 +479,7 @@ def _cfg(url='', **kwargs): 'mambaout_small_rw': _cfg(), 'mambaout_base_slim_rw': _cfg(), 'mambaout_base_plus_rw': _cfg(), + 'test_mambaout': _cfg(input_size=(3, 160, 160), pool_size=(5, 5)), } @@ -554,9 +557,26 @@ def mambaout_base_plus_rw(pretrained=False, **kwargs): depths=(3, 4, 27, 3), dims=(128, 256, 512, 768), expansion_ratio=3.0, + conv_ratio=1.5, stem_mid_norm=False, downsample='conv_nf', ls_init_value=1e-6, + act_layer='silu', head_fn='norm_mlp', ) return _create_mambaout('mambaout_base_plus_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def test_mambaout(pretrained=False, **kwargs): + model_args = dict( + depths=(1, 1, 3, 1), + dims=(16, 32, 48, 64), + expansion_ratio=3, + stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-4, + act_layer='silu', + head_fn='norm_mlp', + ) + return _create_mambaout('test_mambaout', pretrained=pretrained, **dict(model_args, **kwargs)) From 9d1dfe8dbe16c432b951555a947191d2a75b47a2 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Fri, 13 Sep 2024 18:21:05 -0700 Subject: [PATCH 6/9] Incorrectly named head_hidden_size --- timm/models/mambaout.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/timm/models/mambaout.py b/timm/models/mambaout.py index a33554a9c4..c2f2f07b46 100644 --- a/timm/models/mambaout.py +++ b/timm/models/mambaout.py @@ -384,7 +384,7 @@ def __init__( drop_rate=drop_rate, ) self.num_features = prev_dim - self.hidden_size = self.head.num_features + self.head_hidden_size = self.head.num_features self.apply(self._init_weights) From 5dc5ee5b421c9badbb6f27c4f2cf7eb3c2721f1d Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Fri, 13 Sep 2024 19:51:33 -0700 Subject: [PATCH 7/9] Add global_pool to mambaout 
__init__ and pass to heads --- timm/models/mambaout.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/timm/models/mambaout.py b/timm/models/mambaout.py index c2f2f07b46..bda69b1124 100644 --- a/timm/models/mambaout.py +++ b/timm/models/mambaout.py @@ -300,6 +300,7 @@ def __init__( self, in_chans=3, num_classes=1000, + global_pool='avg', depths=(3, 3, 9, 3), dims=(96, 192, 384, 576), norm_layer=LayerNorm, @@ -369,7 +370,7 @@ def __init__( self.head = MlpHead( prev_dim, num_classes, - pool_type='avg', + pool_type=global_pool, drop_rate=drop_rate, norm_layer=norm_layer, ) @@ -379,7 +380,7 @@ def __init__( prev_dim, num_classes, hidden_size=int(prev_dim * 4), - pool_type='avg', + pool_type=global_pool, norm_layer=norm_layer, drop_rate=drop_rate, ) From 7efb60c2996dcc7335737e5cf6553e81476f548b Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Fri, 13 Sep 2024 21:14:14 -0700 Subject: [PATCH 8/9] Add first_conv for mambaout --- timm/models/mambaout.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/timm/models/mambaout.py b/timm/models/mambaout.py index bda69b1124..91ea0a0914 100644 --- a/timm/models/mambaout.py +++ b/timm/models/mambaout.py @@ -314,7 +314,6 @@ def __init__( drop_path_rate=0., drop_rate=0., head_fn='default', - **kwargs, ): super().__init__() self.num_classes = num_classes @@ -461,7 +460,8 @@ def _cfg(url='', **kwargs): 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 1.0, 'interpolation': 'bicubic', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head.fc', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', **kwargs } From 82ae247879a2fdf79edb1b40eda42957a0c1e247 Mon Sep 17 00:00:00 2001 From: Ross Wightman Date: Fri, 11 Oct 2024 11:07:40 -0700 Subject: [PATCH 9/9] MambaOut weights on hub, configs finalized --- timm/models/mambaout.py | 101 +++++++++++++++++++++++++++++++--------- 1 file changed, 78 insertions(+), 23 deletions(-) diff --git a/timm/models/mambaout.py b/timm/models/mambaout.py index 91ea0a0914..c748e408ea 100644 --- a/timm/models/mambaout.py +++ b/timm/models/mambaout.py @@ -15,7 +15,7 @@ from timm.layers import trunc_normal_, DropPath, LayerNorm, LayerScale, ClNormMlpClassifierHead, get_act_layer from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq -from ._registry import register_model +from ._registry import register_model, generate_default_cfgs class Stem(nn.Module): @@ -435,6 +435,8 @@ def forward(self, x): def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict: state_dict = state_dict['model'] + if 'stem.conv1.weight' in state_dict: + return state_dict import re out_dict = {} @@ -458,30 +460,52 @@ def checkpoint_filter_fn(state_dict, model): def _cfg(url='', **kwargs): return { 'url': url, - 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 1.0, 'interpolation': 'bicubic', + 'num_classes': 1000, 'input_size': (3, 224, 224), 'test_input_size': (3, 288, 288), + 'pool_size': (7, 7), 'crop_pct': 1.0, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': 'head.fc', **kwargs } -default_cfgs = { - 'mambaout_femto': _cfg( - url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_femto.pth'), - 'mambaout_kobe': _cfg( - url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_kobe.pth'), - 
'mambaout_tiny': _cfg( - url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_tiny.pth'), - 'mambaout_small': _cfg( - url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_small.pth'), - 'mambaout_base': _cfg( - url='https://github.com/yuweihao/MambaOut/releases/download/model/mambaout_base.pth'), - 'mambaout_small_rw': _cfg(), - 'mambaout_base_slim_rw': _cfg(), - 'mambaout_base_plus_rw': _cfg(), - 'test_mambaout': _cfg(input_size=(3, 160, 160), pool_size=(5, 5)), -} +default_cfgs = generate_default_cfgs({ + # original weights + 'mambaout_femto.in1k': _cfg( + hf_hub_id='timm/'), + 'mambaout_kobe.in1k': _cfg( + hf_hub_id='timm/'), + 'mambaout_tiny.in1k': _cfg( + hf_hub_id='timm/'), + 'mambaout_small.in1k': _cfg( + hf_hub_id='timm/'), + 'mambaout_base.in1k': _cfg( + hf_hub_id='timm/'), + + # timm experiments below + 'mambaout_small_rw.sw_e450_in1k': _cfg( + hf_hub_id='timm/', + ), + 'mambaout_base_short_rw.sw_e500_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, + ), + 'mambaout_base_tall_rw.sw_e500_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, + ), + 'mambaout_base_wide_rw.sw_e500_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, + ), + 'mambaout_base_plus_rw.sw_e150_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + ), + 'mambaout_base_plus_rw.sw_e150_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + ), + 'test_mambaout': _cfg(input_size=(3, 160, 160), test_input_size=(3, 192, 192), pool_size=(5, 5)), +}) def _create_mambaout(variant, pretrained=False, **kwargs): @@ -538,9 +562,24 @@ def mambaout_small_rw(pretrained=False, **kwargs): @register_model -def mambaout_base_slim_rw(pretrained=False, **kwargs): +def mambaout_base_short_rw(pretrained=False, **kwargs): model_args = dict( - depths=(3, 4, 27, 3), + depths=(3, 3, 25, 3), + dims=(128, 256, 512, 768), + expansion_ratio=3.0, + conv_ratio=1.25, + stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-6, + head_fn='norm_mlp', + ) + return _create_mambaout('mambaout_base_short_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_base_tall_rw(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 4, 30, 3), dims=(128, 256, 512, 768), expansion_ratio=2.5, conv_ratio=1.25, @@ -549,11 +588,11 @@ def mambaout_base_slim_rw(pretrained=False, **kwargs): ls_init_value=1e-6, head_fn='norm_mlp', ) - return _create_mambaout('mambaout_base_slim_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + return _create_mambaout('mambaout_base_tall_rw', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model -def mambaout_base_plus_rw(pretrained=False, **kwargs): +def mambaout_base_wide_rw(pretrained=False, **kwargs): model_args = dict( depths=(3, 4, 27, 3), dims=(128, 256, 512, 768), @@ -565,6 +604,22 @@ def mambaout_base_plus_rw(pretrained=False, **kwargs): act_layer='silu', head_fn='norm_mlp', ) + return _create_mambaout('mambaout_base_wide_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_base_plus_rw(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 4, 30, 3), + dims=(128, 256, 512, 768), + expansion_ratio=3.0, + conv_ratio=1.5, + stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-6, + act_layer='silu', + head_fn='norm_mlp', + ) return _create_mambaout('mambaout_base_plus_rw', pretrained=pretrained, **dict(model_args, **kwargs))
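
Usage sketch (editorial addition, not part of the patch series): with these commits applied to a timm checkout, the registered variants are created through the normal timm factory. The model names and keyword arguments below follow the registrations in the patches; the rest is illustrative only.

    import torch
    import timm

    # Classification: num_classes is forwarded to MambaOut.__init__;
    # pretrained=True would instead load the original MambaOut weights,
    # remapped by the checkpoint_filter_fn added in patch 2.
    model = timm.create_model('mambaout_tiny', pretrained=False, num_classes=10)
    model.eval()
    x = torch.randn(1, 3, 224, 224)
    logits = model(x)  # shape (1, 10)

    # Feature extraction relies on the feature_info / feature_cfg wiring
    # from patch 4: one output per stage, reductions 4 / 8 / 16 / 32.
    feats = timm.create_model('mambaout_tiny', features_only=True)
    outs = feats(x)
    print([o.shape for o in outs])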