Commit f767824
Your Name committed on Jun 28, 2022
1 parent f41d0da

Showing 5 changed files with 162 additions and 7 deletions.
.gitignore

```diff
@@ -10,3 +10,4 @@ vendor/
 *.pyc
 *.egg-info/
 
+weights/
```
nb/torch/utils/checkpoint.py (new file)
```python
import torch


def load_ckp_unwrap_module(ckp_p):
    """Load a checkpoint on CPU and strip the 'module.' prefix that
    nn.DataParallel / DistributedDataParallel add to every key."""
    sd = torch.load(ckp_p, map_location='cpu')

    # Training checkpoints often nest the weights under 'state_dict' or 'model'.
    if 'state_dict' in sd.keys():
        state_dict = sd['state_dict']
    elif 'model' in sd.keys():
        state_dict = sd['model']
    else:
        state_dict = sd

    # Rebuild the dict with the wrapper prefix removed.
    new_sd = {}
    for k, v in state_dict.items():
        if 'module.' in k:
            new_sd[k.replace('module.', '')] = v
        else:
            new_sd[k] = v
    return new_sd
```
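A minimal usage sketch of the helper (the `/tmp/demo.pth` path and the toy module are hypothetical, not part of the commit):

```python
import torch
import torch.nn as nn

# nn.DataParallel prefixes every state-dict key with 'module.'.
net = nn.DataParallel(nn.Linear(4, 2))
torch.save({'state_dict': net.state_dict()}, '/tmp/demo.pth')

sd = load_ckp_unwrap_module('/tmp/demo.pth')
print(sorted(sd.keys()))  # ['bias', 'weight'] -- wrapper prefix stripped
```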
Export script (new file)
```python
import os
import sys

import torch

from nb.torch.backbones.mobileone import MobileOneNet, make_mobileone_s0
from nb.torch.utils.checkpoint import load_ckp_unwrap_module

# Path to the trained multi-branch checkpoint, passed on the command line.
a = sys.argv[1]

sd = load_ckp_unwrap_module(a)

x = torch.randn(1, 3, 224, 224)

# Build the training-time (multi-branch) model and load the weights.
model = make_mobileone_s0(deploy=False)
model.load_state_dict(sd)
print("original model loaded.")

# Re-parameterize: fuse each block's parallel branches into single convs.
for module in model.modules():
    if hasattr(module, "switch_to_deploy"):
        module.switch_to_deploy()

o1 = model(x)

# Build the single-branch (deploy) model and copy the fused weights into it.
deploy_model = make_mobileone_s0(deploy=True)
deploy_model.eval()
deploy_model.load_state_dict(model.state_dict())
o = deploy_model(x)

# The two outputs should agree up to floating-point noise.
print((o1 - o).sum())

# Save the fused state dict next to the input checkpoint ...
n_f = os.path.join(
    os.path.dirname(a), os.path.basename(a).split(".")[0] + "_reparam.pth"
)
torch.save(model.state_dict(), n_f)

# ... and a TorchScript trace of the deploy model for downstream conversion.
mod = torch.jit.trace(deploy_model, x)

n_f2 = os.path.join(
    os.path.dirname(a), os.path.basename(a).split(".")[0] + "_reparam.pt"
)
mod.save(n_f2)
```
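Continuing from the script above, a stricter equivalence check than summing the raw difference is a max-error test; a minimal sketch (the tolerance is an assumption, not part of the original script):

```python
# Max absolute deviation between the in-place-fused model and the deploy model.
max_err = (o1 - o).abs().max().item()
print(f"max abs error: {max_err:.3e}")
assert torch.allclose(o1, o, atol=1e-5), "deploy model diverged after fusion"
```

The traced `*_reparam.pt` is then ready for conversion; the param dump below was presumably produced by pnnx, e.g. with `pnnx mobileone_s0_reparam.pt inputshape=[1,3,224,224]` (the checkpoint name here is illustrative).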
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
MobileOne-S0 graph dump (new file, pnnx/ncnn .param format)

```
7767517
91 90
pnnx.Input pnnx_input_0 0 1 0 #0=(1,3,224,224)f32
nn.Conv2d convbn2d_0 1 1 0 1 bias=True dilation=(1,1) groups=1 in_channels=3 kernel_size=(3,3) out_channels=48 padding=(1,1) padding_mode=zeros stride=(2,2) @bias=(48)f32 @weight=(48,3,3,3)f32 $input=0 #0=(1,3,224,224)f32 #1=(1,48,112,112)f32
nn.ReLU stage0.2 1 1 1 2 #1=(1,48,112,112)f32 #2=(1,48,112,112)f32
nn.Conv2d stage1.0.dw_reparam 1 1 2 3 bias=True dilation=(1,1) groups=48 in_channels=48 kernel_size=(3,3) out_channels=48 padding=(1,1) padding_mode=zeros stride=(2,2) @bias=(48)f32 @weight=(48,1,3,3)f32 #2=(1,48,112,112)f32 #3=(1,48,56,56)f32
nn.ReLU stage1.0.nonlinearity 1 1 3 4 #3=(1,48,56,56)f32 #4=(1,48,56,56)f32
nn.Conv2d stage1.0.pw_reparam 1 1 4 5 bias=True dilation=(1,1) groups=1 in_channels=48 kernel_size=(1,1) out_channels=48 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(48)f32 @weight=(48,48,1,1)f32 #4=(1,48,56,56)f32 #5=(1,48,56,56)f32
nn.ReLU pnnx_unique_0 1 1 5 6 #5=(1,48,56,56)f32 #6=(1,48,56,56)f32
nn.Conv2d stage1.1.dw_reparam 1 1 6 7 bias=True dilation=(1,1) groups=48 in_channels=48 kernel_size=(3,3) out_channels=48 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(48)f32 @weight=(48,1,3,3)f32 #6=(1,48,56,56)f32 #7=(1,48,56,56)f32
nn.ReLU stage1.1.nonlinearity 1 1 7 8 #7=(1,48,56,56)f32 #8=(1,48,56,56)f32
nn.Conv2d stage1.1.pw_reparam 1 1 8 9 bias=True dilation=(1,1) groups=1 in_channels=48 kernel_size=(1,1) out_channels=48 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(48)f32 @weight=(48,48,1,1)f32 #8=(1,48,56,56)f32 #9=(1,48,56,56)f32
nn.ReLU pnnx_unique_1 1 1 9 10 #9=(1,48,56,56)f32 #10=(1,48,56,56)f32
nn.Conv2d stage2.0.dw_reparam 1 1 10 11 bias=True dilation=(1,1) groups=48 in_channels=48 kernel_size=(3,3) out_channels=48 padding=(1,1) padding_mode=zeros stride=(2,2) @bias=(48)f32 @weight=(48,1,3,3)f32 #10=(1,48,56,56)f32 #11=(1,48,28,28)f32
nn.ReLU stage2.0.nonlinearity 1 1 11 12 #11=(1,48,28,28)f32 #12=(1,48,28,28)f32
nn.Conv2d stage2.0.pw_reparam 1 1 12 13 bias=True dilation=(1,1) groups=1 in_channels=48 kernel_size=(1,1) out_channels=128 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,48,1,1)f32 #12=(1,48,28,28)f32 #13=(1,128,28,28)f32
nn.ReLU pnnx_unique_2 1 1 13 14 #13=(1,128,28,28)f32 #14=(1,128,28,28)f32
nn.Conv2d stage2.1.dw_reparam 1 1 14 15 bias=True dilation=(1,1) groups=128 in_channels=128 kernel_size=(3,3) out_channels=128 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,1,3,3)f32 #14=(1,128,28,28)f32 #15=(1,128,28,28)f32
nn.ReLU stage2.1.nonlinearity 1 1 15 16 #15=(1,128,28,28)f32 #16=(1,128,28,28)f32
nn.Conv2d stage2.1.pw_reparam 1 1 16 17 bias=True dilation=(1,1) groups=1 in_channels=128 kernel_size=(1,1) out_channels=128 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,128,1,1)f32 #16=(1,128,28,28)f32 #17=(1,128,28,28)f32
nn.ReLU pnnx_unique_3 1 1 17 18 #17=(1,128,28,28)f32 #18=(1,128,28,28)f32
nn.Conv2d stage2.2.dw_reparam 1 1 18 19 bias=True dilation=(1,1) groups=128 in_channels=128 kernel_size=(3,3) out_channels=128 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,1,3,3)f32 #18=(1,128,28,28)f32 #19=(1,128,28,28)f32
nn.ReLU stage2.2.nonlinearity 1 1 19 20 #19=(1,128,28,28)f32 #20=(1,128,28,28)f32
nn.Conv2d stage2.2.pw_reparam 1 1 20 21 bias=True dilation=(1,1) groups=1 in_channels=128 kernel_size=(1,1) out_channels=128 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,128,1,1)f32 #20=(1,128,28,28)f32 #21=(1,128,28,28)f32
nn.ReLU pnnx_unique_4 1 1 21 22 #21=(1,128,28,28)f32 #22=(1,128,28,28)f32
nn.Conv2d stage2.3.dw_reparam 1 1 22 23 bias=True dilation=(1,1) groups=128 in_channels=128 kernel_size=(3,3) out_channels=128 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,1,3,3)f32 #22=(1,128,28,28)f32 #23=(1,128,28,28)f32
nn.ReLU stage2.3.nonlinearity 1 1 23 24 #23=(1,128,28,28)f32 #24=(1,128,28,28)f32
nn.Conv2d stage2.3.pw_reparam 1 1 24 25 bias=True dilation=(1,1) groups=1 in_channels=128 kernel_size=(1,1) out_channels=128 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,128,1,1)f32 #24=(1,128,28,28)f32 #25=(1,128,28,28)f32
nn.ReLU pnnx_unique_5 1 1 25 26 #25=(1,128,28,28)f32 #26=(1,128,28,28)f32
nn.Conv2d stage2.4.dw_reparam 1 1 26 27 bias=True dilation=(1,1) groups=128 in_channels=128 kernel_size=(3,3) out_channels=128 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,1,3,3)f32 #26=(1,128,28,28)f32 #27=(1,128,28,28)f32
nn.ReLU stage2.4.nonlinearity 1 1 27 28 #27=(1,128,28,28)f32 #28=(1,128,28,28)f32
nn.Conv2d stage2.4.pw_reparam 1 1 28 29 bias=True dilation=(1,1) groups=1 in_channels=128 kernel_size=(1,1) out_channels=128 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,128,1,1)f32 #28=(1,128,28,28)f32 #29=(1,128,28,28)f32
nn.ReLU pnnx_unique_6 1 1 29 30 #29=(1,128,28,28)f32 #30=(1,128,28,28)f32
nn.Conv2d stage2.5.dw_reparam 1 1 30 31 bias=True dilation=(1,1) groups=128 in_channels=128 kernel_size=(3,3) out_channels=128 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,1,3,3)f32 #30=(1,128,28,28)f32 #31=(1,128,28,28)f32
nn.ReLU stage2.5.nonlinearity 1 1 31 32 #31=(1,128,28,28)f32 #32=(1,128,28,28)f32
nn.Conv2d stage2.5.pw_reparam 1 1 32 33 bias=True dilation=(1,1) groups=1 in_channels=128 kernel_size=(1,1) out_channels=128 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,128,1,1)f32 #32=(1,128,28,28)f32 #33=(1,128,28,28)f32
nn.ReLU pnnx_unique_7 1 1 33 34 #33=(1,128,28,28)f32 #34=(1,128,28,28)f32
nn.Conv2d stage2.6.dw_reparam 1 1 34 35 bias=True dilation=(1,1) groups=128 in_channels=128 kernel_size=(3,3) out_channels=128 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,1,3,3)f32 #34=(1,128,28,28)f32 #35=(1,128,28,28)f32
nn.ReLU stage2.6.nonlinearity 1 1 35 36 #35=(1,128,28,28)f32 #36=(1,128,28,28)f32
nn.Conv2d stage2.6.pw_reparam 1 1 36 37 bias=True dilation=(1,1) groups=1 in_channels=128 kernel_size=(1,1) out_channels=128 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,128,1,1)f32 #36=(1,128,28,28)f32 #37=(1,128,28,28)f32
nn.ReLU pnnx_unique_8 1 1 37 38 #37=(1,128,28,28)f32 #38=(1,128,28,28)f32
nn.Conv2d stage2.7.dw_reparam 1 1 38 39 bias=True dilation=(1,1) groups=128 in_channels=128 kernel_size=(3,3) out_channels=128 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,1,3,3)f32 #38=(1,128,28,28)f32 #39=(1,128,28,28)f32
nn.ReLU stage2.7.nonlinearity 1 1 39 40 #39=(1,128,28,28)f32 #40=(1,128,28,28)f32
nn.Conv2d stage2.7.pw_reparam 1 1 40 41 bias=True dilation=(1,1) groups=1 in_channels=128 kernel_size=(1,1) out_channels=128 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(128)f32 @weight=(128,128,1,1)f32 #40=(1,128,28,28)f32 #41=(1,128,28,28)f32
nn.ReLU pnnx_unique_9 1 1 41 42 #41=(1,128,28,28)f32 #42=(1,128,28,28)f32
nn.Conv2d stage3.0.dw_reparam 1 1 42 43 bias=True dilation=(1,1) groups=128 in_channels=128 kernel_size=(3,3) out_channels=128 padding=(1,1) padding_mode=zeros stride=(2,2) @bias=(128)f32 @weight=(128,1,3,3)f32 #42=(1,128,28,28)f32 #43=(1,128,14,14)f32
nn.ReLU stage3.0.nonlinearity 1 1 43 44 #43=(1,128,14,14)f32 #44=(1,128,14,14)f32
nn.Conv2d stage3.0.pw_reparam 1 1 44 45 bias=True dilation=(1,1) groups=1 in_channels=128 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,128,1,1)f32 #44=(1,128,14,14)f32 #45=(1,256,14,14)f32
nn.ReLU pnnx_unique_10 1 1 45 46 #45=(1,256,14,14)f32 #46=(1,256,14,14)f32
nn.Conv2d stage3.1.dw_reparam 1 1 46 47 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,1,3,3)f32 #46=(1,256,14,14)f32 #47=(1,256,14,14)f32
nn.ReLU stage3.1.nonlinearity 1 1 47 48 #47=(1,256,14,14)f32 #48=(1,256,14,14)f32
nn.Conv2d stage3.1.pw_reparam 1 1 48 49 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,256,1,1)f32 #48=(1,256,14,14)f32 #49=(1,256,14,14)f32
nn.ReLU pnnx_unique_11 1 1 49 50 #49=(1,256,14,14)f32 #50=(1,256,14,14)f32
nn.Conv2d stage3.2.dw_reparam 1 1 50 51 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,1,3,3)f32 #50=(1,256,14,14)f32 #51=(1,256,14,14)f32
nn.ReLU stage3.2.nonlinearity 1 1 51 52 #51=(1,256,14,14)f32 #52=(1,256,14,14)f32
nn.Conv2d stage3.2.pw_reparam 1 1 52 53 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,256,1,1)f32 #52=(1,256,14,14)f32 #53=(1,256,14,14)f32
nn.ReLU pnnx_unique_12 1 1 53 54 #53=(1,256,14,14)f32 #54=(1,256,14,14)f32
nn.Conv2d stage3.3.dw_reparam 1 1 54 55 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,1,3,3)f32 #54=(1,256,14,14)f32 #55=(1,256,14,14)f32
nn.ReLU stage3.3.nonlinearity 1 1 55 56 #55=(1,256,14,14)f32 #56=(1,256,14,14)f32
nn.Conv2d stage3.3.pw_reparam 1 1 56 57 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,256,1,1)f32 #56=(1,256,14,14)f32 #57=(1,256,14,14)f32
nn.ReLU pnnx_unique_13 1 1 57 58 #57=(1,256,14,14)f32 #58=(1,256,14,14)f32
nn.Conv2d stage3.4.dw_reparam 1 1 58 59 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,1,3,3)f32 #58=(1,256,14,14)f32 #59=(1,256,14,14)f32
nn.ReLU stage3.4.nonlinearity 1 1 59 60 #59=(1,256,14,14)f32 #60=(1,256,14,14)f32
nn.Conv2d stage3.4.pw_reparam 1 1 60 61 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,256,1,1)f32 #60=(1,256,14,14)f32 #61=(1,256,14,14)f32
nn.ReLU pnnx_unique_14 1 1 61 62 #61=(1,256,14,14)f32 #62=(1,256,14,14)f32
nn.Conv2d stage4.0.dw_reparam 1 1 62 63 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,1,3,3)f32 #62=(1,256,14,14)f32 #63=(1,256,14,14)f32
nn.ReLU stage4.0.nonlinearity 1 1 63 64 #63=(1,256,14,14)f32 #64=(1,256,14,14)f32
nn.Conv2d stage4.0.pw_reparam 1 1 64 65 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,256,1,1)f32 #64=(1,256,14,14)f32 #65=(1,256,14,14)f32
nn.ReLU pnnx_unique_15 1 1 65 66 #65=(1,256,14,14)f32 #66=(1,256,14,14)f32
nn.Conv2d stage4.1.dw_reparam 1 1 66 67 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,1,3,3)f32 #66=(1,256,14,14)f32 #67=(1,256,14,14)f32
nn.ReLU stage4.1.nonlinearity 1 1 67 68 #67=(1,256,14,14)f32 #68=(1,256,14,14)f32
nn.Conv2d stage4.1.pw_reparam 1 1 68 69 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,256,1,1)f32 #68=(1,256,14,14)f32 #69=(1,256,14,14)f32
nn.ReLU pnnx_unique_16 1 1 69 70 #69=(1,256,14,14)f32 #70=(1,256,14,14)f32
nn.Conv2d stage4.2.dw_reparam 1 1 70 71 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,1,3,3)f32 #70=(1,256,14,14)f32 #71=(1,256,14,14)f32
nn.ReLU stage4.2.nonlinearity 1 1 71 72 #71=(1,256,14,14)f32 #72=(1,256,14,14)f32
nn.Conv2d stage4.2.pw_reparam 1 1 72 73 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,256,1,1)f32 #72=(1,256,14,14)f32 #73=(1,256,14,14)f32
nn.ReLU pnnx_unique_17 1 1 73 74 #73=(1,256,14,14)f32 #74=(1,256,14,14)f32
nn.Conv2d stage4.3.dw_reparam 1 1 74 75 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,1,3,3)f32 #74=(1,256,14,14)f32 #75=(1,256,14,14)f32
nn.ReLU stage4.3.nonlinearity 1 1 75 76 #75=(1,256,14,14)f32 #76=(1,256,14,14)f32
nn.Conv2d stage4.3.pw_reparam 1 1 76 77 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,256,1,1)f32 #76=(1,256,14,14)f32 #77=(1,256,14,14)f32
nn.ReLU pnnx_unique_18 1 1 77 78 #77=(1,256,14,14)f32 #78=(1,256,14,14)f32
nn.Conv2d stage4.4.dw_reparam 1 1 78 79 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,1,3,3)f32 #78=(1,256,14,14)f32 #79=(1,256,14,14)f32
nn.ReLU stage4.4.nonlinearity 1 1 79 80 #79=(1,256,14,14)f32 #80=(1,256,14,14)f32
nn.Conv2d stage4.4.pw_reparam 1 1 80 81 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=256 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(256)f32 @weight=(256,256,1,1)f32 #80=(1,256,14,14)f32 #81=(1,256,14,14)f32
nn.ReLU pnnx_unique_19 1 1 81 82 #81=(1,256,14,14)f32 #82=(1,256,14,14)f32
nn.Conv2d stage5.0.dw_reparam 1 1 82 83 bias=True dilation=(1,1) groups=256 in_channels=256 kernel_size=(3,3) out_channels=256 padding=(1,1) padding_mode=zeros stride=(2,2) @bias=(256)f32 @weight=(256,1,3,3)f32 #82=(1,256,14,14)f32 #83=(1,256,7,7)f32
nn.ReLU stage5.0.nonlinearity 1 1 83 84 #83=(1,256,7,7)f32 #84=(1,256,7,7)f32
nn.Conv2d stage5.0.pw_reparam 1 1 84 85 bias=True dilation=(1,1) groups=1 in_channels=256 kernel_size=(1,1) out_channels=1024 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(1024)f32 @weight=(1024,256,1,1)f32 #84=(1,256,7,7)f32 #85=(1,1024,7,7)f32
nn.ReLU pnnx_unique_20 1 1 85 86 #85=(1,1024,7,7)f32 #86=(1,1024,7,7)f32
nn.AdaptiveAvgPool2d avg_pool 1 1 86 87 output_size=(1,1) #86=(1,1024,7,7)f32 #87=(1,1024,1,1)f32
torch.flatten torch.flatten_0 1 1 87 88 end_dim=-1 start_dim=1 $input=87 #87=(1,1024,1,1)f32 #88=(1,1024)f32
nn.Linear fc1.0 1 1 88 89 bias=True in_features=1024 out_features=1000 @bias=(1000)f32 @weight=(1000,1024)f32 #88=(1,1024)f32 #89=(1,1000)f32
pnnx.Output pnnx_output_0 1 0 89 #89=(1,1000)f32
```
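For reference, the first line of a pnnx/ncnn .param file is the magic number 7767517 and the second line gives the operator and blob counts (here 91 operators and 90 blobs, numbered 0 to 89); each subsequent line lists the operator type, layer name, input/output blob counts and ids, then layer parameters, with the `#n=(...)f32` annotations recording blob shapes. The dump confirms the re-parameterized structure: every MobileOne block has collapsed to a depthwise 3x3 conv, ReLU, pointwise 1x1 conv, ReLU chain. A minimal header sanity check, assuming the dump is saved as mobileone_s0_reparam.pnnx.param (a hypothetical name):

```python
# Hypothetical sanity check of the pnnx param header and operator count.
with open("mobileone_s0_reparam.pnnx.param") as f:
    magic = int(f.readline())                    # 7767517: ncnn/pnnx magic number
    layer_count, blob_count = map(int, f.readline().split())
    ops = [line.split(None, 1)[0] for line in f if line.strip()]

assert magic == 7767517
assert len(ops) == layer_count                   # 91 operators in this dump
print(layer_count, blob_count, ops[0], ops[-1])  # 91 90 pnnx.Input pnnx.Output
```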