Some modifications to the Pool-related layers #97

Open
wants to merge 5 commits into base: master
Changes from all commits
6 changes: 5 additions & 1 deletion Caffe/caffe.proto
@@ -1657,7 +1657,11 @@ message PoolingParameter {

///////////////////////
// Specify floor/ceil mode
optional bool ceil_mode = 13 [default = true];
enum RoundMode {
CEIL = 0;
FLOOR = 1;
}
optional RoundMode round_mode = 13 [default = CEIL];
///////////////////////////////
}
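For reference, a minimal sketch (an assumption, not part of this PR) of how the new field is set through the regenerated Python bindings, assuming Caffe/caffe_pb2.py is importable as below; field 13 switches from the boolean ceil_mode to the RoundMode enum:

from Caffe import caffe_pb2 as pb

pool_param = pb.PoolingParameter()
# Field 13 is now the RoundMode enum rather than `optional bool ceil_mode`.
pool_param.round_mode = pb.PoolingParameter.FLOOR
assert pool_param.round_mode == pb.PoolingParameter.RoundMode.Value("FLOOR")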

4,577 changes: 2,505 additions & 2,072 deletions Caffe/caffe_pb2.py

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions Caffe/layer_param.py
@@ -110,12 +110,12 @@ def permute_param(self, order1, order2, order3, order4):
self.param.permute_param.CopyFrom(permute_param)


def pool_param(self,type='MAX',kernel_size=2,stride=2,pad=None, ceil_mode = True):
def pool_param(self,type='MAX',kernel_size=2,stride=2,pad=None, round_mode = "CEIL"):
pool_param=pb.PoolingParameter()
pool_param.pool=pool_param.PoolMethod.Value(type)
pool_param.kernel_size=pair_process(kernel_size)
pool_param.stride=pair_process(stride)
pool_param.ceil_mode=ceil_mode
pool_param.round_mode=pool_param.RoundMode.Value(round_mode)
if pad:
if isinstance(pad,tuple):
pool_param.pad_h = pad[0]
@@ -180,4 +180,4 @@ def copy_from(self,layer_param):
pass

def set_enum(param,key,value):
setattr(param,key,param.Value(value))
setattr(param,key,param.Value(value))
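As a usage sketch of the converter-side API after this change (the layer and blob names below are hypothetical, chosen only for illustration), pool_param now takes round_mode as a string in place of the old ceil_mode boolean:

from Caffe import caffe_net

layer = caffe_net.Layer_param(name='pool1', type='Pooling',
                              bottom=['conv1_blob'], top=['pool1_blob'])
# "FLOOR" mirrors PyTorch's default ceil_mode=False; Caffe's historical default is CEIL.
layer.pool_param(type='MAX', kernel_size=3, stride=2, pad=1, round_mode="FLOOR")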
19 changes: 9 additions & 10 deletions example/resnet_pytorch_2_caffe.py
@@ -2,18 +2,17 @@
sys.path.insert(0,'.')
import torch
from torch.autograd import Variable
from torchvision.models import resnet
#from torchvision.models import resnet
from model import resnet
import pytorch_to_caffe

if __name__=='__main__':
name='resnet18'
resnet18=resnet.resnet18()
checkpoint = torch.load("/home/shining/Downloads/resnet18-5c106cde.pth")
name='resnet50'
resnet50=resnet.resnet50(num_classes=2)

resnet18.load_state_dict(checkpoint)
resnet18.eval()
input=torch.ones([1,3,224,224])
#input=torch.ones([1,3,224,224])
pytorch_to_caffe.trans_net(resnet18,input,name)
#resnet50.load_state_dict(checkpoint)
resnet50.eval()
input=torch.ones([1,3,160,160])
pytorch_to_caffe.trans_net(resnet50,input,name)
pytorch_to_caffe.save_prototxt('{}.prototxt'.format(name))
pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))
pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))
2 changes: 1 addition & 1 deletion example/resnet_pytorch_analysis_example.py
@@ -7,5 +7,5 @@
resnet18=resnet.resnet18()
input_tensor=torch.ones(1,3,224,224)
blob_dict, tracked_layers=pytorch_analyser.analyse(resnet18,input_tensor)
pytorch_analyser.save_csv(tracked_layers,'/tmp/analysis.csv')
pytorch_analyser.save_csv(tracked_layers,'analysis.csv')

9 changes: 7 additions & 2 deletions model/resnet.py
@@ -1,4 +1,6 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import torch.utils.model_zoo as model_zoo

@@ -107,7 +109,7 @@ def __init__(self, block, layers, num_classes=1000):
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)

for m in self.modules():
@@ -147,8 +149,11 @@ def forward(self, x):
x = self.layer4(x)

x = self.avgpool(x)
x = x.view(x.size(0), -1)
#x = torch.mean(x, dim=(2, 3))
#x = torch.flatten(x, 1)
x = x.view(1, -1)
x = self.fc(x)
x = F.softmax(x, dim=1)

return x
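The swap from the fixed AvgPool2d(7, stride=1) to AdaptiveAvgPool2d((1, 1)) is what keeps the head working for inputs other than 224x224 (such as the 160x160 input in the example script): the adaptive pool always reduces the feature map to 1x1, while the fixed 7x7 kernel only does so for a 7x7 feature map. A small sketch, with purely illustrative shapes:

import torch
import torch.nn as nn

for hw in (5, 7, 10):                        # final feature-map sizes from different input resolutions
    feat = torch.randn(1, 2048, hw, hw)      # 2048 channels, as in a Bottleneck-based ResNet-50
    out = nn.AdaptiveAvgPool2d((1, 1))(feat) # always 1x1, so the view(1, -1) above stays valid
    print(hw, out.shape)                     # torch.Size([1, 2048, 1, 1]) in every case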

21 changes: 13 additions & 8 deletions pytorch_to_caffe.py
@@ -44,9 +44,9 @@ def __init__(self):
"""
self.layers={}
self.detail_layers={}
self.detail_blobs={}
self._blobs=Blob_LOG()
self._blobs_data=[]
self.detail_blobs={} # specific layer names
self._blobs=Blob_LOG() # look up a blob's name by the layer's memory address
self._blobs_data=[] # layer names
self.cnet=caffe_net.Caffemodel('')
self.debug=True

@@ -91,8 +91,9 @@ def blobs(self, var):
try:
return self._blobs[var]
except:
print(self._blobs.data.items())
print("WARNING: CANNOT FOUND blob {}".format(var))
return None
#return None

log=TransLog()

@@ -134,7 +135,7 @@ def _linear(raw,input, weight, bias=None):
x=raw(input,weight,bias)
layer_name=log.add_layer(name='fc')
top_blobs=log.add_blobs([x],name='fc_blob')
layer=caffe_net.Layer_param(name=layer_name,type='InnerProduct',
layer=caffe_net.Layer_param(name=layer_name,type='InnerProduct',
bottom=[log.blobs(input)],top=top_blobs)
layer.fc_param(x.size()[1],has_bias=bias is not None)
if bias is not None:
@@ -166,8 +167,12 @@ def _pool(type,raw,input,x,kernel_size,stride,padding,ceil_mode):
bottom=[log.blobs(input)], top=top_blobs)
# TODO w,h different kernel, stride and padding
# processing ceil mode
if ceil_mode == True:
round_mode = "CEIL"
else:
round_mode = "FLOOR"
layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride,
pad=padding, type=type.upper() , ceil_mode = ceil_mode)
pad=padding, type=type.upper() , round_mode = round_mode)
log.cnet.add_layer(layer)
if ceil_mode==False and stride is not None:
oheight = (input.size()[2] - _pair(kernel_size)[0] + 2 * _pair(padding)[0]) % (_pair(stride)[0])
@@ -186,7 +191,7 @@ def _max_pool2d(raw,input, kernel_size, stride=None, padding=0, dilation=1,
_pool('max',raw,input, x, kernel_size, stride, padding,ceil_mode)
return x

def _avg_pool2d(raw,input, kernel_size, stride = None, padding = 0, ceil_mode = False, count_include_pad = True):
def _avg_pool2d(raw,input, kernel_size, stride = None, padding = 0, ceil_mode = False, count_include_pad = True, divisor_override=None):
x = raw(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
_pool('ave',raw,input, x, kernel_size, stride, padding,ceil_mode)
return x
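For context on why the CEIL/FLOOR mapping above matters, a small sketch of the standard pooling output-size formula (numbers are only an illustration, not taken from this PR):

import math

def pool_out(in_size, kernel, stride, pad, ceil_mode):
    # out = round((in + 2*pad - kernel) / stride) + 1, rounded up (CEIL) or down (FLOOR)
    rnd = math.ceil if ceil_mode else math.floor
    return int(rnd((in_size + 2 * pad - kernel) / stride)) + 1

print(pool_out(112, 3, 2, 0, ceil_mode=True))   # 56, Caffe's historical CEIL behaviour
print(pool_out(112, 3, 2, 0, ceil_mode=False))  # 55, PyTorch's default FLOOR behaviour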
@@ -491,7 +496,7 @@ def _view(input, *args):
return x
layer_name=log.add_layer(name='view')
top_blobs=log.add_blobs([x],name='view_blob')
layer=caffe_net.Layer_param(name=layer_name,type='Reshape',
layer=caffe_net.Layer_param(name=layer_name,type='Reshape',
bottom=[log.blobs(input)],top=top_blobs)
# TODO: add reshape to nn_tools layer
dims=list(args)