Description
When I used the script at https://gist.github.com/szagoruyko/8828e09cc4687afd324d (which uses utils.lua) to convert the Facebook ResNet-50 model file (https://github.com/facebook/fb.resnet.torch), I got the following error:
Couldnt fold these: {
1 :
{
gradBias : CudaTensor - size: 256
bias : CudaTensor - size: 256
output : CudaTensor - size: 10x256x56x56
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 256
running_var : CudaTensor - size: 256
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 256
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 256
gradWeight : CudaTensor - size: 256
save_std : CudaTensor - size: 256
train : false
}
2 :
{
gradBias : CudaTensor - size: 256
bias : CudaTensor - size: 256
output : CudaTensor - size: 10x256x56x56
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 256
running_var : CudaTensor - size: 256
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 256
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 256
gradWeight : CudaTensor - size: 256
save_std : CudaTensor - size: 256
train : false
}
3 :
{
gradBias : CudaTensor - size: 256
bias : CudaTensor - size: 256
output : CudaTensor - size: 10x256x56x56
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 256
running_var : CudaTensor - size: 256
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 256
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 256
gradWeight : CudaTensor - size: 256
save_std : CudaTensor - size: 256
train : false
}
4 :
{
gradBias : CudaTensor - size: 512
bias : CudaTensor - size: 512
output : CudaTensor - size: 10x512x28x28
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 512
running_var : CudaTensor - size: 512
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 512
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 512
gradWeight : CudaTensor - size: 512
save_std : CudaTensor - size: 512
train : false
}
5 :
{
gradBias : CudaTensor - size: 512
bias : CudaTensor - size: 512
output : CudaTensor - size: 10x512x28x28
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 512
running_var : CudaTensor - size: 512
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 512
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 512
gradWeight : CudaTensor - size: 512
save_std : CudaTensor - size: 512
train : false
}
6 :
{
gradBias : CudaTensor - size: 512
bias : CudaTensor - size: 512
output : CudaTensor - size: 10x512x28x28
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 512
running_var : CudaTensor - size: 512
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 512
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 512
gradWeight : CudaTensor - size: 512
save_std : CudaTensor - size: 512
train : false
}
7 :
{
gradBias : CudaTensor - size: 512
bias : CudaTensor - size: 512
output : CudaTensor - size: 10x512x28x28
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 512
running_var : CudaTensor - size: 512
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 512
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 512
gradWeight : CudaTensor - size: 512
save_std : CudaTensor - size: 512
train : false
}
8 :
{
gradBias : CudaTensor - size: 1024
bias : CudaTensor - size: 1024
output : CudaTensor - size: 10x1024x14x14
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 1024
running_var : CudaTensor - size: 1024
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 1024
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 1024
gradWeight : CudaTensor - size: 1024
save_std : CudaTensor - size: 1024
train : false
}
9 :
{
gradBias : CudaTensor - size: 1024
bias : CudaTensor - size: 1024
output : CudaTensor - size: 10x1024x14x14
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 1024
running_var : CudaTensor - size: 1024
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 1024
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 1024
gradWeight : CudaTensor - size: 1024
save_std : CudaTensor - size: 1024
train : false
}
10 :
{
gradBias : CudaTensor - size: 1024
bias : CudaTensor - size: 1024
output : CudaTensor - size: 10x1024x14x14
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 1024
running_var : CudaTensor - size: 1024
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 1024
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 1024
gradWeight : CudaTensor - size: 1024
save_std : CudaTensor - size: 1024
train : false
}
11 :
{
gradBias : CudaTensor - size: 1024
bias : CudaTensor - size: 1024
output : CudaTensor - size: 10x1024x14x14
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 1024
running_var : CudaTensor - size: 1024
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 1024
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 1024
gradWeight : CudaTensor - size: 1024
save_std : CudaTensor - size: 1024
train : false
}
12 :
{
gradBias : CudaTensor - size: 1024
bias : CudaTensor - size: 1024
output : CudaTensor - size: 10x1024x14x14
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 1024
running_var : CudaTensor - size: 1024
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 1024
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 1024
gradWeight : CudaTensor - size: 1024
save_std : CudaTensor - size: 1024
train : false
}
13 :
{
gradBias : CudaTensor - size: 1024
bias : CudaTensor - size: 1024
output : CudaTensor - size: 10x1024x14x14
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 1024
running_var : CudaTensor - size: 1024
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 1024
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 1024
gradWeight : CudaTensor - size: 1024
save_std : CudaTensor - size: 1024
train : false
}
14 :
{
gradBias : CudaTensor - size: 2048
bias : CudaTensor - size: 2048
output : CudaTensor - size: 10x2048x7x7
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 2048
running_var : CudaTensor - size: 2048
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 2048
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 2048
gradWeight : CudaTensor - size: 2048
save_std : CudaTensor - size: 2048
train : false
}
15 :
{
gradBias : CudaTensor - size: 2048
bias : CudaTensor - size: 2048
output : CudaTensor - size: 10x2048x7x7
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 2048
running_var : CudaTensor - size: 2048
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 2048
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 2048
gradWeight : CudaTensor - size: 2048
save_std : CudaTensor - size: 2048
train : false
}
16 :
{
gradBias : CudaTensor - size: 2048
bias : CudaTensor - size: 2048
output : CudaTensor - size: 10x2048x7x7
gradInput : CudaTensor - empty
save_mean : CudaTensor - size: 2048
running_var : CudaTensor - size: 2048
momentum : 0.1
eps : 1e-05
weight : CudaTensor - size: 2048
_type : "torch.CudaTensor"
affine : true
running_mean : CudaTensor - size: 2048
gradWeight : CudaTensor - size: 2048
save_std : CudaTensor - size: 2048
train : false
}
}
Any idea how to resolve it?