使用 module_stats()函数无法计算量化后的int8 模型的参数量/计算量

开发环境:

  • MegEngine 1.3.1
  • Python 3.7
  • Jupyter Notebook

问题详述:
使用 module_stats()函数统计量化后模型的参数量和计算量,代码如下:

import megengine as mge
import ipynb_importer
from model import MCNN
from megengine.utils import module_stats

# Build the network and load the int8-quantized checkpoint.
# strict=False: tolerate key mismatches between checkpoint and model.
mcnn = MCNN()
# print(mcnn)
checkpoint = mge.load('mcnn_net_quantized.pkl')
mcnn.load_state_dict(checkpoint, strict=False)
# Specify the dummy input shape as (N, C, H, W).
input_shape = (1, 3, 1200, 1600)

# float model.
# NOTE(review): module_stats internally builds float32 zero tensors as the
# dummy input (see module_stats.py line 373 in the traceback below), so on a
# quantized model the first conv receives src=Float32 with filter=QuantizedS8,
# which triggers the dtype-mismatch RuntimeError reported in this post.
total_params, total_flops = module_stats.module_stats(
    mcnn, input_shape, log_params=True, log_flops=True
)
print("params: {} flops: {}".format(total_params, total_flops))

报错如下:

RuntimeError                              Traceback (most recent call last)
<ipython-input-7-46a892a93210> in <module>
     14 # float model.
     15 total_params, total_flops = module_stats.module_stats(
---> 16     mcnn, input_shape, log_params=True, log_flops=True
     17 )
     18 print("params: {} flops: {}".format(total_params, total_flops))

~/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/utils/module_stats.py in module_stats(model, input_size, bar_length_max, log_params, log_flops)
    373     inputs = [zeros(in_size, dtype=np.float32) for in_size in input_size]
    374     with adjust_stats(model, training=False) as model:
--> 375         model(*inputs)
    376 
    377     for h in hooks:

~/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/module/module.py in __call__(self, *inputs, **kwargs)
    146                 inputs = modified_inputs
    147 
--> 148         outputs = self.forward(*inputs, **kwargs)
    149 
    150         for hook in self._forward_hooks.values():

~/workspace/fishIfeed/cejc/model.ipynb in forward(self, x)

~/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/module/module.py in __call__(self, *inputs, **kwargs)
    146                 inputs = modified_inputs
    147 
--> 148         outputs = self.forward(*inputs, **kwargs)
    149 
    150         for hook in self._forward_hooks.values():

~/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/module/sequential.py in forward(self, inp)
     95         # avoid layer_values as a name prefix, see Module.__getattribute__
     96         for layer in [getattr(self, key) for key in self.layer_keys]:
---> 97             inp = layer(inp)
     98         return inp

~/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/module/module.py in __call__(self, *inputs, **kwargs)
    146                 inputs = modified_inputs
    147 
--> 148         outputs = self.forward(*inputs, **kwargs)
    149 
    150         for hook in self._forward_hooks.values():

~/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/module/conv.py in forward(self, inp)
    399 
    400     def forward(self, inp):
--> 401         return self.calc_conv(inp, self.weight, self.bias)
    402 
    403 

~/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/module/conv.py in calc_conv(self, inp, weight, bias)
    395             self.groups,
    396             self.conv_mode,
--> 397             self.compute_mode,
    398         )
    399 

~/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/functional/nn.py in conv2d(inp, weight, bias, stride, padding, dilation, groups, conv_mode, compute_mode)
    167     )
    168     inp, weight = utils.convert_inputs(inp, weight)
--> 169     (output,) = apply(op, inp, weight)
    170     if bias is not None:
    171         output += bias

RuntimeError: assertion `src.dtype.enumv() == filter.dtype.enumv()' failed at /home/code/dnn/src/common/convolution.cpp:597: megdnn::ConvolutionBase<Parameter>::CanonizedFilterMeta megdnn::ConvolutionBase<Parameter>::deduce_layout_fwd(const megdnn::TensorLayout&, const megdnn::TensorLayout&, megdnn::TensorLayout&) const [with Parameter = megdnn::param::Convolution]
extra message: src={1(5760000),3(1920000),1200(1600),1600(1) Float32}, filter={16(243),3(81),9(9),9(1) QuantizedS8}, dst={ Float32}, is_nchw=1, is_xcorr=1, pad_h=4, pad_w=4, stride_h=1, stride_w=1, dilate_h=1, dilate_w=1

backtrace:
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/lib/libmegengine_export.so(_ZN3mgb13MegBrainErrorC1ERKSs+0x4a) [0x7f48bc4b612a]
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/lib/libmegengine_export.so(+0x28128c1) [0x7f48bc4f48c1]
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/lib/libmegengine_export.so(_ZN6megdnn12ErrorHandler15on_megdnn_errorERKSs+0x14) [0x7f48bc890294]
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/lib/libmegengine_export.so(_ZN6megdnn12ErrorHandler15on_megdnn_errorEPKc+0x22) [0x7f48bc891862]
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/lib/libmegengine_export.so(_ZN6megdnn15__assert_fail__EPKciS1_S1_S1_z+0x190) [0x7f48bc908280]
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/lib/libmegengine_export.so(_ZNK6megdnn15ConvolutionBaseINS_5param11ConvolutionEE17deduce_layout_fwdERKNS_12TensorLayoutES6_RS4_+0x583) [0x7f48bc8a4be3]
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/lib/libmegengine_export.so(_ZN6megdnn18ConvolutionForward13deduce_layoutERKNS_12TensorLayoutES3_RS1_+0x1b) [0x7f48bc89932b]
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/lib/libmegengine_export.so(_ZNK3mgb3opr18ConvolutionForward20get_output_var_shapeERKN6megdnn11SmallVectorINS2_11TensorShapeELj4EEERS5_+0xda) [0x7f48bc62341a]
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/lib/libmegengine_export.so(_ZN3mgb2cg5mixin24OutshapePureByInshapeOpr10infer_descEmRN6megdnn11TensorShapeERKNS0_12static_infer6InpValE+0x19d) [0x7f48bc525ebd]
/home/megstudio/.miniconda/envs/xuan/lib/python3.7/site-packages/megengine/core/_imperative_rt.cpython-37m-x86_64-linux-gnu.so(+0x215313) [0x7f4924f05313]

针对上述使用module_stats()函数无法统计量化之后int8模型的计算量问题,请问应该怎样统计量化之后模型的flops呢?

assertion `src.dtype.enumv() == filter.dtype.enumv()' failed at /home/code/dnn/src/common/convolution.cpp:597: megdnn::ConvolutionBase::CanonizedFilterMeta megdnn::ConvolutionBase::deduce_layout_fwd(const megdnn::TensorLayout&, const megdnn::TensorLayout&, megdnn::TensorLayout&) const [with Parameter = megdnn::param::Convolution]
从这个 log 上看,应该是模型本身量化有问题:这个 conv 里面的 src 和 filter 的 dtype 不是同一类别(src 是 Float32,filter 是 QuantizedS8),目前天元没有支持这种 src 与 filter dtype 不一致的 conv 运算。另外需要注意,module_stats 内部是以 np.float32 的全零张量作为模型输入的(见 traceback 中 module_stats.py 第 373 行),因此即使量化流程本身无误,把 float32 输入直接喂给 filter 为 QuantizedS8 的 conv 也会触发同样的 dtype 不匹配报错。