Trying out the much-talked-about ONNX, PyTorch, and Caffe2


Machine learning with ONNX, PyTorch, and Caffe2 (copy & paste the code into a Jupyter notebook)

Since analyzing code written by other people is supposed to be a good way to learn machine learning, copying the code from a tutorial into a Jupyter notebook and studying it seems like a good approach.


How to export SqueezeNet to mobile devices

# Some standard imports
import io
import numpy as np
import torch.onnx

The SqueezeNet implementation below comes from torchvision:

import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo


__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']


model_urls = {
    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}


class Fire(nn.Module):

    def __init__(self, inplanes, squeeze_planes,
                 expand1x1_planes, expand3x3_planes):
        super(Fire, self).__init__()
        self.inplanes = inplanes
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.squeeze_activation = nn.ReLU(inplace=True)
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
                                   kernel_size=1)
        self.expand1x1_activation = nn.ReLU(inplace=True)
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
                                   kernel_size=3, padding=1)
        self.expand3x3_activation = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.squeeze_activation(self.squeeze(x))
        return torch.cat([
            self.expand1x1_activation(self.expand1x1(x)),
            self.expand3x3_activation(self.expand3x3(x))
        ], 1)


class SqueezeNet(nn.Module):

    def __init__(self, version=1.0, num_classes=1000):
        super(SqueezeNet, self).__init__()
        if version not in [1.0, 1.1]:
            raise ValueError("Unsupported SqueezeNet version {version}:"
                             "1.0 or 1.1 expected".format(version=version))
        self.num_classes = num_classes
        if version == 1.0:
            self.features = nn.Sequential(
                nn.Conv2d(3, 96, kernel_size=7, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(96, 16, 64, 64),
                Fire(128, 16, 64, 64),
                Fire(128, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(256, 32, 128, 128),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(512, 64, 256, 256),
            )
        else:
            self.features = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(64, 16, 64, 64),
                Fire(128, 16, 64, 64),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(128, 32, 128, 128),
                Fire(256, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                Fire(512, 64, 256, 256),
            )
        # Final convolution is initialized differently from the rest
        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.ReLU(inplace=True),
            nn.AvgPool2d(13)
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x)
        return x.view(x.size(0), self.num_classes)


def squeezenet1_1(pretrained=False, **kwargs):
    r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = SqueezeNet(version=1.1, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1']))
    return model

Calling the function as follows gets us the torch model:

# Get pretrained squeezenet model
torch_model = squeezenet1_1(True)
/root/.pyenv/versions/miniconda3-4.3.30/envs/caffe2/lib/python3.6/site-packages/ipykernel_launcher.py:94: UserWarning: nn.init.kaiming_uniform is now deprecated in favor of nn.init.kaiming_uniform_.
/root/.pyenv/versions/miniconda3-4.3.30/envs/caffe2/lib/python3.6/site-packages/ipykernel_launcher.py:92: UserWarning: nn.init.normal is now deprecated in favor of nn.init.normal_.
Downloading: "https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth" to /root/.torch/models/squeezenet1_1-f364aa15.pth
100.0%
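
Incidentally, the warnings above come from the deprecated in-place init calls in the SqueezeNet code; the warnings themselves name the replacements, which are the underscore variants. A minimal sketch of the updated calls (my own addition, not part of the tutorial):

# The underscore variants are the non-deprecated in-place initializers
# named in the deprecation warnings above
import torch.nn as nn
import torch.nn.init as init

conv = nn.Conv2d(3, 64, kernel_size=3)
init.kaiming_uniform_(conv.weight)                   # replaces init.kaiming_uniform

final_conv = nn.Conv2d(512, 1000, kernel_size=1)
init.normal_(final_conv.weight, mean=0.0, std=0.01)  # replaces init.normal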

Next, export the PyTorch model as an ONNX model.

from torch.autograd import Variable
batch_size = 1    # just a random number

# Input to the model
x = Variable(torch.randn(batch_size, 3, 224, 224), requires_grad=True)

# Export the model
torch_out = torch.onnx._export(torch_model,             # model being run
                               x,                       # model input (or a tuple for multiple inputs)
                               "squeezenet.onnx",       # where to save the model (can be a file or file-like object)
                               export_params=True)      # store the trained parameter weights inside the model file
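
Before handing the exported file to the Caffe2 backend, it may be worth validating it with ONNX's own checker. A small sketch using the standard onnx API (my addition, not an original tutorial step):

import onnx
from onnx import helper

# Verify that the exported protobuf is a well-formed ONNX model
model = onnx.load("squeezenet.onnx")
onnx.checker.check_model(model)

# Print a human-readable summary of the graph
print(helper.printable_graph(model.graph))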

This sets things up so we can run the exported model and verify that the result of running the model in PyTorch matches the result of running it on the onnx-caffe2 backend.

import onnx
import caffe2.python.onnx.backend
from onnx import helper

# Load the ONNX ModelProto object. model is a standard Python protobuf object
model = onnx.load("squeezenet.onnx")

# prepare the caffe2 backend for executing the model. This converts the ONNX
# model into a Caffe2 NetDef that can execute it. Other ONNX backends, like
# one for CNTK, will be available soon.
prepared_backend = caffe2.python.onnx.backend.prepare(model)

# run the model in Caffe2

# Construct a map from input names to Tensor data.
# The graph itself contains inputs for all weight parameters, followed by the input image.
# Since the weights are already embedded, we just need to pass the input image,
# which is the last input to the graph.
W = {model.graph.input[0].name: x.data.numpy()}

# Run the Caffe2 net:
c2_out = prepared_backend.run(W)[0]

# Verify the numerical correctness up to 3 decimal places
np.testing.assert_almost_equal(torch_out.data.cpu().numpy(), c2_out, decimal=3)
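
As an extra sanity check beyond the numerical tolerance (my own addition), we can also confirm that both backends predict the same class index for the random input:

# Compare the predicted class indices from both backends
torch_pred = np.argmax(torch_out.data.cpu().numpy(), axis=1)
caffe2_pred = np.argmax(c2_out, axis=1)
print("PyTorch:", torch_pred, "Caffe2:", caffe2_pred)
assert (torch_pred == caffe2_pred).all()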

After this, by taking advantage of Caffe2's cross-platform capability, the model can be exported to run on a mobile device.

# Export to mobile
from caffe2.python.onnx.backend import Caffe2Backend as c2

init_net, predict_net = c2.onnx_graph_to_caffe2_net(model.graph)
with open("squeeze_init_net.pb", "wb") as f:
    f.write(init_net.SerializeToString())
with open("squeeze_predict_net.pb", "wb") as f:
    f.write(predict_net.SerializeToString())
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-16-f1060b33f068> in <module>()
      2 from caffe2.python.onnx.backend import Caffe2Backend as c2
      3 
----> 4 init_net, predict_net = c2.onnx_graph_to_caffe2_net(model.graph)
      5 with open("squeeze_init_net.pb", "wb") as f:
      6     f.write(init_net.SerializeToString())

~/.pyenv/versions/miniconda3-4.3.30/envs/caffe2/lib/python3.6/site-packages/caffe2/python/onnx/backend.py in onnx_graph_to_caffe2_net(cls, model, device, opset_version)
    960     @classmethod
    961     def onnx_graph_to_caffe2_net(cls, model, device="CPU", opset_version=_known_opset_version):
--> 962         return cls._onnx_model_to_caffe2_net(model, device=device, opset_version=opset_version, include_initializers=True)
    963 
    964     @classmethod

~/.pyenv/versions/miniconda3-4.3.30/envs/caffe2/lib/python3.6/site-packages/caffe2/python/onnx/backend.py in _onnx_model_to_caffe2_net(cls, onnx_model, device, opset_version, include_initializers)
    919         device_option = get_device_option(Device(device))
    920 
--> 921         init_model = cls.optimize_onnx(onnx_model, init=True)
    922         pred_model = cls.optimize_onnx(onnx_model, predict=True)
    923 

~/.pyenv/versions/miniconda3-4.3.30/envs/caffe2/lib/python3.6/site-packages/caffe2/python/onnx/backend.py in optimize_onnx(input, init, predict)
    662         if predict:
    663             passes.append('split_predict')
--> 664         out = onnx.optimizer.optimize(input, passes)
    665         return out
    666 

~/.pyenv/versions/miniconda3-4.3.30/envs/caffe2/lib/python3.6/site-packages/onnx-1.2.1-py3.6-linux-x86_64.egg/onnx/optimizer.py in optimize(model, passes)
     42                   'fuse_transpose_into_gemm']
     43     if not isinstance(model, ModelProto):
---> 44         raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))
     45 
     46     model_str = model.SerializeToString()

ValueError: Optimizer only accepts ModelProto, incorrect type: <class 'onnx_pb2.GraphProto'>

Apparently the API has changed, and it seems necessary to build Caffe2 from source again via the PyTorch site.
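
That said, the error message itself hints at a simpler workaround: optimize() rejects a GraphProto but accepts a ModelProto, and the signature in the traceback shows that onnx_graph_to_caffe2_net takes the full model. Passing model instead of model.graph may therefore work without rebuilding (a sketch, untested against this exact version combination):

# Export to mobile: pass the full ModelProto, which the newer API expects,
# instead of the GraphProto that optimize() rejected above
from caffe2.python.onnx.backend import Caffe2Backend as c2

init_net, predict_net = c2.onnx_graph_to_caffe2_net(model)
with open("squeeze_init_net.pb", "wb") as f:
    f.write(init_net.SerializeToString())
with open("squeeze_predict_net.pb", "wb") as f:
    f.write(predict_net.SerializeToString())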


The specs change far too often

Caffe2's specs change so often that keeping up with the latest version is a struggle, and quite a few people online are lamenting this too. In the end, it seems the only option is to build caffe2 and onnx from source, and to always work from the most recent version of the tutorials.
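
When this kind of API mismatch shows up, a quick first step (my own habit, not from the tutorial) is to print the installed versions and match them against the docs you are following:

import torch
import onnx

# Check which versions are actually installed before following a tutorial
print("torch:", torch.__version__)
print("onnx:", onnx.__version__)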

Reference site: https://github.com/
