Update code structure.
1. Fixed linter errors;
2. Added .travis.yml;
3. Added Sum layer;
4. Minor tests clean up;
5. Changed imports to package-relative.
gmalivenko committed May 30, 2018
1 parent b664499 commit 9447272
Showing 35 changed files with 147 additions and 610 deletions.
27 changes: 27 additions & 0 deletions .travis.yml
@@ -0,0 +1,27 @@
group: travis_latest
language: python
cache: pip
python:
  - 2.7
  - 3.6
  #- nightly
  #- pypy
  #- pypy3
matrix:
  allow_failures:
    - python: nightly
    - python: pypy
    - python: pypy3
install:
  #- pip install -r requirements.txt
  - pip install flake8 # pytest # add another testing frameworks later
before_script:
  # stop the build if there are Python syntax errors or undefined names
  - flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics
  # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
  - flake8 . --count --exit-zero --max-complexity=32 --max-line-length=127 --statistics
script:
  - true # pytest --capture=sys # add other tests here
notifications:
  on_success: change
  on_failure: change # `always` will be the setting once code changes slow down
2 changes: 1 addition & 1 deletion pytorch2keras/converter.py
@@ -9,7 +9,7 @@
import contextlib
from torch.jit import _unique_state_dict

-from layers import AVAILABLE_CONVERTERS
+from .layers import AVAILABLE_CONVERTERS


@contextlib.contextmanager
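
The switch to a package-relative import is what makes the converter importable as an installed package. A minimal usage sketch, assuming the package has been installed (e.g. with pip install .):

# With `from .layers import ...` inside the package, the import resolves from
# any working directory; the old absolute `from layers import ...` only worked
# when pytorch2keras/ itself happened to be on sys.path.
from pytorch2keras.converter import pytorch_to_keras
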
36 changes: 32 additions & 4 deletions pytorch2keras/layers.py
@@ -436,6 +436,29 @@ def convert_elementwise_sub(
    layers[scope_name] = sub([model0, model1])


def convert_sum(
        params, w_name, scope_name, inputs, layers, weights
):
    """
    Convert sum.
    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
    """
    print('Converting Sum ...')

    def target_layer(x):
        return keras.backend.sum(x)

    lambda_layer = keras.layers.Lambda(target_layer)
    layers[scope_name] = lambda_layer(layers[inputs[0]])


def convert_concat(params, w_name, scope_name, inputs, layers, weights):
    """
    Convert concatenation.
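
A note on the new convert_sum above: keras.backend.sum called without an axis argument reduces over every axis, batch included, so the layer collapses its input to a scalar tensor. A minimal sketch of the same Lambda-wrapping pattern as a standalone model (the names here are illustrative, not from the repository):

import numpy as np
import keras

# Lambda wraps a backend op, exactly as convert_sum does.
inp = keras.layers.Input(shape=(4, 4))
total = keras.layers.Lambda(lambda x: keras.backend.sum(x))(inp)
model = keras.models.Model(inputs=inp, outputs=total)

# With no axis given, every element is summed, batch axis included.
print(model.predict(np.ones((1, 4, 4))))  # -> 16.0
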
@@ -469,6 +492,7 @@ def convert_relu(params, w_name, scope_name, inputs, layers, weights):
    """
    print('Converting relu ...')

+    print(w_name, scope_name)
    tf_name = w_name + str(random.random())
    relu = keras.layers.Activation('relu', name=tf_name)
    layers[scope_name] = relu(layers[inputs[0]])
@@ -570,7 +594,6 @@ def convert_selu(params, w_name, scope_name, inputs, layers, weights):
    layers[scope_name] = selu(layers[inputs[0]])


-
def convert_transpose(params, w_name, scope_name, inputs, layers, weights):
    """
    Convert transpose layer.
@@ -705,7 +728,9 @@ def convert_reduce_sum(params, w_name, scope_name, inputs, layers, weights):

    keepdims = params['keepdims'] > 0
    axis = np.array(params['axes'])
-    target_layer = lambda x: keras.backend.sum(x, keepdims=keepdims, axis=axis)
+
+    def target_layer(x, keepdims=keepdims, axis=axis):
+        return keras.backend.sum(x, keepdims=keepdims, axis=axis)

    lambda_layer = keras.layers.Lambda(target_layer)
    layers[scope_name] = lambda_layer(layers[inputs[0]])
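
The lambda assignments are rewritten as defs to satisfy flake8's E731 ("do not assign a lambda expression, use a def"), and the new default arguments (keepdims=keepdims, axis=axis) pin the current values to the function: a plain closure looks its free variables up at call time, while a default argument captures them at definition time. A small sketch of the difference, with illustrative names:

import numpy as np

late, bound = [], []
for axis in (0, 1):
    late.append(lambda x: x.sum(axis=axis))  # 'axis' looked up at call time

    def target_layer(x, axis=axis):  # value of 'axis' captured right here
        return x.sum(axis=axis)
    bound.append(target_layer)

x = np.ones((2, 3))
print([f(x).shape for f in late])   # [(2,), (2,)] -- both see axis == 1
print([f(x).shape for f in bound])  # [(3,), (2,)] -- each keeps its own axis

The same rewrite is applied to convert_constant in the next hunk.
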
@@ -725,7 +750,9 @@ def convert_constant(params, w_name, scope_name, inputs, layers, weights):
    """
    print('Converting constant ...')

-    target_layer = lambda x: keras.backend.constant(np.float32(params['value']))
+    def target_layer(params=params):
+        return keras.backend.constant(np.float32(params['value']))
+
    lambda_layer = keras.layers.Lambda(target_layer)
    layers[scope_name] = lambda_layer(layers[inputs[0]])

@@ -782,7 +809,7 @@ def convert_padding(params, w_name, scope_name, inputs, layers, weights):
    padding_name = tf_name + '_pad'
    padding_layer = keras.layers.ZeroPadding2D(
        padding=((params['pads'][2], params['pads'][6]), (params['pads'][3], params['pads'][7])),
-        name=tf_name
+        name=padding_name
    )

    layers[scope_name] = padding_layer(layers[inputs[0]])
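
The one-word fix above matters because Keras requires layer names to be unique within a model: the ZeroPadding2D layer previously reused tf_name, the same name the following layer is given, so assembling the model could fail on a duplicate name. A minimal sketch of the corrected two-layer naming, with illustrative names:

import keras

# Each layer gets its own name; reusing one name for both layers raises an
# error when the Model is assembled.
inp = keras.layers.Input(shape=(8, 8, 3))
x = keras.layers.ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inp)
x = keras.layers.Conv2D(4, 3, name='conv1')(x)
model = keras.models.Model(inputs=inp, outputs=x)
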
@@ -801,6 +828,7 @@ def convert_padding(params, w_name, scope_name, inputs, layers, weights):
    'onnx::Add': convert_elementwise_add,
    'onnx::Mul': convert_elementwise_mul,
    'onnx::Sub': convert_elementwise_sub,
+    'onnx::Sum': convert_sum,
    'onnx::Concat': convert_concat,
    'onnx::Relu': convert_relu,
    'onnx::LeakyRelu': convert_lrelu,
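
For context, AVAILABLE_CONVERTERS is the converter's single dispatch point: each traced node's kind is looked up in this dict and the matching function is called. A rough sketch of that lookup (convert_node is a hypothetical helper; the mapping and the shared signature are from this file):

from pytorch2keras.layers import AVAILABLE_CONVERTERS

def convert_node(node_type, params, w_name, scope_name, inputs, layers, weights):
    # Dispatch on the ONNX node kind, e.g. 'onnx::Sum'.
    try:
        converter = AVAILABLE_CONVERTERS[node_type]
    except KeyError:
        raise ValueError('Unsupported layer type: {}'.format(node_type))
    converter(params, w_name, scope_name, inputs, layers, weights)
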
9 changes: 5 additions & 4 deletions setup.py
@@ -1,20 +1,21 @@
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install


-try: # for pip >= 10
+try:  # for pip >= 10
    from pip._internal.req import parse_requirements
-except ImportError: # for pip <= 9.0.3
+except ImportError:  # for pip <= 9.0.3
    from pip.req import parse_requirements


# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements('requirements.txt', session='null')


# reqs is a list of requirement
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
reqs = [str(ir.req) for ir in install_reqs]


setup(name='pytorch2keras',
      version='0.1',
      description='The model convertor',
10 changes: 2 additions & 8 deletions tests/alexnet.py
@@ -1,14 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
-import torchvision
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras

+from pytorch2keras.converter import pytorch_to_keras
+import torchvision

if __name__ == '__main__':
max_error = 0
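
The same import cleanup repeats through the remaining test files: the sys.path hack is dropped in favor of importing the installed package. Before and after, assuming the package is installed (e.g. with pip install -e .):

# Before: only worked when the test was launched from tests/.
# import sys
# sys.path.append('../pytorch2keras')
# from converter import pytorch_to_keras

# After: a plain import of the installed package.
from pytorch2keras.converter import pytorch_to_keras
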
7 changes: 1 addition & 6 deletions tests/avg_pool.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class AvgPool(nn.Module):
7 changes: 1 addition & 6 deletions tests/bn.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestConv2d(nn.Module):
7 changes: 1 addition & 6 deletions tests/concat_many.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestConcatMany(nn.Module):
7 changes: 1 addition & 6 deletions tests/const.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestConst(nn.Module):
7 changes: 1 addition & 6 deletions tests/conv2d.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestConv2d(nn.Module):
7 changes: 1 addition & 6 deletions tests/conv2d_channels_last.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestConv2d(nn.Module):
7 changes: 1 addition & 6 deletions tests/conv2d_dilation.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestConv2d(nn.Module):
13 changes: 4 additions & 9 deletions tests/convtranspose2d.py
@@ -1,22 +1,17 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestConvTranspose2d(nn.Module):
    """Module for ConvTranspose2d conversion testing
    """

-    def __init__(self, inp=10, out=16, kernel_size=3, bias=True):
+    def __init__(self, inp=10, out=16, kernel_size=3, padding=1, bias=True):
        super(TestConvTranspose2d, self).__init__()
-        self.conv2d = nn.ConvTranspose2d(inp, out, kernel_size=kernel_size, bias=bias)
+        self.conv2d = nn.ConvTranspose2d(inp, out, kernel_size=kernel_size, bias=bias, stride=padding)

    def forward(self, x):
        x = self.conv2d(x)
@@ -30,7 +25,7 @@ def forward(self, x):
    inp = np.random.randint(kernel_size + 1, 100)
    out = np.random.randint(1, 100)

-    model = TestConvTranspose2d(inp, out, kernel_size, inp % 2)
+    model = TestConvTranspose2d(inp, out, kernel_size, 2, inp % 3)

    input_np = np.random.uniform(0, 1, (1, inp, inp, inp))
    input_var = Variable(torch.FloatTensor(input_np))
7 changes: 1 addition & 6 deletions tests/dense.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestDense(nn.Module):
10 changes: 2 additions & 8 deletions tests/densenet.py
@@ -1,15 +1,9 @@
import keras # work around segfault
-import sys
import numpy as np
import math

import torch
-import torchvision
import torch.nn as nn
from torch.autograd import Variable
+from pytorch2keras.converter import pytorch_to_keras
+import torchvision

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras

if __name__ == '__main__':
max_error = 0
7 changes: 1 addition & 6 deletions tests/droupout.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestDropout(nn.Module):
9 changes: 2 additions & 7 deletions tests/embedding.py
@@ -1,13 +1,8 @@
import keras # work around segfault
-import sys
import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable

-sys.path.append('../pytorch2keras')
-from converter import pytorch_to_keras
+from pytorch2keras.converter import pytorch_to_keras


class TestEmbedding(nn.Module):
@@ -16,7 +11,7 @@ def __init__(self, input_size):
        self.embedd = nn.Embedding(input_size, 100)

    def forward(self, input):
-        return self.embedd(input).sum(dim=0)
+        return self.embedd(input)


if __name__ == '__main__':
…
