Model Zoo & Model Example
1. Model Zoo
You can find models to test in our Model Zoo.
2. Sample Model Code for PyTorch
Let's have a look at an example of what a model file in this format looks like:
import torch
import torch.nn as nn
framework = "pytorch"
main_class = "MyModel"
image_size = 224
batch_size = 16
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),
            nn.BatchNorm2d(6),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc = nn.Linear(16 * 53 * 53, 120)
        self.relu = nn.ReLU()
        self.fc1 = nn.Linear(120, 84)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(84, 1)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc(out)
        out = self.relu(out)
        out = self.fc1(out)
        out = self.relu1(out)
        out = self.fc2(out)
        return out
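To sanity-check the model file locally, you can instantiate the class named in main_class and run a random batch through it. The snippet below is just a minimal sketch that reuses the image_size and batch_size values declared above; the __main__ guard keeps it from running when the file is imported by the platform.
if __name__ == "__main__":
    # Build the model and push a dummy batch of RGB images through it.
    model = MyModel()
    model.eval()
    dummy_batch = torch.randn(batch_size, 3, image_size, image_size)
    with torch.no_grad():
        output = model(dummy_batch)
    print(output.shape)  # torch.Size([16, 1]): one output value per image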
For other supported formats, click here.
3. Sample Model Code for TensorFlow
Let's have a look at an example of what a model file in this format looks like:
# As we can see in the example below, the model's architecture is built
# using the Keras functional API and has been put inside a Python function named "MyModel".
# The function takes input_shape and classes as arguments and returns the model instance.
# import necessary layers
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, ZeroPadding2D,\
Flatten, BatchNormalization, AveragePooling2D, Dense, Activation, Add
from tensorflow.keras.models import Model
from tensorflow.keras import activations
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.regularizers import l2
framework = 'tensorflow'
main_method = 'MyModel'
input_shape = 'input_shape'
output_classes = 'classes'
def res_identity(x, filters):
    # ResNet block where the dimensions do not change.
    # The skip connection is a simple identity connection;
    # we will have 3 blocks and then the input will be added.
    x_skip = x  # this will be used for an addition with the residual block
    f1, f2 = filters

    # first block
    x = Conv2D(f1, kernel_size=(1, 1), strides=(1, 1), padding='valid', kernel_regularizer=l2(0.001))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    # second block # bottleneck (but size kept same with padding)
    x = Conv2D(f1, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_regularizer=l2(0.001))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    # third block: activation used after adding the input
    x = Conv2D(f2, kernel_size=(1, 1), strides=(1, 1), padding='valid', kernel_regularizer=l2(0.001))(x)
    x = BatchNormalization()(x)
    # x = Activation(activations.relu)(x)

    # add the input
    x = Add()([x, x_skip])
    x = Activation(activations.relu)(x)

    return x
def res_conv(x, s, filters):
    # here the input size changes when using conv blocks,
    # so the skip connection uses a projection (conv layer) matrix
    x_skip = x
    f1, f2 = filters

    # first block
    x = Conv2D(f1, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_regularizer=l2(0.001))(x)
    # For example, s = 2 would correspond to downsizing the feature map
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    # second block
    x = Conv2D(f1, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_regularizer=l2(0.001))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    # third block
    x = Conv2D(f2, kernel_size=(1, 1), strides=(1, 1), padding='valid', kernel_regularizer=l2(0.001))(x)
    x = BatchNormalization()(x)

    # shortcut
    x_skip = Conv2D(f2, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_regularizer=l2(0.001))(x_skip)
    x_skip = BatchNormalization()(x_skip)

    # add
    x = Add()([x, x_skip])
    x = Activation(activations.relu)(x)

    return x
### Combine the above functions to build a 50-layer ResNet.
def MyModel(input_shape=(224, 224, 3), classes=2):

    input_im = Input(shape=input_shape)
    x = ZeroPadding2D(padding=(3, 3))(input_im)

    # 1st stage
    # initial convolution followed by maxpooling
    x = Conv2D(64, kernel_size=(7, 7), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # 2nd stage
    # from here on only conv blocks and identity blocks, no pooling
    x = res_conv(x, s=1, filters=(64, 256))
    x = res_identity(x, filters=(64, 256))
    x = res_identity(x, filters=(64, 256))

    # 3rd stage
    x = res_conv(x, s=2, filters=(128, 512))
    x = res_identity(x, filters=(128, 512))
    x = res_identity(x, filters=(128, 512))
    x = res_identity(x, filters=(128, 512))

    # 4th stage
    x = res_conv(x, s=2, filters=(256, 1024))
    x = res_identity(x, filters=(256, 1024))
    x = res_identity(x, filters=(256, 1024))
    x = res_identity(x, filters=(256, 1024))
    x = res_identity(x, filters=(256, 1024))
    x = res_identity(x, filters=(256, 1024))

    # 5th stage
    x = res_conv(x, s=2, filters=(512, 2048))
    x = res_identity(x, filters=(512, 2048))
    x = res_identity(x, filters=(512, 2048))

    # ends with average pooling and a dense connection
    x = AveragePooling2D((2, 2), padding='same')(x)
    x = Flatten()(x)
    x = Dense(classes, activation='softmax', kernel_initializer='he_normal')(x)  # multi-class

    # define the model
    model = Model(inputs=input_im, outputs=x, name='Resnet50')

    return model
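Similarly, you can sanity-check the TensorFlow example locally by building the model and printing its summary. This is just a sketch using the default arguments shown above, guarded so it does not run when the file is imported by the platform.
if __name__ == "__main__":
    # Build the ResNet-50-style network and inspect its layers and output shape.
    model = MyModel(input_shape=(224, 224, 3), classes=2)
    model.summary()
    print(model.output_shape)  # (None, 2): softmax over the two classes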
For other supported formats, click here.