Commit

Add files via upload
hfutqian committed Mar 20, 2023
1 parent 128c6de commit bb398f1
Showing 26 changed files with 13,679 additions and 0 deletions.
422 changes: 422 additions & 0 deletions AdaDFQ/ImageNet/pytorchcv/models/airnet.py

Large diffs are not rendered by default.

383 changes: 383 additions & 0 deletions AdaDFQ/ImageNet/pytorchcv/models/airnext.py
@@ -0,0 +1,383 @@
"""
AirNeXt for ImageNet-1K, implemented in PyTorch.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""

__all__ = ['AirNeXt', 'airnext50_32x4d_r2', 'airnext101_32x4d_r2', 'airnext101_32x4d_r16']

import os
import math
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block
from .airnet import AirBlock, AirInitBlock


class AirNeXtBottleneck(nn.Module):
"""
AirNet bottleneck block for residual path in ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
ratio: int
Air compression ratio.
"""
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width,
                 ratio):
        super(AirNeXtBottleneck, self).__init__()
        mid_channels = out_channels // 4
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        self.use_air_block = (stride == 1 and mid_channels < 512)
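        # Example for the 32x4d configuration: out_channels=256, cardinality=32,
        # bottleneck_width=4 give mid_channels=64, D=4, group_width=128. The
        # AirBlock is attached only to stride-1 units with mid_channels < 512,
        # so attention is skipped in the widest (last) stage.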

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=group_width)
        self.conv2 = conv3x3_block(
            in_channels=group_width,
            out_channels=group_width,
            stride=stride,
            groups=cardinality)
        self.conv3 = conv1x1_block(
            in_channels=group_width,
            out_channels=out_channels,
            activation=None)
        if self.use_air_block:
            self.air = AirBlock(
                in_channels=in_channels,
                out_channels=group_width,
                groups=(cardinality // ratio),
                ratio=ratio)

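    # The attention map is computed from the raw block input and applied
    # right after the grouped 3x3 convolution, before the final 1x1
    # projection (see forward below).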
    def forward(self, x):
        if self.use_air_block:
            att = self.air(x)
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_air_block:
            x = x * att
        x = self.conv3(x)
        return x


class AirNeXtUnit(nn.Module):
"""
AirNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
ratio: int
Air compression ratio.
"""
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width,
                 ratio):
        super(AirNeXtUnit, self).__init__()
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        self.body = AirNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width,
            ratio=ratio)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

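    # When the unit changes resolution or channel count, the identity branch
    # is projected by a stride-matched 1x1 convolution so both branches have
    # the same shape at the residual addition.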
    def forward(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        x = self.activ(x)
        return x


class AirNeXt(nn.Module):
"""
AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
ratio: int
Air compression ratio.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 ratio,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(AirNeXt, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", AirInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
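                # Downsample at the first unit of every stage except the
                # first; the initial block has already reduced the input
                # resolution.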
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), AirNeXtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    ratio=ratio))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))

self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)

self._init_params()

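    # Re-initializes every convolution with Kaiming-uniform weights and zero
    # biases; all other layers keep their PyTorch defaults.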
    def _init_params(self):
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x


def get_airnext(blocks,
                cardinality,
                bottleneck_width,
                base_channels,
                ratio,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
"""
Create AirNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
base_channels: int
Base number of channels.
ratio: int
Air compression ratio.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""

    if blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    else:
        raise ValueError("Unsupported AirNeXt with number of blocks: {}".format(blocks))

    bottleneck_expansion = 4
    init_block_channels = base_channels
    channels_per_layers = [base_channels * (2 ** i) * bottleneck_expansion for i in range(len(layers))]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
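    # Worked example: blocks=50 with base_channels=64 yields layers=[3, 4, 6, 3]
    # and channels_per_layers=[256, 512, 1024, 2048], so channels becomes
    # [[256]*3, [512]*4, [1024]*6, [2048]*3].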

    net = AirNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        ratio=ratio,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net


def airnext50_32x4d_r2(**kwargs):
"""
AirNeXt50-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_airnext(
blocks=50,
cardinality=32,
bottleneck_width=4,
base_channels=64,
ratio=2,
model_name="airnext50_32x4d_r2",
**kwargs)


def airnext101_32x4d_r2(**kwargs):
"""
AirNeXt101-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_airnext(
blocks=101,
cardinality=32,
bottleneck_width=4,
base_channels=64,
ratio=2,
model_name="airnext101_32x4d_r2",
**kwargs)


def airnext101_32x4d_r16(**kwargs):
"""
AirNeXt101-32x4d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_airnext(
blocks=101,
cardinality=32,
bottleneck_width=4,
base_channels=64,
ratio=16,
model_name="airnext101_32x4d_r16",
**kwargs)


def _calc_width(net):
    import numpy as np
    net_params = filter(lambda p: p.requires_grad, net.parameters())
    weight_count = 0
    for param in net_params:
        weight_count += np.prod(param.size())
    return weight_count


def _test():
    import torch

    pretrained = False

    models = [
        airnext50_32x4d_r2,
        airnext101_32x4d_r2,
        airnext101_32x4d_r16,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != airnext50_32x4d_r2 or weight_count == 27604296)
        assert (model != airnext101_32x4d_r2 or weight_count == 54099272)
        assert (model != airnext101_32x4d_r16 or weight_count == 45456456)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
_test()
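

# Usage sketch (not part of the upstream file); the import path assumes the
# package layout in this commit (pytorchcv/models/airnext.py):
#
#   import torch
#   from pytorchcv.models.airnext import airnext50_32x4d_r2
#
#   net = airnext50_32x4d_r2(pretrained=False)
#   net.eval()
#   with torch.no_grad():
#       logits = net(torch.randn(1, 3, 224, 224))
#   print(logits.shape)  # torch.Size([1, 1000])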