ACNet: Asymmetric Convolutional Network for Image Super-Resolution (2)

That said, one aspect of this paper deserves praise: it is very easy to reproduce. After a quick look at the architecture, I finished implementing the code in very little time and trained it for 660K steps using only the DIV2K training data. Under BI degradation, the model I trained outperforms the metrics reported in the paper by 0.04-0.1dB. A reference implementation of ACNet is shown below.

import math

import torch
import torch.nn as nn


class MeanShift(nn.Conv2d):
    """Subtract (sign=-1) or add (sign=1) the DIV2K RGB mean via a fixed 1x1 conv."""
    def __init__(self, rgb_range=1.0, rgb_mean=(0.4488, 0.4371, 0.4040),
                 rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std
        # these weights are fixed, not learned
        for p in self.parameters():
            p.requires_grad = False


class Upsampler(nn.Sequential):
    """Sub-pixel (PixelShuffle) upsampling for power-of-two scales and x3."""
    def __init__(self, scale, channels, bn=False, act=False, bias=True):
        m = []
        if (scale & (scale - 1)) == 0:  # scale is a power of 2
            for _ in range(int(math.log(scale, 2))):
                m.append(nn.Conv2d(channels, 4 * channels, 3, 1, 1, bias=bias))
                m.append(nn.PixelShuffle(2))
                if bn:
                    m.append(nn.BatchNorm2d(channels))
                if act:
                    m.append(nn.ReLU(inplace=True))
        elif scale == 3:
            m.append(nn.Conv2d(channels, 9 * channels, 3, 1, 1, bias=bias))
            m.append(nn.PixelShuffle(3))
            if bn:
                m.append(nn.BatchNorm2d(channels))
            if act:
                m.append(nn.ReLU(inplace=True))
        else:
            raise NotImplementedError
        super().__init__(*m)


class ACBlock(nn.Module):
    """Asymmetric convolution block: parallel 1x3, 3x1 and 3x3 branches, summed."""
    def __init__(self, in_channels, out_channels):
        super(ACBlock, self).__init__()
        self.conv1x3 = nn.Conv2d(in_channels, out_channels, (1, 3), 1, (0, 1))
        self.conv3x1 = nn.Conv2d(in_channels, out_channels, (3, 1), 1, (1, 0))
        self.conv3x3 = nn.Conv2d(in_channels, out_channels, (3, 3), 1, (1, 1))

    def forward(self, x):
        conv3x1 = self.conv3x1(x)
        conv1x3 = self.conv1x3(x)
        conv3x3 = self.conv3x3(x)
        return conv3x1 + conv1x3 + conv3x3


class ACNet(nn.Module):
    def __init__(self, scale=2, in_channels=3, out_channels=3,
                 num_features=64, num_blocks=17, rgb_range=1.0):
        super(ACNet, self).__init__()
        self.scale = scale
        self.num_blocks = num_blocks
        self.num_features = num_features

        # pre- and post-processing: remove/restore the RGB mean
        self.sub_mean = MeanShift(rgb_range=rgb_range, sign=-1)
        self.add_mean = MeanShift(rgb_range=rgb_range, sign=1)

        # AB module: a stack of asymmetric convolution blocks
        self.blk1 = ACBlock(in_channels, num_features)
        for idx in range(1, num_blocks):
            self.__setattr__(f"blk{idx+1}",
                             nn.Sequential(nn.ReLU(inplace=True),
                                           ACBlock(num_features, num_features)))

        # MEB: upsample the low-frequency (blk1) and high-frequency (accumulated) paths
        self.lff = nn.Sequential(
            nn.ReLU(inplace=False),
            Upsampler(scale, num_features),
            nn.Conv2d(num_features, num_features, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_features, num_features, 3, 1, 1)
        )
        self.hff = nn.Sequential(
            nn.ReLU(inplace=False),
            Upsampler(scale, num_features),
            nn.Conv2d(num_features, num_features, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_features, num_features, 3, 1, 1)
        )

        # HFFEB: fuse the two paths and reconstruct the RGB output
        self.fusion = nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(num_features, num_features, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_features, num_features, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_features, out_channels, 3, 1, 1),
        )

    def forward(self, x):
        inputs = self.sub_mean(x)
        blk1 = self.blk1(inputs)
        # accumulate the outputs of every block as the high-frequency path
        high = blk1
        tmp = blk1
        for idx in range(1, self.num_blocks):
            tmp = self.__getattr__(f"blk{idx+1}")(tmp)
            high = high + tmp
        lff = self.lff(blk1)
        hff = self.hff(high)
        fusion = self.fusion(lff + hff)
        output = self.add_mean(fusion)
        return output
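As a quick sanity check of the implementation above (my own snippet, not part of the original post), the following builds a x2 model, pushes a random low-resolution tensor through it, and prints the output shape and parameter count:

# Minimal sanity check; assumes the MeanShift/Upsampler/ACBlock/ACNet
# definitions above are in scope.
model = ACNet(scale=2).eval()

lr = torch.randn(1, 3, 48, 48)   # a random 48x48 "low-resolution" batch
with torch.no_grad():
    sr = model(lr)

print(sr.shape)                  # torch.Size([1, 3, 96, 96]) for x2
print(sum(p.numel() for p in model.parameters() if p.requires_grad))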

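For completeness, a minimal sketch of the kind of training run described above (660K steps on DIV2K under BI degradation). The DIV2KDataset wrapper, the L1 loss, and the Adam optimizer with its learning rate are my assumptions for illustration; the post does not spell out these details.

from torch.utils.data import DataLoader

# DIV2KDataset is hypothetical: a dataset yielding (lr, hr) crop pairs
# from DIV2K with bicubic (BI) degradation.
train_loader = DataLoader(DIV2KDataset(scale=2), batch_size=16, shuffle=True)

model = ACNet(scale=2).train()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.L1Loss()          # assumed; a common choice for PSNR-oriented SR

step, max_steps = 0, 660_000     # "66W" steps = 660K iterations
while step < max_steps:
    for lr_img, hr_img in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(lr_img), hr_img)
        loss.backward()
        optimizer.step()
        step += 1
        if step >= max_steps:
            break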
Recommended Reading

Your perceptual loss may be used incorrectly: Chunhua Shen's team proposes a generalized perceptual loss with random weights

CVPR2021 | Same super-resolution performance at 50% lower computational cost: Chao Dong et al. propose ClassSR for low-level acceleration

SANet | Fusing spatial and channel attention: Nanjing University proposes a shuffle attention mechanism

GhostSR | Tackling feature redundancy in image super-resolution: Huawei Noah's Ark Lab & Peking University jointly propose GhostSR

Knowledge distillation in image super-resolution

ICLR2021 | Significantly boosting small-model performance: Arizona State University & Microsoft jointly propose SEED

RepVGG | Take your ConvNet all the way with plain convolutions: a plain network exceeds 80% top-1 accuracy for the first time

Transformer claims another win! Top spots on multiple low-level task leaderboards taken over

A new breakthrough in channel attention! Zhejiang University proposes FcaNet from a frequency-domain perspective

No extra data, tricks, or architecture tweaks: CMU open-sources the first method to lift ResNet50 accuracy above 80%

46FPS + 1080P x2 super-resolution on a mobile NPU: Arm proposes an ultra-efficient image super-resolution scheme based on re-parameterization

Dynamic convolution super-evolved! Channel fusion replaces attention, cutting parameters by 75% with clearly better performance (ICLR 2021)

CVPR2021 | The "painless gains" ACNet evolves again: Tsinghua University & Megvii propose the Inception-style DBB
