# refinenet.py
  1. """
  2. Copyright (c) 2019-present NAVER Corp.
  3. MIT License
  4. """
  5. # -*- coding: utf-8 -*-
  6. import torch
  7. import torch.nn as nn
  8. import torch.nn.functional as F
  9. from torch.autograd import Variable
  10. from basenet.vgg16_bn import init_weights
  11. class RefineNet(nn.Module):
  12. def __init__(self):
  13. super(RefineNet, self).__init__()
  14. self.last_conv = nn.Sequential(
  15. nn.Conv2d(34, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),
  16. nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),
  17. nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True)
  18. )
  19. self.aspp1 = nn.Sequential(
  20. nn.Conv2d(64, 128, kernel_size=3, dilation=6, padding=6), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
  21. nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
  22. nn.Conv2d(128, 1, kernel_size=1)
  23. )
  24. self.aspp2 = nn.Sequential(
  25. nn.Conv2d(64, 128, kernel_size=3, dilation=12, padding=12), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
  26. nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
  27. nn.Conv2d(128, 1, kernel_size=1)
  28. )
  29. self.aspp3 = nn.Sequential(
  30. nn.Conv2d(64, 128, kernel_size=3, dilation=18, padding=18), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
  31. nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
  32. nn.Conv2d(128, 1, kernel_size=1)
  33. )
  34. self.aspp4 = nn.Sequential(
  35. nn.Conv2d(64, 128, kernel_size=3, dilation=24, padding=24), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
  36. nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
  37. nn.Conv2d(128, 1, kernel_size=1)
  38. )
  39. init_weights(self.last_conv.modules())
  40. init_weights(self.aspp1.modules())
  41. init_weights(self.aspp2.modules())
  42. init_weights(self.aspp3.modules())
  43. init_weights(self.aspp4.modules())
  44. def forward(self, y, upconv4):
  45. refine = torch.cat([y.permute(0,3,1,2), upconv4], dim=1)
  46. refine = self.last_conv(refine)
  47. aspp1 = self.aspp1(refine)
  48. aspp2 = self.aspp2(refine)
  49. aspp3 = self.aspp3(refine)
  50. aspp4 = self.aspp4(refine)
  51. #out = torch.add([aspp1, aspp2, aspp3, aspp4], dim=1)
  52. out = aspp1 + aspp2 + aspp3 + aspp4
  53. return out.permute(0, 2, 3, 1) # , refine.permute(0,2,3,1)