net_s3fd.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
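
# S3FD ("Single Shot Scale-invariant Face Detector") network definition in
# Paddle: a VGG-16-style backbone, extra convolution blocks, and six pairs of
# confidence/location detection heads.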
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
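

# Channel-wise L2 normalization with a learnable per-channel scale, applied to
# the shallower feature maps before their detection heads (as in SSD/S3FD).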
class L2Norm(nn.Layer):
    def __init__(self, n_channels, scale=1.0):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.scale = scale
        self.eps = 1e-10
        self.weight = paddle.create_parameter(
            shape=[self.n_channels], dtype='float32')
        self.weight.set_value(paddle.zeros([self.n_channels]) + self.scale)

    def forward(self, x):
        # Normalize each spatial position to unit L2 norm over channels,
        # then rescale every channel by its learned weight.
        norm = x.pow(2).sum(axis=1, keepdim=True).sqrt() + self.eps
        x = x / norm * self.weight.reshape([1, -1, 1, 1])
        return x
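

# The detector itself: VGG-16 convolutional layers (conv1_1..conv5_3), fc6/fc7
# reimplemented as convolutions, two extra downsampling blocks (conv6, conv7),
# L2Norm on the conv3_3/conv4_3/conv5_3 features, and a (conf, loc) head pair
# at each of the six detection scales.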
class s3fd(nn.Layer):
    def __init__(self):
        super(s3fd, self).__init__()
        # VGG-16 backbone.
        self.conv1_1 = nn.Conv2D(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = nn.Conv2D(64, 64, kernel_size=3, stride=1, padding=1)

        self.conv2_1 = nn.Conv2D(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = nn.Conv2D(128, 128, kernel_size=3, stride=1, padding=1)

        self.conv3_1 = nn.Conv2D(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_3 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1)

        self.conv4_1 = nn.Conv2D(256, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_3 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1)

        self.conv5_1 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_2 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_3 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1)

        # fc6/fc7 of VGG, converted to convolutions.
        self.fc6 = nn.Conv2D(512, 1024, kernel_size=3, stride=1, padding=3)
        self.fc7 = nn.Conv2D(1024, 1024, kernel_size=1, stride=1, padding=0)

        # Extra downsampling blocks.
        self.conv6_1 = nn.Conv2D(1024, 256, kernel_size=1, stride=1, padding=0)
        self.conv6_2 = nn.Conv2D(256, 512, kernel_size=3, stride=2, padding=1)

        self.conv7_1 = nn.Conv2D(512, 128, kernel_size=1, stride=1, padding=0)
        self.conv7_2 = nn.Conv2D(128, 256, kernel_size=3, stride=2, padding=1)

        # L2 normalization for the shallower feature maps.
        self.conv3_3_norm = L2Norm(256, scale=10)
        self.conv4_3_norm = L2Norm(512, scale=8)
        self.conv5_3_norm = L2Norm(512, scale=5)

        # Detection heads: confidence (conf) and box regression (loc) per scale.
        # The conv3_3 head predicts 4 confidence channels for the max-out
        # background label; all other conf heads predict 2.
        self.conv3_3_norm_mbox_conf = nn.Conv2D(
            256, 4, kernel_size=3, stride=1, padding=1)
        self.conv3_3_norm_mbox_loc = nn.Conv2D(
            256, 4, kernel_size=3, stride=1, padding=1)
        self.conv4_3_norm_mbox_conf = nn.Conv2D(
            512, 2, kernel_size=3, stride=1, padding=1)
        self.conv4_3_norm_mbox_loc = nn.Conv2D(
            512, 4, kernel_size=3, stride=1, padding=1)
        self.conv5_3_norm_mbox_conf = nn.Conv2D(
            512, 2, kernel_size=3, stride=1, padding=1)
        self.conv5_3_norm_mbox_loc = nn.Conv2D(
            512, 4, kernel_size=3, stride=1, padding=1)
        self.fc7_mbox_conf = nn.Conv2D(
            1024, 2, kernel_size=3, stride=1, padding=1)
        self.fc7_mbox_loc = nn.Conv2D(
            1024, 4, kernel_size=3, stride=1, padding=1)
        self.conv6_2_mbox_conf = nn.Conv2D(
            512, 2, kernel_size=3, stride=1, padding=1)
        self.conv6_2_mbox_loc = nn.Conv2D(
            512, 4, kernel_size=3, stride=1, padding=1)
        self.conv7_2_mbox_conf = nn.Conv2D(
            256, 2, kernel_size=3, stride=1, padding=1)
        self.conv7_2_mbox_loc = nn.Conv2D(
            256, 4, kernel_size=3, stride=1, padding=1)
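
    # The forward pass returns twelve tensors: a (cls, reg) pair for each of
    # the six detection scales, with cls1 reduced to two channels by the
    # max-out step below.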
    def forward(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        f3_3 = h
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        f4_3 = h
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        f5_3 = h
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.fc6(h))
        h = F.relu(self.fc7(h))
        ffc7 = h

        h = F.relu(self.conv6_1(h))
        h = F.relu(self.conv6_2(h))
        f6_2 = h

        h = F.relu(self.conv7_1(h))
        h = F.relu(self.conv7_2(h))
        f7_2 = h

        f3_3 = self.conv3_3_norm(f3_3)
        f4_3 = self.conv4_3_norm(f4_3)
        f5_3 = self.conv5_3_norm(f5_3)

        cls1 = self.conv3_3_norm_mbox_conf(f3_3)
        reg1 = self.conv3_3_norm_mbox_loc(f3_3)
        cls2 = self.conv4_3_norm_mbox_conf(f4_3)
        reg2 = self.conv4_3_norm_mbox_loc(f4_3)
        cls3 = self.conv5_3_norm_mbox_conf(f5_3)
        reg3 = self.conv5_3_norm_mbox_loc(f5_3)
        cls4 = self.fc7_mbox_conf(ffc7)
        reg4 = self.fc7_mbox_loc(ffc7)
        cls5 = self.conv6_2_mbox_conf(f6_2)
        reg5 = self.conv6_2_mbox_loc(f6_2)
        cls6 = self.conv7_2_mbox_conf(f7_2)
        reg6 = self.conv7_2_mbox_loc(f7_2)

        # Max-out background label: the conv3_3 head predicts three background
        # scores and one face score; keep only the largest background score so
        # cls1 ends up with two channels like the other scales.
        chunk = paddle.chunk(cls1, 4, 1)
        tmp_max = paddle.where(chunk[0] > chunk[1], chunk[0], chunk[1])
        bmax = paddle.where(tmp_max > chunk[2], tmp_max, chunk[2])
        cls1 = paddle.concat([bmax, chunk[3]], axis=1)

        return [
            cls1, reg1, cls2, reg2, cls3, reg3, cls4, reg4, cls5, reg5, cls6,
            reg6
        ]
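

if __name__ == '__main__':
    # Illustrative smoke test (not part of the original module): build the
    # network with randomly initialized weights and push a dummy 256x256 RGB
    # batch through it to inspect the twelve output tensors. The input size is
    # an arbitrary choice for this sketch; real use loads pretrained weights.
    model = s3fd()
    model.eval()
    dummy = paddle.rand([1, 3, 256, 256], dtype='float32')
    outputs = model(dummy)
    names = ['cls1', 'reg1', 'cls2', 'reg2', 'cls3', 'reg3',
             'cls4', 'reg4', 'cls5', 'reg5', 'cls6', 'reg6']
    for name, t in zip(names, outputs):
        print(name, t.shape)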