#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@author: andrew
@file: SaDetectModel.py
@email: admin@marques22.com
@email: 2021022362@m.scnu.edu.cn
@time: 2023/09/16
"""
from torch import nn


class BasicBlock_1d(nn.Module):
    """Basic 1-D residual block: two 3-tap convolutions plus a shortcut branch."""
    expansion = 1

    def __init__(self, input_channel, output_channel, stride=1):
        super(BasicBlock_1d, self).__init__()
        # Main branch: conv-BN-ReLU-conv-BN.
        self.left = nn.Sequential(
            nn.Conv1d(in_channels=input_channel, out_channels=output_channel,
                      kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm1d(output_channel),
            nn.ReLU(inplace=True),
            nn.Conv1d(in_channels=output_channel, out_channels=output_channel,
                      kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm1d(output_channel)
        )
        # Shortcut branch: identity unless the length or channel count changes,
        # in which case a strided 1x1 projection is used.
        self.right = nn.Sequential()

        if stride != 1 or input_channel != self.expansion * output_channel:
            self.right = nn.Sequential(
                nn.Conv1d(in_channels=input_channel, out_channels=output_channel * self.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(self.expansion * output_channel)
            )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.left(x)
        residual = self.right(x)
        out += residual
        out = self.relu(out)
        return out
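

# Usage sketch for BasicBlock_1d (illustrative only; assumes `import torch`):
#
#   block = BasicBlock_1d(64, 128, stride=2)
#   y = block(torch.randn(4, 64, 300))  # -> torch.Size([4, 128, 150])
#
# The 1x1 projection in `self.right` matches the shortcut's channels and
# stride to the main branch, so the residual addition is well-defined.

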
class ResNet_1d(nn.Module):
    """1-D ResNet backbone and classifier for single-channel sequences."""

    def __init__(self, block, number_block, num_classes=2):
        super(ResNet_1d, self).__init__()

        self.in_channel = 64

        # Stem: 7-tap strided convolution plus max pooling, mirroring the
        # standard 2-D ResNet stem.
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=self.in_channel,
                               kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        # Four residual stages; every stage after the first halves the length.
        self.layer1 = self._make_layer(block=block, out_channel=64, num_block=number_block[0], stride=1)
        self.layer2 = self._make_layer(block=block, out_channel=128, num_block=number_block[1], stride=2)
        self.layer3 = self._make_layer(block=block, out_channel=256, num_block=number_block[2], stride=2)
        self.layer4 = self._make_layer(block=block, out_channel=512, num_block=number_block[3], stride=2)
        # self.layer5 = self._make_layer(block=block, out_channel=512, num_block=number_block[4], stride=2)
        self.pool2 = nn.AdaptiveAvgPool1d(1)

        # Classification head. nn.Flatten() is a no-op on the already-flat
        # input but keeps the head self-contained.
        self.features = nn.Sequential(
            # nn.Linear(in_features=1024, out_features=nc),
            nn.Flatten(),
            # nn.Linear(in_features=512 * 23, out_features=512),
            nn.Linear(in_features=512, out_features=num_classes)
            # nn.Softmax()
            # nn.Sigmoid()
        )
        # self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, out_channel, num_block, stride):
        # Only the first block of a stage downsamples; the rest keep stride 1.
        strides = [stride] + [1] * (num_block - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channel, out_channel, stride))
            self.in_channel = out_channel * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.pool1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # x = self.layer5(x)
        x = self.pool2(x)
        x = x.view(x.size(0), -1)
        x = self.features(x)
        return x
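

# Usage sketch (illustrative; only the LSTM variant below is instantiated in
# this file, but the backbone also works standalone; assumes `import torch`):
#
#   net = ResNet_1d(BasicBlock_1d, [2, 2, 2, 2], num_classes=2)
#   logits = net(torch.randn(4, 1, 300))  # -> torch.Size([4, 2])

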
class ResNet18_LSTM_1d_v2(ResNet_1d):
    """ResNet backbone followed by an LSTM over the temporal dimension.

    Instead of global average pooling, the stage-4 feature map (N, 512, L')
    is read as a length-L' sequence of 512-dim features; the last LSTM
    output feeds the inherited linear head.
    """

    def __init__(self, block, number_block, num_classes, hidden_size, num_layers, bidirectional):
        super(ResNet18_LSTM_1d_v2, self).__init__(
            block=block,
            number_block=number_block,
            num_classes=num_classes
        )
        # self.pool3 = nn.MaxPool1d(4)
        # Note: the inherited head is Linear(512, num_classes), so the LSTM
        # output must be 512 features wide: hidden_size=512 with
        # bidirectional=False (or hidden_size=256 with bidirectional=True).
        self.lstm = nn.LSTM(input_size=512,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            bidirectional=bidirectional,
                            batch_first=True)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.pool1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # (N, 512, L') -> (N, L', 512) so the batch_first LSTM sees
        # L' timesteps of 512 features.
        x = x.transpose(2, 1)
        x, (h_n1, c_n1) = self.lstm(x)
        # Keep only the final timestep's output as the sequence summary.
        x = x[:, -1, :]
        x = self.features(x)
        return x
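

# Shape walk-through for ResNet18_LSTM_1d_v2 on a (4, 1, 300) input (a sketch;
# each strided layer maps L to floor((L + 2*padding - kernel) / stride) + 1):
#   conv1 / pool1:     300 -> 150 -> 75
#   layer1 .. layer4:  75 -> 75 -> 38 -> 19 -> 10
#   transpose(2, 1):   (4, 512, 10) -> (4, 10, 512)
#   lstm, last step:   (4, 10, 512) -> (4, 512)
#   features:          (4, 512) -> (4, num_classes)

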
def ResNet18_v2_LSTM():
    """Factory: ResNet18-style backbone (two blocks per stage) plus a
    two-layer unidirectional LSTM, for binary classification."""
    return ResNet18_LSTM_1d_v2(BasicBlock_1d, [2, 2, 2, 2],
                               num_classes=2, hidden_size=512, num_layers=2, bidirectional=False)


if __name__ == '__main__':
    # from torchinfo import summary
    # resnet = ResNet18_v2_LSTM().cuda()
    # summary(resnet, (4, 1, 300))
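
    # Minimal CPU smoke test (a sketch; the (4, 1, 300) input mirrors the
    # commented torchinfo call above).
    import torch

    model = ResNet18_v2_LSTM()
    out = model(torch.randn(4, 1, 300))
    print(out.shape)  # expected: torch.Size([4, 2])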