Heartbeat_Annotation/Deep_Model/Unet.py
# encoding:utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import resnet34
class _ConvBnReLU(nn.Sequential):
def __init__(self, in_ch,out_ch,kernel_size,padding,dilation=1,stride=1,bn=True, relu=True):
super(_ConvBnReLU, self).__init__()
self.add_module("conv",nn.Conv1d(in_ch, out_ch, kernel_size, stride, padding, dilation, bias=False))
if bn :
self.add_module("bn", nn.BatchNorm1d(out_ch))
if relu:
self.add_module("relu", nn.ReLU())
class _conv_block(nn.Module):
def __init__(self,in_ch,out_ch,kernel_size,padding,dilation=1,stride=1,bn=True, relu=True):
super(_conv_block, self).__init__()
self.conv = nn.Sequential(
_ConvBnReLU(in_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, bn=bn, relu=relu),
_ConvBnReLU(out_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, bn=bn, relu=relu)
)
def forward(self,x):
x = self.conv(x)
return x
class _conv_2block(nn.Module):
def __init__(self,in_ch,out_ch,kernel_size,padding,dilation=1,stride=1,bn=True, relu=True):
super(_conv_2block, self).__init__()
self.conv = nn.Sequential(
_ConvBnReLU(in_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, bn=bn, relu=relu),
_ConvBnReLU(out_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, bn=bn, relu=relu),
_ConvBnReLU(out_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, bn=bn, relu=relu)
)
def forward(self,x):
x = self.conv(x)
return x
class up_conv(nn.Module): # doubles the sequence length and halves the channel count
def __init__(self,ch_in,ch_out,kernel_size,padding):
super(up_conv,self).__init__()
self.up = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv1d(ch_in,ch_out,kernel_size=kernel_size,stride=1,padding=padding,bias=True),
nn.BatchNorm1d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self,x):
x = self.up(x)
return x
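# A quick shape sketch for up_conv (commented out, following the style of the test snippet
# at the bottom of this file; the example sizes are illustrative assumptions):
# up = up_conv(64, 32, kernel_size=17, padding=8)
# y = up(torch.randn(2, 64, 500))   # -> torch.Size([2, 32, 1000]): length doubled, channels halved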
class up_conv_down(nn.Module):
def __init__(self,ch_in,ch_out,kernel_size,padding):
super(up_conv_down,self).__init__()
self.up = nn.Sequential(
nn.Upsample(size=125), # upsample to a fixed number of points
nn.Conv1d(ch_in,ch_out,kernel_size=kernel_size,stride=1,padding=padding,bias=True),
nn.BatchNorm1d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self,x):
x = self.up(x)
return x
class up_conv2_down(nn.Module):
def __init__(self,ch_in,ch_out,kernel_size,padding):
super(up_conv2_down,self).__init__()
self.up = nn.Sequential(
nn.Upsample(size=31), # upsample to a fixed number of points
nn.Conv1d(ch_in,ch_out,kernel_size=kernel_size,stride=1,padding=padding,bias=True),
nn.BatchNorm1d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self,x):
x = self.up(x)
return x
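# up_conv_down / up_conv2_down upsample to a fixed length (125 / 31 points) rather than by a
# factor of two, so odd-length bottleneck feature maps line up with the matching encoder maps
# before concatenation. A commented sketch with assumed example sizes:
# up = up_conv_down(512, 256, kernel_size=17, padding=8)
# y = up(torch.randn(2, 512, 62))   # -> torch.Size([2, 256, 125])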
class Unet(nn.Module):
def __init__(self):
super(Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,32,kernel_size=17,padding=8)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(32,64,kernel_size=17,padding=8)
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(64,128,kernel_size=17,padding=8)
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_block(128,256,kernel_size=17,padding=8) #125
self.maxpool4 = nn.MaxPool1d(kernel_size=2,padding=1) #63
self.feature5 = _conv_block(256, 512, kernel_size=17, padding=8)
self.drop = nn.Dropout(0.5)
self.up4 = up_conv_down(512,256,kernel_size=17,padding=8)
self.up4_conv = _conv_block(512,256,kernel_size=17,padding=8)
self.up3 = up_conv(256,128,kernel_size=17,padding=8)
self.up3_conv = _conv_block(256,128,kernel_size=17,padding=8)
self.up2 = up_conv(128,64,kernel_size=17,padding=8)
self.up2_conv = _conv_block(128,64,kernel_size=17,padding=8)
self.up1 = up_conv(64, 32, kernel_size=17, padding=8)
self.up1_conv = _conv_block(64, 32, kernel_size=17, padding=8)
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.7),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #63
x5 = self.drop(self.feature5(x5))
d5 = self.up4(x5) #125
d5 = torch.cat([x4,d5], dim=1) #125; channels are concatenated
d5 = self.up4_conv(d5)
d4 = self.up3(d5) #250
d4 = torch.cat([x3,d4], dim=1) #250
d4 = self.up3_conv(d4)
d3 = self.up2(d4) #500
d3 = torch.cat([x2,d3], dim=1) #500
d3 = self.up2_conv(d3)
d2 = self.up1(d3) #1000
d2 = torch.cat([x1,d2], dim=1) #1000
d2 = self.up1_conv(d2)
result = self.detection(d2)
return result
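# Commented smoke test for Unet (a sketch; the 1000-sample window and batch size are
# assumptions taken from the commented-out example at the bottom of this file):
# model = Unet()
# y = model(torch.randn(4, 1, 1000))
# print(y.shape)   # expected: torch.Size([4, 1, 1000]) - one logit per sample point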
class paper_Unet(nn.Module):
def __init__(self):
super(paper_Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,16,kernel_size=9,padding=4)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(16,32,kernel_size=9,padding=4)
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(32,64,kernel_size=9,padding=4)
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_block(64,128,kernel_size=9,padding=4) #125
self.maxpool4 = nn.MaxPool1d(kernel_size=2,padding=1) #63
self.feature5 = _conv_block(128, 256, kernel_size=9, padding=4)
self.drop = nn.Dropout(0.5)
self.up4 = up_conv_down(256,128,kernel_size=9,padding=4)
self.up4_conv = _conv_block(256,128,kernel_size=9,padding=4)
self.up3 = up_conv(128,64,kernel_size=9,padding=4)
self.up3_conv = _conv_block(128,64,kernel_size=9,padding=4)
self.up2 = up_conv(64,32,kernel_size=9,padding=4)
self.up2_conv = _conv_block(64,32,kernel_size=9,padding=4)
self.up1 = up_conv(32, 16, kernel_size=9,padding=4)
self.up1_conv = _conv_block(32, 16,kernel_size=9,padding=4)
self.detection = nn.Sequential(
nn.Conv1d(in_channels=16, out_channels=16, kernel_size=1),
nn.Dropout(0.7),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #63
x5 = self.drop(self.feature5(x5))
d5 = self.up4(x5) #125
d5 = torch.cat([x4,d5], dim=1) #125; channels are concatenated
d5 = self.up4_conv(d5)
d4 = self.up3(d5) #250
d4 = torch.cat([x3,d4], dim=1) #250
d4 = self.up3_conv(d4)
d3 = self.up2(d4) #500
d3 = torch.cat([x2,d3], dim=1) #500
d3 = self.up2_conv(d3)
d2 = self.up1(d3) #1000
d2 = torch.cat([x1,d2], dim=1) #1000
d2 = self.up1_conv(d2)
result = self.detection(d2)
return result
class Threelayer_Unet(nn.Module):
def __init__(self):
super(Threelayer_Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,32,kernel_size=17,padding=8)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(32,64,kernel_size=17,padding=8)
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(64,128,kernel_size=17,padding=8)
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_block(128, 256, kernel_size=17, padding=8)
self.drop = nn.Dropout(0.5)
self.up3 = up_conv(256,128,kernel_size=17,padding=8)
self.up3_conv = _conv_block(256,128,kernel_size=17,padding=8)
self.up2 = up_conv(128,64,kernel_size=17,padding=8)
self.up2_conv = _conv_block(128,64,kernel_size=17,padding=8)
self.up1 = up_conv(64, 32, kernel_size=17, padding=8)
self.up1_conv = _conv_block(64, 32, kernel_size=17, padding=8)
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.7),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.drop(self.feature4(x4))
d4 = self.up3(x4) #250
d4 = torch.cat([x3,d4], dim=1) #250
d4 = self.up3_conv(d4)
d3 = self.up2(d4) #500
d3 = torch.cat([x2,d3], dim=1) #500
d3 = self.up2_conv(d3)
d2 = self.up1(d3) #1000
d2 = torch.cat([x1,d2], dim=1) #1000
d2 = self.up1_conv(d2)
result = self.detection(d2)
return result
class Fivelayer_Unet(nn.Module):
def __init__(self):
super(Fivelayer_Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,32,kernel_size=17,padding=8)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(32,64,kernel_size=17,padding=8) #500
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(64,128,kernel_size=17,padding=8) #250
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_block(128,256,kernel_size=17,padding=8) #125
self.maxpool4 = nn.MaxPool1d(kernel_size=2) #62, floor(125/2) = 62
self.feature5 = _conv_block(256,512,kernel_size=17,padding=8) #62, 512 channels
self.maxpool5 = nn.MaxPool1d(kernel_size=2) #31*512
self.feature6 = _conv_block(512, 1024, kernel_size=17, padding=8) # bottom-level conv block, 31*1024
self.drop = nn.Dropout(0.5)
self.up5 = up_conv(1024,512,kernel_size=17,padding=8) #62*512
# skip connection
self.up5_conv = _conv_block(1024,512,kernel_size=17,padding=8) # conv block after concatenation, 62*512
self.up4 = up_conv_down(512,256,kernel_size=17,padding=8) # bottom upsampling back to 125 points, 125*256
# skip connection
self.up4_conv = _conv_block(512,256,kernel_size=17,padding=8)
self.up3 = up_conv(256,128,kernel_size=17,padding=8) #250*128
# skip connection
self.up3_conv = _conv_block(256,128,kernel_size=17,padding=8) #250*128
self.up2 = up_conv(128,64,kernel_size=17,padding=8) #500*64
# skip connection
self.up2_conv = _conv_block(128,64,kernel_size=17,padding=8)
self.up1 = up_conv(64, 32, kernel_size=17, padding=8) # every up step is followed by a skip connection
# skip connection
self.up1_conv = _conv_block(64, 32, kernel_size=17, padding=8) # final conv block
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.5),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #62
x5 = self.feature5(x5)
x6 = self.maxpool5(x5) #31
x6 = self.drop(self.feature6(x6))
d6 = self.up5(x6) #62*512
d6 = torch.cat([x5,d6], dim=1) #62*1024; channels are concatenated
d6 = self.up5_conv(d6) #62*512
d5 = self.up4(d6) #125*256
d5 = torch.cat([x4,d5], dim=1) #125*512
d5 = self.up4_conv(d5) #125*256
d4 = self.up3(d5) #250*128
d4 = torch.cat([x3,d4], dim=1) #250*256
d4 = self.up3_conv(d4) #250*128
d3 = self.up2(d4) #500*64
d3 = torch.cat([x2,d3], dim=1) #500*128
d3 = self.up2_conv(d3) #500*64
d2 = self.up1(d3) #1000*32
d2 = torch.cat([x1,d2], dim=1) #1000*64
d2 = self.up1_conv(d2) #1000*32
result = self.detection(d2)
return result
class imp_Fivelayer_Unet(nn.Module):
def __init__(self):
super(imp_Fivelayer_Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,32,kernel_size=21,padding=10)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(32,64,kernel_size=17,padding=8) #500
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(64,128,kernel_size=13,padding=6) #250
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_block(128,256,kernel_size=9,padding=4) #125
self.maxpool4 = nn.MaxPool1d(kernel_size=2) #62, floor(125/2) = 62
self.feature5 = _conv_block(256,512,kernel_size=5,padding=2) #62, 512 channels
self.maxpool5 = nn.MaxPool1d(kernel_size=2) #31*512
self.feature6 = _conv_block(512, 1024, kernel_size=3, padding=1) # bottom-level conv block, 31*1024
self.drop = nn.Dropout(0.5)
self.up5 = up_conv(1024,512,kernel_size=5,padding=2) #62*512
# skip connection
self.up5_conv = _conv_block(1024,512,kernel_size=5,padding=2) # conv block after concatenation, 62*512
self.up4 = up_conv_down(512,256,kernel_size=9,padding=4) # bottom upsampling back to 125 points, 125*256
# skip connection
self.up4_conv = _conv_block(512,256,kernel_size=9,padding=4)
self.up3 = up_conv(256,128,kernel_size=13,padding=6) #250*128
# skip connection
self.up3_conv = _conv_block(256,128,kernel_size=13,padding=6) #250*128
self.up2 = up_conv(128,64,kernel_size=17,padding=8) #500*64
# skip connection
self.up2_conv = _conv_block(128,64,kernel_size=17,padding=8)
self.up1 = up_conv(64, 32, kernel_size=21, padding=10)
self.up1_conv = _conv_block(64, 32, kernel_size=15, padding=7) # final conv block
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.5),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #62
x5 = self.feature5(x5)
x6 = self.maxpool5(x5) #31
x6 = self.drop(self.feature6(x6))
d6 = self.up5(x6) #62*512
d6 = torch.cat([x5,d6], dim=1) #62*1024; channels are concatenated
d6 = self.up5_conv(d6) #62*512
d5 = self.up4(d6) #125*256
d5 = torch.cat([x4,d5], dim=1) #125*512
d5 = self.up4_conv(d5) #125*256
d4 = self.up3(d5) #250*128
d4 = torch.cat([x3,d4], dim=1) #250*256
d4 = self.up3_conv(d4) #250*128
d3 = self.up2(d4) #500*64
d3 = torch.cat([x2,d3], dim=1) #500*128
d3 = self.up2_conv(d3) #500*64
d2 = self.up1(d3) #1000*32
d2 = torch.cat([x1,d2], dim=1) #1000*64
d2 = self.up1_conv(d2) #1000*32
result = self.detection(d2)
return result
# class Unetplusplus(nn.Module):
# def __init__(self, c_in=1, nf=[16, 32, 64, 128]):
# super(Unetplusplus, self).__init__()
# self.conv1_1 = _conv_block(c_in, nf[0],kernel_size=17,padding=8)
# self.pool1 = nn.MaxPool1d(2, stride=2)
# self.conv2_1 = _conv_block(nf[0], nf[1],kernel_size=17,padding=8)
# self.pool2 = nn.MaxPool1d(2, stride=2)
# self.up1_2 = nn.Upsample(scale_factor=2, mode='linear', align_corners=True)
# # self.up1_2 = nn.ConvTranspose1d(nf[1], nf[1], kernel_size=2, stride=2)
# self.conv_up1_2 = nn.Conv1d(nf[1], nf[0], kernel_size=3, stride=1, padding=1)
# self.relu1_2 = nn.ReLU()
# self.conv1_2 = _conv_block(nf[1], nf[0],kernel_size=17,padding=8)
#
# self.conv3_1 = _conv_block(nf[1], nf[2],kernel_size=17,padding=8)
# self.pool3 = nn.MaxPool1d(2, stride=2)
# self.up2_2 = nn.Upsample(scale_factor=2, mode='linear', align_corners=True)
# # self.up2_2 = nn.ConvTranspose1d(nf[2], nf[2], kernel_size=2, stride=2)
# self.conv_up2_2 = nn.Conv1d(nf[2], nf[1], kernel_size=3, stride=1, padding=1)
# self.relu2_2 = nn.ReLU()
# self.conv2_2 = _conv_block(nf[2], nf[1],kernel_size=17,padding=8)
# self.up1_3 = nn.Upsample(scale_factor=2, mode='linear', align_corners=True)
# # self.up1_3 = nn.ConvTranspose1d(nf[1], nf[1], kernel_size=2, stride=2)
# self.conv_up1_3 = nn.Conv1d(nf[1], nf[1], kernel_size=3, stride=1, padding=1)
# self.relu1_3 = nn.ReLU()
# self.conv1_3 = _conv_block(nf[2], nf[0],kernel_size=17,padding=8)
#
# self.conv4_1 = _conv_block(nf[2], nf[3],kernel_size=17,padding=8)
# self.up3_2 = nn.Upsample(scale_factor=2, mode='linear', align_corners=True)
# # self.up3_2 = nn.ConvTranspose1d(nf[3], nf[3], kernel_size=2, stride=2)
# self.conv_up3_2 = nn.Conv1d(nf[3], nf[2], kernel_size=3, stride=1, padding=1)
# self.relu3_2 = nn.ReLU()
# self.conv3_2 = _conv_block(nf[3], nf[2],kernel_size=17,padding=8)
# self.up2_3 = nn.Upsample(scale_factor=2, mode='linear', align_corners=True)
# # self.up2_3 = nn.ConvTranspose1d(nf[2], nf[2], kernel_size=2, stride=2)
# self.conv_up2_3 = nn.Conv1d(nf[2], nf[1], kernel_size=3, stride=1, padding=1)
# self.relu2_3 = nn.ReLU()
# self.conv2_3 = _conv_block(nf[1] * 3, nf[1],kernel_size=17,padding=8)
# self.up1_4 = nn.Upsample(scale_factor=2, mode='linear', align_corners=True)
# # self.up1_4 = nn.ConvTranspose1d(nf[1], nf[1], kernel_size=2, stride=2)
# self.conv_up1_4 = nn.Conv1d(nf[1], nf[0], kernel_size=3, stride=1, padding=1)
# self.relu1_4 = nn.ReLU()
# self.conv1_4 = _conv_block(nf[2], nf[1],kernel_size=17,padding=8)
#
# self.output_conv = nn.Conv1d(nf[1], 32, kernel_size=1, stride=1)
#
# self.detection = nn.Sequential(
# nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
# nn.Dropout(0.5),
# nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
# )
#
# def forward(self, x):
# conv1_1 = self.conv1_1(x)
# # print("conv1_1",conv1_1.shape)
# pool1 = self.pool1(conv1_1)
# # print("pool1", pool1.shape)
# conv2_1 = self.conv2_1(pool1)
# # print("conv2_1", conv2_1.shape)
# pool2 = self.pool2(conv2_1)
# # print("pool2", pool2.shape)
# up1_2 = self.relu1_2(self.conv_up1_2(self.up1_2(conv2_1))) # 12.9: the upsampling was not taking effect, so the dimensions were wrong
# # print("up1_2", up1_2.shape)
# conv1_2 = torch.cat([up1_2, conv1_1], dim=1)
# # print("conv1_2", conv1_2.shape)
# conv1_2 = self.conv1_2(conv1_2)
# conv3_1 = self.conv3_1(pool2)
# pool3 = self.pool3(conv3_1)
# # print("pool3", pool3.shape)
# up2_2 = self.relu2_2(self.conv_up2_2(self.up2_2(conv3_1)))
# conv2_2 = torch.cat([up2_2, conv2_1], dim=1)
# conv2_2 = self.conv2_2(conv2_2)
# up1_3 = self.relu1_3(self.conv_up1_3(self.up1_3(conv2_2)))
# conv1_3 = torch.cat([up1_3, conv1_1, conv1_2], dim=1)
# conv1_3 = self.conv1_3(conv1_3)
# conv4_1 = self.conv4_1(pool3)
# # print("conv4_1", conv4_1.shape)
# up3_2 = self.relu3_2(self.conv_up3_2(self.up3_2(conv4_1)))
# # print("up3_2", up3_2.shape)
# # print("conv3_1", conv3_1.shape)
# conv3_2 = torch.cat([up3_2, conv3_1], dim=1)
# conv3_2 = self.conv3_2(conv3_2)
# up2_3 = self.relu2_3(self.conv_up2_3(self.up2_3(conv3_2)))
# conv2_3 = torch.cat([up2_3, conv2_1, conv2_2], dim=1)
# conv2_3 = self.conv2_3(conv2_3)
# up1_4 = self.relu1_4(self.conv_up1_4(self.up1_4(conv2_3)))
# conv1_4 = torch.cat([up1_4, conv1_1, conv1_2, conv1_3], dim=1)
# conv1_4 = self.conv1_4(conv1_4)
# output_conv = self.output_conv(conv1_4)
# # output_conv = output_conv.reshape(-1, output_conv.shape[1] * output_conv.shape[2])
#
# result = self.detection(output_conv)
# return result
class Unetplusplus(nn.Module):
def __init__(self, c_in=1, nf=[16, 32, 64, 128]):
super(Unetplusplus, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.conv1_1 = _conv_block(c_in, nf[0], kernel_size=17, padding=8)
self.pool1 = nn.MaxPool1d(2, stride=2)
self.conv2_1 = _conv_block(nf[0], nf[1], kernel_size=17, padding=8)
self.pool2 = nn.MaxPool1d(2, stride=2)
self.up1_2 = up_conv(nf[1], nf[0],kernel_size=13,padding=6)
self.conv1_2 = _conv_block(nf[1], nf[0], kernel_size=17, padding=8)
self.conv3_1 = _conv_block(nf[1], nf[2], kernel_size=17, padding=8)
self.pool3 = nn.MaxPool1d(2, stride=2)
self.up2_2 = up_conv(nf[2], nf[1], kernel_size=13, padding=6)
self.conv2_2 = _conv_block(nf[2], nf[1], kernel_size=17, padding=8)
self.up1_3 = up_conv(nf[1], nf[0], kernel_size=13, padding=6)
# skip connections
self.conv1_3 = _conv_block(nf[0]*3, nf[0], kernel_size=17, padding=8)
self.conv4_1 = _conv_block(nf[2], nf[3], kernel_size=17, padding=8)
self.up3_2 = up_conv(nf[3], nf[2], kernel_size=13, padding=6) ## input channels match the previous block's output channels; the up-conv halves the channel count
self.conv3_2 = _conv_block(nf[3], nf[2], kernel_size=17, padding=8)
self.up2_3 = up_conv(nf[2], nf[1], kernel_size=13, padding=6)
self.conv2_3 = _conv_block(nf[1] * 3, nf[1], kernel_size=17, padding=8)
self.up1_4 = up_conv(nf[1], nf[0], kernel_size=13, padding=6)
self.conv1_4 = _conv_block(nf[2], nf[1], kernel_size=17, padding=8)
self.output_conv = nn.Conv1d(nf[1], 32, kernel_size=1, stride=1)
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.5),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self, x):
x = self.norm(x)
conv1_1 = self.conv1_1(x)
# print("conv1_1",conv1_1.shape)
pool1 = self.pool1(conv1_1)
# print("pool1", pool1.shape)
conv2_1 = self.conv2_1(pool1)
# print("conv2_1", conv2_1.shape)
pool2 = self.pool2(conv2_1)
# print("pool2", pool2.shape)
up1_2 = self.up1_2(conv2_1) # 12.9: the upsampling was not taking effect, so the dimensions were wrong
# print("up1_2", up1_2.shape)
conv1_2 = torch.cat([up1_2, conv1_1], dim=1)
# print("conv1_2", conv1_2.shape)
conv1_2 = self.conv1_2(conv1_2)
conv3_1 = self.conv3_1(pool2)
pool3 = self.pool3(conv3_1)
# print("pool3", pool3.shape)
up2_2 = self.up2_2(conv3_1)
conv2_2 = torch.cat([up2_2, conv2_1], dim=1)
conv2_2 = self.conv2_2(conv2_2)
up1_3 = self.up1_3(conv2_2)
conv1_3 = torch.cat([up1_3, conv1_1, conv1_2], dim=1)
conv1_3 = self.conv1_3(conv1_3)
conv4_1 = self.conv4_1(pool3)
# print("conv4_1", conv4_1.shape)
up3_2 = self.up3_2(conv4_1)
# print("up3_2", up3_2.shape)
# print("conv3_1", conv3_1.shape)
conv3_2 = torch.cat([up3_2, conv3_1], dim=1)
conv3_2 = self.conv3_2(conv3_2)
up2_3 = self.up2_3(conv3_2)
conv2_3 = torch.cat([up2_3, conv2_1, conv2_2], dim=1)
conv2_3 = self.conv2_3(conv2_3)
up1_4 = self.up1_4(conv2_3)
conv1_4 = torch.cat([up1_4, conv1_1, conv1_2, conv1_3], dim=1)
conv1_4 = self.conv1_4(conv1_4)
output_conv = self.output_conv(conv1_4)
# output_conv = output_conv.reshape(-1, output_conv.shape[1] * output_conv.shape[2])
result = self.detection(output_conv)
return result
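# Unetplusplus follows the UNet++ pattern of nested, dense skip connections: each node
# conv{i}_{j} takes the upsampled output of the node one level deeper plus every earlier node
# at the same depth. A commented sketch with an assumed 1000-sample input window:
# model = Unetplusplus()
# print(model(torch.randn(2, 1, 1000)).shape)   # expected: torch.Size([2, 1, 1000])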
class Fourlayer_Unetplusplus(nn.Module):
def __init__(self, c_in=1, nf=[16, 32, 64, 128,256]):
super(Fourlayer_Unetplusplus, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.conv1_1 = _conv_block(c_in, nf[0], kernel_size=21, padding=10)
self.pool1 = nn.MaxPool1d(2, stride=2)
self.conv2_1 = _conv_block(nf[0], nf[1], kernel_size=17, padding=8)
self.pool2 = nn.MaxPool1d(2, stride=2)
self.up1_2 = up_conv(nf[1], nf[0], kernel_size=17, padding=8)
self.conv1_2 = _conv_block(nf[1], nf[0], kernel_size=21, padding=10)
###########################################################################
self.conv3_1 = _conv_block(nf[1], nf[2], kernel_size=13, padding=6)
self.pool3 = nn.MaxPool1d(2, stride=2)
self.up2_2 = up_conv(nf[2], nf[1], kernel_size=13, padding=6)
self.conv2_2 = _conv_block(nf[2], nf[1], kernel_size=17, padding=8)
self.up1_3 = up_conv(nf[1], nf[0], kernel_size=17, padding=8)
# skip connections
self.conv1_3 = _conv_block(nf[0] * 3, nf[0], kernel_size=21, padding=10)
###################################################################
self.conv4_1 = _conv_block(nf[2], nf[3], kernel_size=9, padding=4)
self.up3_2 = up_conv(nf[3], nf[2], kernel_size=9, padding=4) ## input channels match the previous block's output channels; the up-conv halves the channel count
self.conv3_2 = _conv_block(nf[3], nf[2], kernel_size=13, padding=6)
self.up2_3 = up_conv(nf[2], nf[1], kernel_size=13, padding=6)
self.conv2_3 = _conv_block(nf[1] * 3, nf[1], kernel_size=17, padding=8)
self.up1_4 = up_conv(nf[1], nf[0], kernel_size=17, padding=8)
self.conv1_4 = _conv_block(nf[2], nf[1], kernel_size=21, padding=10)
##########################################################################
#################### one level deeper ##############################################
self.pool4 = nn.MaxPool1d(2, stride=2)
self.conv5_1 = _conv_block(nf[3], nf[4], kernel_size=5, padding=2)
self.up4_2 = up_conv_down(nf[4], nf[3], kernel_size=5, padding=2)
self.conv4_2 = _conv_block(nf[4], nf[3], kernel_size=9, padding=4)
self.up3_3 = up_conv(nf[3], nf[2], kernel_size=9, padding=4)
self.conv3_3 = _conv_block(nf[2]*3, nf[2], kernel_size=13, padding=6)
self.up2_4 = up_conv(nf[2], nf[1], kernel_size=13, padding=6)
self.conv2_4= _conv_block(nf[3], nf[2], kernel_size=17, padding=8) #128->64
self.up1_5 = up_conv(nf[2], nf[1], kernel_size=17, padding=8)
self.conv1_5 = _conv_block(nf[2]+nf[0]*3, nf[2], kernel_size=17, padding=8)
####################################################################
self.output_conv = nn.Conv1d(nf[2], 32, kernel_size=1, stride=1)
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.5),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self, x):
x = self.norm(x)
conv1_1 = self.conv1_1(x)
# print("conv1_1",conv1_1.shape)
pool1 = self.pool1(conv1_1)
# print("pool1", pool1.shape)
conv2_1 = self.conv2_1(pool1)
# print("conv2_1", conv2_1.shape)
pool2 = self.pool2(conv2_1)
# print("pool2", pool2.shape)
up1_2 = self.up1_2(conv2_1) # 12.9: the upsampling was not taking effect, so the dimensions were wrong
# print("up1_2", up1_2.shape)
conv1_2 = torch.cat([up1_2, conv1_1], dim=1)
# print("conv1_2", conv1_2.shape)
conv1_2 = self.conv1_2(conv1_2)
conv3_1 = self.conv3_1(pool2)
pool3 = self.pool3(conv3_1)
# print("pool3", pool3.shape)
up2_2 = self.up2_2(conv3_1)
conv2_2 = torch.cat([up2_2, conv2_1], dim=1)
conv2_2 = self.conv2_2(conv2_2)
up1_3 = self.up1_3(conv2_2)
conv1_3 = torch.cat([up1_3, conv1_1, conv1_2], dim=1)
conv1_3 = self.conv1_3(conv1_3)
conv4_1 = self.conv4_1(pool3)
# print("conv4_1", conv4_1.shape)
up3_2 = self.up3_2(conv4_1)
# print("up3_2", up3_2.shape)
# print("conv3_1", conv3_1.shape)
conv3_2 = torch.cat([up3_2, conv3_1], dim=1)
conv3_2 = self.conv3_2(conv3_2)
up2_3 = self.up2_3(conv3_2)
conv2_3 = torch.cat([up2_3, conv2_1, conv2_2], dim=1)
conv2_3 = self.conv2_3(conv2_3)
up1_4 = self.up1_4(conv2_3)
conv1_4 = torch.cat([up1_4, conv1_1, conv1_2, conv1_3], dim=1)
conv1_4 = self.conv1_4(conv1_4)
pool4 = self.pool4(conv4_1)
conv5_1 = self.conv5_1(pool4)
up4_2 = self.up4_2(conv5_1)
conv4_2 = torch.cat([up4_2, conv4_1], dim=1)
conv4_2 = self.conv4_2(conv4_2)
up3_3 = self.up3_3(conv4_2)
conv3_3 = torch.cat([up3_3, conv3_1, conv3_2], dim=1)
conv3_3 = self.conv3_3(conv3_3)
up2_4 = self.up2_4(conv3_3)
conv2_4 = torch.cat([up2_4, conv2_1, conv2_2, conv2_3], dim=1)
conv2_4 = self.conv2_4(conv2_4)
up1_5 = self.up1_5(conv2_4)
conv1_5 = torch.cat([up1_5, conv1_1, conv1_2, conv1_3,conv1_4], dim=1)
conv1_5 = self.conv1_5(conv1_5)
output_conv = self.output_conv(conv1_5)
# output_conv = output_conv.reshape(-1, output_conv.shape[1] * output_conv.shape[2])
result = self.detection(output_conv)
return result
class Fivelayer_Lstm_Unet(nn.Module):
def __init__(self):
super(Fivelayer_Lstm_Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,32,kernel_size=17,padding=8)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(32,64,kernel_size=17,padding=8) #500
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(64,128,kernel_size=17,padding=8) #250
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_block(128,256,kernel_size=17,padding=8) #125
self.maxpool4 = nn.MaxPool1d(kernel_size=2) #62, floor(125/2) = 62
self.feature5 = _conv_block(256,512,kernel_size=13,padding=6) #62, 512 channels
self.maxpool5 = nn.MaxPool1d(kernel_size=2) #31*512
self.feature6 = _conv_block(512, 1024, kernel_size=9, padding=4) # bottom-level conv block, 31*1024
self.feature6_median = nn.LSTM(input_size=1024, hidden_size=1024,bidirectional=True,batch_first=True)
self.feature6_last = _conv_block(2048,1024,kernel_size=9,padding=4)
self.drop = nn.Dropout(0.5)
self.up5 = up_conv(1024,512,kernel_size=17,padding=8) #62*512
# skip connection
self.up5_conv = _conv_block(1024,512,kernel_size=17,padding=8) # conv block after concatenation, 62*512
self.up4 = up_conv_down(512,256,kernel_size=17,padding=8) # bottom upsampling back to 125 points, 125*256
# skip connection
self.up4_conv = _conv_block(512,256,kernel_size=17,padding=8)
self.up3 = up_conv(256,128,kernel_size=17,padding=8) #250*128
# skip connection
self.up3_conv = _conv_block(256,128,kernel_size=17,padding=8) #250*128
self.up2 = up_conv(128,64,kernel_size=17,padding=8) #500*64
# skip connection
self.up2_conv = _conv_block(128,64,kernel_size=17,padding=8)
self.up1 = up_conv(64, 32, kernel_size=17, padding=8)
self.up1_conv = _conv_block(64, 32, kernel_size=17, padding=8) # final conv block
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.5),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #62
x5 = self.feature5(x5)
x6 = self.maxpool5(x5) #31
x6 = self.feature6(x6)
x6,(hh,hc) = self.feature6_median(x6.permute(0,2,1)) # permute to (batch, length, channels) for the LSTM
x6 = self.feature6_last(x6.permute((0,2,1)))
x6 = self.drop(x6)
d6 = self.up5(x6) #62*512
d6 = torch.cat([x5,d6], dim=1) #62*1024; channels are concatenated
d6 = self.up5_conv(d6) #62*512
d5 = self.up4(d6) #125*256
d5 = torch.cat([x4,d5], dim=1) #125*512
d5 = self.up4_conv(d5) #125*256
d4 = self.up3(d5) #250*128
d4 = torch.cat([x3,d4], dim=1) #250*256
d4 = self.up3_conv(d4) #250*128
d3 = self.up2(d4) #500*64
d3 = torch.cat([x2,d3], dim=1) #500*128
d3 = self.up2_conv(d3) #500*64
d2 = self.up1(d3) #1000*32
d2 = torch.cat([x1,d2], dim=1) #1000*64
d2 = self.up1_conv(d2) #1000*32
result = self.detection(d2)
return result
class Fourlayer_Lstm_Unet(nn.Module):
def __init__(self):
super(Fourlayer_Lstm_Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,32,kernel_size=17,padding=8)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(32,64,kernel_size=17,padding=8) #500
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(64,128,kernel_size=17,padding=8) #250
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_block(128,256,kernel_size=17,padding=8) #125
self.maxpool4 = nn.MaxPool1d(kernel_size=2) #62, floor(125/2) = 62
self.feature5 = _conv_block(256, 512, kernel_size=9, padding=4) # bottom-level conv block, 62*512
self.feature5_median = nn.LSTM(input_size=512, hidden_size=512,bidirectional=True,batch_first=True)
self.feature5_last = _conv_block(1024,512,kernel_size=9,padding=4)
self.drop = nn.Dropout(0.5)
self.up4 = up_conv_down(512,256,kernel_size=17,padding=8) # bottom upsampling back to 125 points, 125*256
# skip connection
self.up4_conv = _conv_block(512,256,kernel_size=17,padding=8)
self.up3 = up_conv(256,128,kernel_size=17,padding=8) #250*128
# skip connection
self.up3_conv = _conv_block(256,128,kernel_size=17,padding=8) #250*128
self.up2 = up_conv(128,64,kernel_size=17,padding=8) #500*64
# skip connection
self.up2_conv = _conv_block(128,64,kernel_size=17,padding=8)
self.up1 = up_conv(64, 32, kernel_size=17, padding=8)
self.up1_conv = _conv_block(64, 32, kernel_size=17, padding=8) # final conv block
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.5),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #62
x6 = self.feature5(x5)
x6,(hh,hc) = self.feature5_median(x6.permute(0,2,1)) # permute to (batch, length, channels) for the LSTM
x6 = self.feature5_last(x6.permute((0,2,1)))
x6 = self.drop(x6)
d5 = self.up4(x6) #125*256
d5 = torch.cat([x4,d5], dim=1) #125*512
d5 = self.up4_conv(d5) #125*256
d4 = self.up3(d5) #250*128
d4 = torch.cat([x3,d4], dim=1) #250*256
d4 = self.up3_conv(d4) #250*128
d3 = self.up2(d4) #500*64
d3 = torch.cat([x2,d3], dim=1) #500*128
d3 = self.up2_conv(d3) #500*64
d2 = self.up1(d3) #1000*32
d2 = torch.cat([x1,d2], dim=1) #1000*64
d2 = self.up1_conv(d2) #1000*32
result = self.detection(d2)
return result
class Fourlayer_LUnet(nn.Module):
def __init__(self):
super(Fourlayer_LUnet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,32,kernel_size=17,padding=8)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(32,64,kernel_size=17,padding=8) #500
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(64,128,kernel_size=17,padding=8) #250
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature3_median = nn.LSTM(input_size=128, hidden_size=128,bidirectional=True,batch_first=True)
self.feature3_last = _conv_block(256,128,kernel_size=17,padding=8)
self.feature4 = _conv_block(128,256,kernel_size=17,padding=8) #125
self.maxpool4 = nn.MaxPool1d(kernel_size=2) #62, floor(125/2) = 62
self.feature4_median = nn.LSTM(input_size=256, hidden_size=256,bidirectional=True,batch_first=True)
self.feature4_last = _conv_block(512,256,kernel_size=17,padding=8)
self.feature5 = _conv_block(256, 512, kernel_size=9, padding=4) # bottom-level conv block, 62*512
self.feature5_median = nn.LSTM(input_size=512, hidden_size=512,bidirectional=True,batch_first=True)
self.feature5_last = _conv_block(1024,512,kernel_size=9,padding=4)
self.drop = nn.Dropout(0.5)
self.up4 = up_conv_down(512,256,kernel_size=17,padding=8) # bottom upsampling back to 125 points, 125*256
# skip connection
self.up4_conv = _conv_block(512,256,kernel_size=17,padding=8)
self.up3 = up_conv(256,128,kernel_size=17,padding=8) #250*128
# skip connection
self.up3_conv = _conv_block(256,128,kernel_size=17,padding=8) #250*128
self.up2 = up_conv(128,64,kernel_size=17,padding=8) #500*64
# skip connection
self.up2_conv = _conv_block(128,64,kernel_size=17,padding=8)
self.up1 = up_conv(64, 32, kernel_size=17, padding=8)
self.up1_conv = _conv_block(64, 32, kernel_size=17, padding=8) # final conv block
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.5),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #62
xx3,(hh,hc) = self.feature3_median(x3.permute(0,2,1)) # permute to (batch, length, channels) for the LSTM
xx3 = self.feature3_last(xx3.permute((0,2,1)))
xx4,(hh,hc) = self.feature4_median(x4.permute(0,2,1)) # permute to (batch, length, channels) for the LSTM
xx4 = self.feature4_last(xx4.permute((0,2,1)))
x6 = self.feature5(x5)
x6,(hh,hc) = self.feature5_median(x6.permute(0,2,1)) # permute to (batch, length, channels) for the LSTM
x6 = self.feature5_last(x6.permute((0,2,1)))
x6 = self.drop(x6)
d5 = self.up4(x6) #125*256
d5 = torch.cat([xx4,d5], dim=1) #125*512
d5 = self.up4_conv(d5) #125*256
d4 = self.up3(d5) #250*128
d4 = torch.cat([xx3,d4], dim=1) #250*256
d4 = self.up3_conv(d4) #250*128
d3 = self.up2(d4) #500*64
d3 = torch.cat([x2,d3], dim=1) #500*128
d3 = self.up2_conv(d3) #500*64
d2 = self.up1(d3) #1000*32
d2 = torch.cat([x1,d2], dim=1) #1000*64
d2 = self.up1_conv(d2) #1000*32
result = self.detection(d2)
return result
class Sixlayer_Unet(nn.Module):
def __init__(self):
super(Sixlayer_Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,16,kernel_size=13,padding=6)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(16,32,kernel_size=13,padding=6) #500
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(32,64,kernel_size=13,padding=6) #250
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_block(64,128,kernel_size=13,padding=6) #125
self.maxpool4 = nn.MaxPool1d(kernel_size=2) #62, floor(125/2) = 62
self.feature5 = _conv_block(128,256,kernel_size=13,padding=6) #62, 256 channels
self.maxpool5 = nn.MaxPool1d(kernel_size=2) #31*256
self.feature6 = _conv_block(256,512,kernel_size=13,padding=6) #31, 512 channels
self.maxpool6 = nn.MaxPool1d(kernel_size=2,padding=1) #16*512
self.feature7 = _conv_block(512, 1024, kernel_size=9, padding=4) # bottom-level conv block, 16*1024
self.drop = nn.Dropout(0.5)
self.up6 = up_conv2_down(1024,512,kernel_size=9,padding=4) #31*512
# skip connection
self.up6_conv = _conv_block(1024,512,kernel_size=13,padding=6) # conv block after concatenation, 31*512
self.up5 = up_conv(512,256,kernel_size=13,padding=6) #62*256
# skip connection
self.up5_conv = _conv_block(512,256,kernel_size=13,padding=6) # conv block after concatenation, 62*256
self.up4 = up_conv_down(256,128,kernel_size=13,padding=6) # upsample back to 125 points, 125*128
# skip connection
self.up4_conv = _conv_block(256,128,kernel_size=13,padding=6)
self.up3 = up_conv(128,64,kernel_size=13,padding=6) #250*64
# skip connection
self.up3_conv = _conv_block(128,64,kernel_size=13,padding=6) #250*64
self.up2 = up_conv(64,32,kernel_size=13,padding=6) #500*32
# skip connection
self.up2_conv = _conv_block(64,32,kernel_size=13,padding=6)
self.up1 = up_conv(32, 16, kernel_size=13, padding=6)
self.up1_conv = _conv_block(32, 16, kernel_size=13, padding=6) # final conv block
self.detection = nn.Sequential(
nn.Conv1d(in_channels=16, out_channels=16, kernel_size=1),
nn.Dropout(0.7),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #62
x5 = self.feature5(x5)
x6 = self.maxpool5(x5) #31
x6 = self.feature6(x6)
x7 = self.maxpool6(x6) #16
x7 = self.drop(self.feature7(x7))
d7 = self.up6(x7) #31*512
d7 = torch.cat([x6,d7], dim=1) #31*1024; channels are concatenated
d7 = self.up6_conv(d7) #31*512
d6 = self.up5(d7) #62*256
d6 = torch.cat([x5,d6], dim=1) #62*512; channels are concatenated
d6 = self.up5_conv(d6) #62*256
d5 = self.up4(d6) #125*128
d5 = torch.cat([x4,d5], dim=1) #125*256
d5 = self.up4_conv(d5) #125*128
d4 = self.up3(d5) #250*64
d4 = torch.cat([x3,d4], dim=1) #250*128
d4 = self.up3_conv(d4) #250*64
d3 = self.up2(d4) #500*32
d3 = torch.cat([x2,d3], dim=1) #500*64
d3 = self.up2_conv(d3) #500*32
d2 = self.up1(d3) #1000*16
d2 = torch.cat([x1,d2], dim=1) #1000*32
d2 = self.up1_conv(d2) #1000*16
result = self.detection(d2)
return result
class Sixlayer_Lstm_Unet(nn.Module):
def __init__(self):
super(Sixlayer_Lstm_Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,16,kernel_size=13,padding=6)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(16,32,kernel_size=13,padding=6) #500
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_block(32,64,kernel_size=13,padding=6) #250
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_block(64,128,kernel_size=13,padding=6) #125
self.maxpool4 = nn.MaxPool1d(kernel_size=2) #62, floor(125/2) = 62
self.feature5 = _conv_block(128,256,kernel_size=13,padding=6) #62, 256 channels
self.maxpool5 = nn.MaxPool1d(kernel_size=2) #31*256
self.feature6 = _conv_block(256,512,kernel_size=13,padding=6) #31, 512 channels
self.maxpool6 = nn.MaxPool1d(kernel_size=2,padding=1) #16*512
self.feature7 = _conv_block(512, 1024, kernel_size=9, padding=4) # bottom-level conv block, 16*1024
self.feature7_median = nn.LSTM(input_size=1024, hidden_size=1024,bidirectional=True,batch_first=True)
self.feature7_last = _conv_block(2048,1024,kernel_size=9,padding=4)
self.drop = nn.Dropout(0.3)
self.up6 = up_conv2_down(1024,512,kernel_size=9,padding=4) #31*512
# skip connection
self.up6_conv = _conv_block(1024,512,kernel_size=13,padding=6) # conv block after concatenation, 31*512
self.up5 = up_conv(512,256,kernel_size=13,padding=6) #62*256
# skip connection
self.up5_conv = _conv_block(512,256,kernel_size=13,padding=6) # conv block after concatenation, 62*256
self.up4 = up_conv_down(256,128,kernel_size=13,padding=6) # upsample back to 125 points, 125*128
# skip connection
self.up4_conv = _conv_block(256,128,kernel_size=13,padding=6)
self.up3 = up_conv(128,64,kernel_size=13,padding=6) #250*64
# skip connection
self.up3_conv = _conv_block(128,64,kernel_size=13,padding=6) #250*64
self.up2 = up_conv(64,32,kernel_size=13,padding=6) #500*32
# skip connection
self.up2_conv = _conv_block(64,32,kernel_size=13,padding=6)
self.up1 = up_conv(32, 16, kernel_size=13, padding=6)
self.up1_conv = _conv_block(32, 16, kernel_size=13, padding=6) # final conv block
self.detection = nn.Sequential(
nn.Conv1d(in_channels=16, out_channels=16, kernel_size=1),
nn.Dropout(0.5),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #62
x5 = self.feature5(x5)
x6 = self.maxpool5(x5) #31
x6 = self.feature6(x6)
x7 = self.maxpool6(x6) #16*1024
# x7 = self.drop(self.feature7(x7))
############################## LSTM inserted here ###########################################
x7 = self.feature7(x7)
x7,(hh,hc) = self.feature7_median(x7.permute(0,2,1)) # permute to (batch, length, channels) for the LSTM
x7 = self.feature7_last(x7.permute((0,2,1)))
x7 = self.drop(x7)
#####################################################################################
d7 = self.up6(x7) #31*512
d7 = torch.cat([x6,d7], dim=1) #31*1024; channels are concatenated
d7 = self.up6_conv(d7) #31*512
d6 = self.up5(d7) #62*256
d6 = torch.cat([x5,d6], dim=1) #62*512; channels are concatenated
d6 = self.up5_conv(d6) #62*256
d5 = self.up4(d6) #125*128
d5 = torch.cat([x4,d5], dim=1) #125*256
d5 = self.up4_conv(d5) #125*128
d4 = self.up3(d5) #250*64
d4 = torch.cat([x3,d4], dim=1) #250*128
d4 = self.up3_conv(d4) #250*64
d3 = self.up2(d4) #500*32
d3 = torch.cat([x2,d3], dim=1) #500*64
d3 = self.up2_conv(d3) #500*32
d2 = self.up1(d3) #1000*16
d2 = torch.cat([x1,d2], dim=1) #1000*32
d2 = self.up1_conv(d2) #1000*16
result = self.detection(d2)
return result
class deep_Unet(nn.Module):
def __init__(self):
super(deep_Unet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,32,kernel_size=17,padding=8)
self.maxpool1 = nn.MaxPool1d(kernel_size=2) #500
self.feature2 = _conv_block(32,64,kernel_size=17,padding=8)
self.maxpool2 = nn.MaxPool1d(kernel_size=2) #250
self.feature3 = _conv_2block(64,128,kernel_size=17,padding=8)
self.maxpool3 = nn.MaxPool1d(kernel_size=2) #125
self.feature4 = _conv_2block(128,256,kernel_size=17,padding=8)
self.maxpool4 = nn.MaxPool1d(kernel_size=2,padding=1) #63
self.feature5 = _conv_block(256, 512, kernel_size=17, padding=8)
self.drop = nn.Dropout(0.5)
self.up4 = up_conv_down(512,256,kernel_size=17,padding=8)
self.up4_conv = _conv_block(512,256,kernel_size=17,padding=8)
self.up3 = up_conv(256,128,kernel_size=17,padding=8)
self.up3_conv = _conv_block(256,128,kernel_size=17,padding=8)
self.up2 = up_conv(128,64,kernel_size=17,padding=8)
self.up2_conv = _conv_block(128,64,kernel_size=17,padding=8)
self.up1 = up_conv(64, 32, kernel_size=17, padding=8)
self.up1_conv = _conv_block(64, 32, kernel_size=17, padding=8)
self.detection = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=16, kernel_size=1),
nn.Dropout(0.7),
nn.Conv1d(in_channels=16, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1) #500
x2 = self.feature2(x2)
x3 = self.maxpool2(x2) #250
x3 = self.feature3(x3)
x4 = self.maxpool3(x3) #125
x4 = self.feature4(x4)
x5 = self.maxpool4(x4) #63
x5 = self.drop(self.feature5(x5))
d5 = self.up4(x5) #125
d5 = torch.cat([x4,d5], dim=1) #125; channels are concatenated
d5 = self.up4_conv(d5)
d4 = self.up3(d5) #250
d4 = torch.cat([x3,d4], dim=1) #250
d4 = self.up3_conv(d4)
d3 = self.up2(d4) #500
d3 = torch.cat([x2,d3], dim=1) #500
d3 = self.up2_conv(d3)
d2 = self.up1(d3) #1000
d2 = torch.cat([x1,d2], dim=1) #1000
d2 = self.up1_conv(d2)
result = self.detection(d2)
return result
class Unet_lstm(nn.Module):
def __init__(self):
super(Unet_lstm, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature1 = _conv_block(1,64,kernel_size=17,padding=8) # kernel/padding were originally 35/17
self.maxpool1 = nn.MaxPool1d(kernel_size=2)
self.feature2 = _conv_block(64,128,kernel_size=17,padding=8)
self.maxpool2 = nn.MaxPool1d(kernel_size=2)
self.feature3 = _conv_block(128,256,kernel_size=17,padding=8) # to add layers or an LSTM, start modifying from here
self.maxpool3 = nn.MaxPool1d(kernel_size=2)
self.feature4 = _conv_block(256,512,kernel_size=17,padding=8)
self.feature4_median = nn.LSTM(input_size=512, hidden_size=512,bidirectional=True,batch_first=True)
self.feature4_last = _conv_block(1024,512,kernel_size=13,padding=6)
self.up4 = up_conv(512,256,kernel_size=17,padding=8)
self.up4_conv = _conv_block(512,256,kernel_size=17,padding=8)
self.up3 = up_conv(256,128,kernel_size=17,padding=8)
self.up3_conv = _conv_block(256,128,kernel_size=17,padding=8)
self.up2 = up_conv(128,64,kernel_size=17,padding=8)
self.up2_conv = _conv_block(128,64,kernel_size=17,padding=8)
self.detection = nn.Sequential(
nn.Conv1d(in_channels=64, out_channels=32, kernel_size=1),
nn.Dropout(0.5),
nn.Conv1d(in_channels=32, out_channels=1, kernel_size=1),
)
def forward(self,x):
x = self.norm(x)
x1 = self.feature1(x)
x2 = self.maxpool1(x1)
x2 = self.feature2(x2)
x3 = self.maxpool2(x2)
x3 = self.feature3(x3)
x4 = self.maxpool3(x3)
x4 = self.feature4(x4)
x4,(hh,hc) = self.feature4_median(x4.permute(0,2,1)) # permute to (batch, length, channels) for the LSTM
x4 = self.feature4_last(x4.permute((0,2,1)))
d4 = self.up4(x4)
d4 = torch.cat([x3,d4], dim=1)
d4 = self.up4_conv(d4)
d3 = self.up3(d4)
d3 = torch.cat([x2,d3], dim=1)
d3 = self.up3_conv(d3)
d2 = self.up2(d3)
d2 = torch.cat([x1,d2], dim=1)
d2 = self.up2_conv(d2)
result = self.detection(d2)
return result
### try changing the kernel size
# class Unet_lstm(nn.Module):
# def __init__(self):
# super(Unet_lstm, self).__init__()
# self.norm = nn.BatchNorm1d(1)
# self.feature1 = _conv_block(1,64,kernel_size=35,padding=17) # kernel/padding were originally 35/17
# self.maxpool1 = nn.MaxPool1d(kernel_size=2)
#
# self.feature2 = _conv_block(64,128,kernel_size=35,padding=17)
# self.maxpool2 = nn.MaxPool1d(kernel_size=2)
#
# self.feature3 = _conv_block(128,256,kernel_size=35,padding=17) # to add layers or an LSTM, start modifying from here
# self.maxpool3 = nn.MaxPool1d(kernel_size=2)
#
# self.feature4 = _conv_block(256,512,kernel_size=35,padding=17)
# self.feature4_median = nn.LSTM(input_size=512, hidden_size=512,bidirectional=True)
# self.feature4_last = _conv_block(1024,512,kernel_size=13,padding=6)
#
#
# self.up4 = up_conv(512,256,kernel_size=35,padding=17)
# self.up4_conv = _conv_block(512,256,kernel_size=35,padding=17)
#
# self.up3 = up_conv(256,128,kernel_size=35,padding=17)
# self.up3_conv = _conv_block(256,128,kernel_size=35,padding=17)
#
# self.up2 = up_conv(128,64,kernel_size=35,padding=17)
# self.up2_conv = _conv_block(128,64,kernel_size=35,padding=17)
#
# self.detection = nn.Sequential(
# nn.Conv1d(in_channels=64, out_channels=32, kernel_size=1),
# nn.Dropout(0.5),
# nn.Conv1d(in_channels=32, out_channels=1, kernel_size=1),
# )
#
# def forward(self,x):
# x = self.norm(x)
#
# x1 = self.feature1(x)
# x2 = self.maxpool1(x1)
# x2 = self.feature2(x2)
# x3 = self.maxpool2(x2)
# x3 = self.feature3(x3)
# x4 = self.maxpool3(x3)
# x4 = self.feature4(x4)
# x4,(hh,hc) = self.feature4_median(x4.permute(0,2,1)) # permute to (batch, length, channels) for the LSTM
# x4 = self.feature4_last(x4.permute((0,2,1)))
#
#
# d4 = self.up4(x4)
# d4 = torch.cat([x3,d4], dim=1)
# d4 = self.up4_conv(d4)
# d3 = self.up3(d4)
# d3 = torch.cat([x2,d3], dim=1)
# d3 = self.up3_conv(d3)
# d2 = self.up2(d3)
# d2 = torch.cat([x1,d2], dim=1)
# d2 = self.up2_conv(d2)
#
# result = self.detection(d2)
#
#
# return result
class _Bottleneck(nn.Module):
"""
Residual block in the style of ResNet: two convolutions plus a projection shortcut.
"""
def __init__(self, in_ch, out_ch, stride,kernel_size, padding, downsample):
super(_Bottleneck, self).__init__()
self.conv1 = _ConvBnReLU(in_ch, out_ch, kernel_size,padding, stride=stride)
self.conv2 = _ConvBnReLU(out_ch, out_ch, kernel_size, padding)
self.shortcut = (
_ConvBnReLU(in_ch, out_ch, 1, 0,stride=stride) if downsample else lambda x: x # identity; the first block doubles the channels and halves the length, then the shortcut is added to the conv branch
)
def forward(self, x):
h = self.conv1(x)
h = self.conv2(h)
h += self.shortcut(x)
return F.relu(h)
class _ResLayer(nn.Sequential):
def __init__(self,in_ch,out_ch,kernel_size,padding,stride,n_layers):
super(_ResLayer, self).__init__()
for i in range(n_layers):
self.add_module(
"block{}".format(i + 1),
_Bottleneck(
in_ch=(in_ch if i == 0 else out_ch),
out_ch=out_ch,
stride=(stride if i == 0 else 1),
kernel_size=kernel_size,
padding=padding,
downsample=(True if i == 0 else False),
),
)
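# _ResLayer stacks n_layers residual blocks; only the first block downsamples (stride) and
# changes the channel count. A commented sketch with assumed example sizes:
# layer = _ResLayer(in_ch=16, out_ch=32, kernel_size=35, padding=17, stride=2, n_layers=4)
# y = layer(torch.randn(2, 16, 1000))   # -> torch.Size([2, 32, 500])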
class ResUNet(nn.Module):
def __init__(self):
super(ResUNet, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.feature = _conv_block(1,16,kernel_size=17,padding=8)
self.encoder1 = _ResLayer(in_ch=16,out_ch=32,kernel_size=35,padding=17,stride=2,n_layers=4)
self.encoder2 = _ResLayer(in_ch=32,out_ch=64,kernel_size=35,padding=17,stride=2,n_layers=4)
self.encoder3 = _ResLayer(in_ch=64,out_ch=128,kernel_size=35,padding=17,stride=2,n_layers=4)
self.up3 = up_conv(128,64,kernel_size=35,padding=17)
self.decoder3 = _conv_block(128,64,kernel_size=35,padding=17)
self.up2 = up_conv(64,32,kernel_size=35,padding=17)
self.decoder2 = _conv_block(64,32,kernel_size=35,padding=17)
self.up1 = up_conv(32,16,kernel_size=35,padding=17)
self.decoder1 = _conv_block(32,16,kernel_size=35,padding=17)
self.detection = nn.Sequential(
nn.Dropout(0.3),
_ConvBnReLU(16,1,kernel_size=1,padding=0,stride=1,bn=False)
)
def forward(self,x):
x = self.norm(x)
out1 = self.feature(x)
out2 = self.encoder1(out1)
out3 = self.encoder2(out2)
out4 = self.encoder3(out3)
up3 = self.up3(out4)
up3 = torch.cat([up3, out3], dim=1)
up2 = self.up2(self.decoder3(up3))
up2 = torch.cat([up2, out2], dim=1)
up1 = self.up1(self.decoder2(up2))
up1 = torch.cat([up1, out1], dim=1)
up1 = self.decoder1(up1)
result = self.detection(up1)
return result
class _MultiKernelLayer(nn.Module):
def __init__(self,in_ch,out_ch,kernel_size,padding):
super(_MultiKernelLayer, self).__init__()
self.MultiKernel = nn.Module()
for k,p in zip(kernel_size,padding):
self.MultiKernel.add_module(
"k{}".format(k),
nn.Sequential(
_ConvBnReLU(in_ch,out_ch,kernel_size=k,padding=p),
_ConvBnReLU(out_ch, out_ch, kernel_size=k, padding=p),
)
)
def forward(self,x):
return torch.cat( [conv(x) for conv in self.MultiKernel.children()],dim=1 )
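# _MultiKernelLayer runs the input through one two-conv branch per kernel size and concatenates
# the branch outputs, so the result has len(kernel_size) * out_ch channels. A commented sketch
# with assumed example sizes:
# layer = _MultiKernelLayer(1, 16, kernel_size=[3, 11, 21, 31], padding=[1, 5, 10, 15])
# y = layer(torch.randn(2, 1, 1000))   # -> torch.Size([2, 64, 1000])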
class DUNet(nn.Module):
def __init__(self):
super(DUNet, self).__init__()
self.encoder1 = _MultiKernelLayer(1, 16 , kernel_size=[3,11,21,31],padding=[1,5,10,15])
self.down1 = nn.MaxPool1d(kernel_size=2)
self.encoder2 = _MultiKernelLayer(64,64,kernel_size=[3,11,21,31],padding=[1,5,10,15])
self.down2 = nn.MaxPool1d(kernel_size=2)
self.encoder3 = _MultiKernelLayer(256, 256, kernel_size=[3, 11, 21, 31], padding=[1, 5, 10, 15])
self.down3 = nn.MaxPool1d(kernel_size=2)
self.classify = _Bottleneck(in_ch=1024, out_ch=1024,kernel_size=7,padding=3,stride=1,downsample=True)
self.up3 = nn.Upsample(scale_factor=2,mode='linear',align_corners=True)
self.decoder3 = nn.Sequential(
_ConvBnReLU(2048, 256, kernel_size=31, padding=15),
_ConvBnReLU(256, 256, kernel_size=31, padding=15),
)
self.up2 = nn.Upsample(scale_factor=2,mode='linear',align_corners=True)
self.decoder2 = nn.Sequential(
_ConvBnReLU(512, 64, kernel_size=31, padding=15),
_ConvBnReLU(64, 64, kernel_size=31, padding=15),
)
self.up1 = nn.Upsample(scale_factor=2,mode='linear',align_corners=True)
self.decoder1 = nn.Sequential(
_ConvBnReLU(128, 64, kernel_size=31, padding=15),
_ConvBnReLU(64, 64, kernel_size=31, padding=15),
)
self.outconv = nn.Conv1d(64,1,kernel_size=1)
def forward(self,x):
x1 = self.encoder1(x)
x2 = self.encoder2(self.down1(x1))
x3 = self.encoder3(self.down2(x2))
x4 = self.classify(self.down3(x3))
x = torch.cat([self.up3(x4),x3],dim=1)
x = self.decoder3(x)
x = torch.cat([self.up2(x),x2],dim=1)
x = self.decoder2(x)
x = torch.cat([self.up1(x),x1],dim=1)
x = self.decoder1(x)
out = self.outconv(x)
return out
class LSTM_UNet(nn.Module):
def __init__(self):
super(LSTM_UNet, self).__init__()
self.stem = nn.BatchNorm1d(1)
# batch_first=True so the (batch, length, features) views built in forward are read correctly
self.lstm1 = nn.LSTM(input_size=1,hidden_size=16, bidirectional=True, batch_first=True)
self.lstm2 = nn.LSTM(input_size=2,hidden_size=32, bidirectional=True, batch_first=True)
self.lstm3 = nn.LSTM(input_size=4,hidden_size=64, bidirectional=True, batch_first=True)
self.lstm4 = nn.LSTM(input_size=8,hidden_size=128,bidirectional=True, batch_first=True)
self.up3 = nn.Sequential(
nn.Upsample(scale_factor=2,mode='linear',align_corners=False),
nn.Conv1d(256,128,kernel_size=1),
)
self.decoder3 = nn.Sequential(
_ConvBnReLU(256,128,kernel_size=11,padding=5),
_ConvBnReLU(128, 128, kernel_size=11, padding=5),
)
self.up2 = nn.Sequential(
nn.Upsample(scale_factor=2, mode='linear', align_corners=False),
nn.Conv1d(128, 64, kernel_size=1),
)
self.decoder2 = nn.Sequential(
_ConvBnReLU(128, 64, kernel_size=11, padding=5),
_ConvBnReLU(64, 64, kernel_size=11, padding=5),
)
self.up1 = nn.Sequential(
nn.Upsample(scale_factor=2, mode='linear', align_corners=False),
nn.Conv1d(64, 32, kernel_size=1),
)
self.decoder1 = nn.Sequential(
_ConvBnReLU(64, 32, kernel_size=11, padding=5),
_ConvBnReLU(32, 32, kernel_size=11, padding=5),
)
self.output = nn.Conv1d(32,1,kernel_size=1)
def forward(self,x):
x = self.stem(x)
x1,(h_h,hc) = self.lstm1(x.view(-1, 1000, 1))
x2,(h_h,hc) = self.lstm2(x.view(-1, 500, 2))
x3,(h_h,hc) = self.lstm3(x.view(-1, 250, 4))
x4,(h_h,hc) = self.lstm4(x.view(-1, 125, 8))
x1 = x1.permute((0, 2, 1))
x2 = x2.permute((0, 2, 1))
x3 = x3.permute((0, 2, 1))
x4 = x4.permute((0, 2, 1))
x4 = self.up3(x4)
x3 = torch.cat([x4,x3],dim=1)
x3 = self.decoder3(x3)
x3 = self.up2(x3)
x2 = torch.cat([x3,x2],dim=1)
x2 = self.decoder2(x2)
x2 = self.up1(x2)
x1 = torch.cat([x2,x1],dim=1)
x1 = self.decoder1(x1)
out = self.output(x1)
return out
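# LSTM_UNet feeds the same window to four LSTMs at different "resolutions" by reshaping it to
# (batch, 1000, 1), (batch, 500, 2), (batch, 250, 4) and (batch, 125, 8), so the input length
# must be exactly 1000 samples. A commented sketch (batch size is an assumed example value):
# model = LSTM_UNet()
# print(model(torch.randn(2, 1, 1000)).shape)   # expected: torch.Size([2, 1, 1000])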
class Recurrent_block(nn.Module):
def __init__(self, ch_out,kernel_size,padding,stride=1,t=2):
super(Recurrent_block, self).__init__()
self.t = t
self.ch_out = ch_out
self.conv = nn.Sequential(
nn.Conv1d(ch_out, ch_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=True),
nn.BatchNorm1d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self, x):
for i in range(self.t):
if i == 0:
x1 = self.conv(x)
x1 = self.conv(x + x1)
return x1
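# Recurrent_block reuses one convolution, repeatedly feeding x + x1 back through it for t
# iterations, so the weights are shared across the recurrence (as in R2U-Net). A commented
# sketch with assumed example sizes:
# blk = Recurrent_block(16, kernel_size=35, padding=17, t=2)
# y = blk(torch.randn(2, 16, 1000))   # -> torch.Size([2, 16, 1000])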
class RRCNN_block(nn.Module):
def __init__(self, ch_in, ch_out,kernel_size,padding,stride=1, t=2):
super(RRCNN_block, self).__init__()
self.RCNN = nn.Sequential(
Recurrent_block(ch_out,kernel_size=kernel_size,padding=padding, stride=stride,t=t),
Recurrent_block(ch_out,kernel_size=kernel_size,padding=padding, stride=stride,t=t)
)
self.Conv_1x1 = nn.Conv1d(ch_in, ch_out, kernel_size=1, stride=1, padding=0)
def forward(self, x):
x = self.Conv_1x1(x)
x1 = self.RCNN(x)
return x + x1
class R2U_Net(nn.Module):
def __init__(self, t=2):
super(R2U_Net, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.Maxpool = nn.MaxPool1d(kernel_size=2, stride=2)
self.RRCNN1 = RRCNN_block(1,64,kernel_size=35,padding=17, t=t)
self.RRCNN2 = RRCNN_block(64, 128, kernel_size=35,padding=17,t=t)
self.RRCNN3 = RRCNN_block(128, 256, kernel_size=35,padding=17,t=t)
self.RRCNN4 = RRCNN_block(256, 512, kernel_size=35,padding=17,t=t)
self.Up4 = up_conv(ch_in=512, ch_out=256,kernel_size=35,padding=17)
self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,kernel_size=35,padding=17, t=t)
self.Up3 = up_conv(ch_in=256, ch_out=128,kernel_size=35,padding=17)
self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,kernel_size=35,padding=17, t=t)
self.Up2 = up_conv(ch_in=128, ch_out=64,kernel_size=35,padding=17)
self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,kernel_size=35,padding=17, t=t)
self.Conv_1x1 = nn.Conv1d(64, 1, kernel_size=1, stride=1, padding=0)
def forward(self, x):
# encoding path
x = self.norm(x)
x1 = self.RRCNN1(x)
x2 = self.Maxpool(x1)
x2 = self.RRCNN2(x2)
x3 = self.Maxpool(x2)
x3 = self.RRCNN3(x3)
x4 = self.Maxpool(x3)
x4 = self.RRCNN4(x4)
# decoding + concat path
d4 = self.Up4(x4)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_RRCNN4(d4)
d3 = self.Up3(d4)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_RRCNN3(d3)
d2 = self.Up2(d3)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_RRCNN2(d2)
d1 = self.Conv_1x1(d2)
return d1
class Attention_block(nn.Module):
def __init__(self, F_g, F_l, F_int):
super(Attention_block, self).__init__()
self.W_g = nn.Sequential(
nn.Conv1d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm1d(F_int)
)
self.W_x = nn.Sequential(
nn.Conv1d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm1d(F_int)
)
self.psi = nn.Sequential(
nn.Conv1d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm1d(1),
nn.Sigmoid()
)
self.relu = nn.ReLU(inplace=True)
def forward(self, g, x):
g1 = self.W_g(g)
x1 = self.W_x(x)
psi = self.relu(g1 + x1)
psi = self.psi(psi)
return x * psi
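# Attention_block gates the encoder (skip) features x with an attention map computed from the
# decoder features g; both inputs must share the same length. A commented sketch with assumed
# example sizes:
# att = Attention_block(F_g=256, F_l=256, F_int=128)
# out = att(g=torch.randn(2, 256, 250), x=torch.randn(2, 256, 250))   # -> torch.Size([2, 256, 250])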
class AttU_Net(nn.Module):
def __init__(self):
super(AttU_Net, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.Maxpool = nn.MaxPool1d(kernel_size=2, stride=2)
self.Conv1 = _conv_block(1, 64,kernel_size=17,padding=8) # kernel/padding were originally 35/17
self.Conv2 = _conv_block(64, 128,kernel_size=17,padding=8)
self.Conv3 = _conv_block(128, 256,kernel_size=17,padding=8)
self.Conv4 = _conv_block(256, 512,kernel_size=17,padding=8)
self.Up4 = up_conv(ch_in=512, ch_out=256,kernel_size=17,padding=8)
self.Att4 = Attention_block(F_g=256, F_l=256, F_int=128)
self.Up_conv4 = _conv_block(512, 256,kernel_size=17,padding=8)
self.Up3 = up_conv(ch_in=256, ch_out=128,kernel_size=17,padding=8)
self.Att3 = Attention_block(F_g=128, F_l=128, F_int=64)
self.Up_conv3 = _conv_block(256, 128,kernel_size=17,padding=8)
self.Up2 = up_conv(ch_in=128, ch_out=64,kernel_size=17,padding=8)
self.Att2 = Attention_block(F_g=64, F_l=64, F_int=32)
self.Up_conv2 = _conv_block(128, 64,kernel_size=17,padding=8)
self.Conv_1x1 = nn.Conv1d(64, 1, kernel_size=1, stride=1, padding=0)
def forward(self, x):
# encoding path
x = self.norm(x)
x1 = self.Conv1(x)
x2 = self.Maxpool(x1)
x2 = self.Conv2(x2)
x3 = self.Maxpool(x2)
x3 = self.Conv3(x3)
x4 = self.Maxpool(x3)
x4 = self.Conv4(x4)
# decoding + concat path
d4 = self.Up4(x4)
x3 = self.Att4(g=d4, x=x3)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4)
d3 = self.Up3(d4)
x2 = self.Att3(g=d3, x=x2)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3)
d2 = self.Up2(d3)
x1 = self.Att2(g=d2, x=x1)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_conv2(d2)
d1 = self.Conv_1x1(d2)
return d1
class R2AttU_Net(nn.Module):
def __init__(self, t=2):
super(R2AttU_Net, self).__init__()
self.norm = nn.BatchNorm1d(1)
self.Maxpool = nn.MaxPool1d(kernel_size=2, stride=2)
self.Upsample = nn.Upsample(scale_factor=2)
self.RRCNN1 = RRCNN_block(1, 64, kernel_size=17,padding=8,t=t)
self.RRCNN2 = RRCNN_block(64, 128, kernel_size=17,padding=8, t=t)
self.RRCNN3 = RRCNN_block(128, 256,kernel_size=17,padding=8, t=t)
self.RRCNN4 = RRCNN_block(256, 512,kernel_size=17,padding=8, t=t)
self.Up4 = up_conv(ch_in=512, ch_out=256,kernel_size=17,padding=8)
self.Att4 = Attention_block(F_g=256, F_l=256, F_int=128)
self.Up_RRCNN4 = RRCNN_block(512, 256,kernel_size=17,padding=8, t=t)
self.Up3 = up_conv(ch_in=256, ch_out=128,kernel_size=17,padding=8)
self.Att3 = Attention_block(F_g=128, F_l=128, F_int=64)
self.Up_RRCNN3 = RRCNN_block(256, 128,kernel_size=17,padding=8, t=t)
self.Up2 = up_conv(ch_in=128, ch_out=64,kernel_size=17,padding=8,)
self.Att2 = Attention_block(F_g=64, F_l=64, F_int=32)
self.Up_RRCNN2 = RRCNN_block(128, 64,kernel_size=17,padding=8, t=t)
self.Conv_1x1 = nn.Conv1d(64, 1, kernel_size=1, stride=1, padding=0)
def forward(self, x):
# encoding path
x = self.norm(x)
x1 = self.RRCNN1(x)
x2 = self.Maxpool(x1)
x2 = self.RRCNN2(x2)
x3 = self.Maxpool(x2)
x3 = self.RRCNN3(x3)
x4 = self.Maxpool(x3)
x4 = self.RRCNN4(x4)
# decoding + concat path
d4 = self.Up4(x4)
x3 = self.Att4(g=d4, x=x3)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_RRCNN4(d4)
d3 = self.Up3(d4)
x2 = self.Att3(g=d3, x=x2)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_RRCNN3(d3)
d2 = self.Up2(d3)
x1 = self.Att2(g=d2, x=x1)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_RRCNN2(d2)
d1 = self.Conv_1x1(d2)
return d1
#
# from torchsummary import summary
# # print(torch.cuda.is_available())
#
# X = torch.randn((16,1,1000)).cuda()
# model = Fourlayer_Unetplusplus().cuda()
# y = model(X)
# print(y.shape)
# summary(model, (1,1000)) ### inspect each layer's output shape, the model size, and other parameters
#
# X = torch.randn((16,3,256,256)).cuda()
# model = resnet34().cuda()
# y = model(X)
# print(y.shape)
# summary(model, (3,1000,1000)) ### inspect each layer's output shape, the model size, and other parameters
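# Minimal CPU smoke test (a sketch; the 1000-sample window, the batch size and the selection
# of models are assumptions based on the commented-out example above):
if __name__ == "__main__":
    x = torch.randn(2, 1, 1000)
    for Net in (Unet, paper_Unet, Threelayer_Unet, Fivelayer_Unet):
        net = Net()
        net.eval()  # use running BatchNorm statistics and disable dropout
        with torch.no_grad():
            print(Net.__name__, net(x).shape)  # expected: torch.Size([2, 1, 1000])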