reppan.py
yolov6/models/reppan.py
Contents
reppan.py
1. Required libraries and modules
2. class RepPANNeck(nn.Module):
3. class RepBiFPANNeck(nn.Module):
4. class RepPANNeck6(nn.Module):
5. class RepBiFPANNeck6(nn.Module):
6. class CSPRepPANNeck(nn.Module):
7. class CSPRepBiFPANNeck(nn.Module):
8. class CSPRepPANNeck_P6(nn.Module):
9. class CSPRepBiFPANNeck_P6(nn.Module):
10. class Lite_EffiNeck(nn.Module):
1. Required libraries and modules
import torch
from torch import nn
from yolov6.layers.common import (RepBlock, RepVGGBlock, BottleRep, BepC3, ConvBNReLU,
                                  Transpose, BiFusion, MBLABlock, ConvBNHS, CSPBlock, DPBlock)
2. class RepPANNeck(nn.Module):
# _QUANT=False
class RepPANNeck(nn.Module):
    """RepPANNeck Module
    EfficientRep is the default backbone of this model.
    RepPANNeck has the balance of feature fusion ability and hardware efficiency.
    """

    def __init__(self, channels_list=None, num_repeats=None, block=RepVGGBlock):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None
        # Top-down (FPN) fusion stages.
        self.Rep_p4 = RepBlock(in_channels=channels_list[3] + channels_list[5], out_channels=channels_list[5], n=num_repeats[5], block=block)
        self.Rep_p3 = RepBlock(in_channels=channels_list[2] + channels_list[6], out_channels=channels_list[6], n=num_repeats[6], block=block)
        # Bottom-up (PAN) fusion stages.
        self.Rep_n3 = RepBlock(in_channels=channels_list[6] + channels_list[7], out_channels=channels_list[8], n=num_repeats[7], block=block)
        self.Rep_n4 = RepBlock(in_channels=channels_list[5] + channels_list[9], out_channels=channels_list[10], n=num_repeats[8], block=block)
        self.reduce_layer0 = ConvBNReLU(in_channels=channels_list[4], out_channels=channels_list[5], kernel_size=1, stride=1)
        self.upsample0 = Transpose(in_channels=channels_list[5], out_channels=channels_list[5])
        self.reduce_layer1 = ConvBNReLU(in_channels=channels_list[5], out_channels=channels_list[6], kernel_size=1, stride=1)
        self.upsample1 = Transpose(in_channels=channels_list[6], out_channels=channels_list[6])
        self.downsample2 = ConvBNReLU(in_channels=channels_list[6], out_channels=channels_list[7], kernel_size=3, stride=2)
        self.downsample1 = ConvBNReLU(in_channels=channels_list[8], out_channels=channels_list[9], kernel_size=3, stride=2)

    # Inserts fake-quantization ops after the model's upsample operations so it can
    # support quantized inference. Quantization is a model-optimization technique that
    # reduces model size and compute while largely preserving accuracy, which is useful
    # for deployment on resource-constrained devices (mobile, embedded).
    #   self: the class instance.
    #   num_bits: bit width used for quantization.
    #   calib_method: calibration method used to determine the quantization parameters.
    def upsample_enable_quant(self, num_bits, calib_method):
        print("Insert fakequant after upsample")
        # Insert fakequant after upsample op to build TensorRT engine.
        # The pytorch_quantization toolkit complements TensorRT with a convenient PyTorch
        # library for producing optimizable QAT models; it offers APIs to prepare models
        # for QAT or PTQ, automatically or manually. Its core is the TensorQuantizer
        # module, which can quantize, fake-quantize, or collect statistics on a tensor.
        # It works together with QuantDescriptor, which describes how a tensor should be
        # quantized; quantized modules built on top of TensorQuantizer act as drop-in
        # replacements for full-precision PyTorch modules.
        from pytorch_quantization import nn as quant_nn
        from pytorch_quantization.tensor_quant import QuantDescriptor
        # QuantDescriptor defines how the tensor should be quantized:
        # here, the bit width and the calibration method.
        conv2d_input_default_desc = QuantDescriptor(num_bits=num_bits, calib_method=calib_method)
        # One TensorQuantizer per upsampled feature map, sharing the same descriptor.
        self.upsample_feat0_quant = quant_nn.TensorQuantizer(conv2d_input_default_desc)
        self.upsample_feat1_quant = quant_nn.TensorQuantizer(conv2d_input_default_desc)
        # global _QUANT
        # Flag that quantization has been enabled on this module.
        self._QUANT = True

    # Forward pass; input is a tuple of three backbone feature maps (x2, x1, x0).
    def forward(self, input):
        (x2, x1, x0) = input
        fpn_out0 = self.reduce_layer0(x0)           # 1x1 conv reduces the deepest feature map.
        upsample_feat0 = self.upsample0(fpn_out0)   # Learned 2x upsampling (transposed conv).
        # If quantization is enabled, fake-quantize the upsampled feature.
        if hasattr(self, '_QUANT') and self._QUANT is True:
            upsample_feat0 = self.upsample_feat0_quant(upsample_feat0)
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)  # Concatenate along channels.
        f_out0 = self.Rep_p4(f_concat_layer0)
        fpn_out1 = self.reduce_layer1(f_out0)
        upsample_feat1 = self.upsample1(fpn_out1)
        if hasattr(self, '_QUANT') and self._QUANT is True:
            upsample_feat1 = self.upsample_feat1_quant(upsample_feat1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        pan_out2 = self.Rep_p3(f_concat_layer1)
        # Bottom-up path: downsample and fuse with the FPN outputs.
        down_feat1 = self.downsample2(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n3(p_concat_layer1)
        down_feat0 = self.downsample1(pan_out1)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n4(p_concat_layer2)
        # Collect the output feature maps into a list and return it.
        outputs = [pan_out2, pan_out1, pan_out0]
        return outputs
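To make the calibrate-then-quantize workflow that upsample_enable_quant relies on concrete, here is a minimal, self-contained sketch of the pytorch_quantization API. It assumes NVIDIA's pytorch-quantization package is installed; the input tensor and the "max" calibration method are illustrative choices, not values taken from reppan.py.

import torch
from pytorch_quantization import nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor

# Describe 8-bit fake quantization with simple max calibration (illustrative values).
desc = QuantDescriptor(num_bits=8, calib_method="max")
quantizer = quant_nn.TensorQuantizer(desc)

x = torch.randn(1, 256, 40, 40)  # stand-in for an upsampled feature map

# 1) Calibration pass: collect statistics only, no quantization yet.
quantizer.disable_quant()
quantizer.enable_calib()
quantizer(x)
quantizer.load_calib_amax()      # turn the collected statistics into a scale (amax)

# 2) Quantization pass: fake-quantize tensors using the calibrated scale.
quantizer.disable_calib()
quantizer.enable_quant()
x_q = quantizer(x)               # same shape and dtype, values snapped to the 8-bit grid
print(x_q.shape)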
3. class RepBiFPANNeck(nn.Module):
class RepBiFPANNeck(nn.Module):
    """RepBiFPANNeck Module"""
    # channels_list: [64, 128, 256, 512, 1024] (backbone) + [256, 128, 128, 256, 256, 512] (neck)

    def __init__(self, channels_list=None, num_repeats=None, block=RepVGGBlock):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None
        self.reduce_layer0 = ConvBNReLU(in_channels=channels_list[4], out_channels=channels_list[5], kernel_size=1, stride=1)  # 1024 -> 256
        self.Bifusion0 = BiFusion(in_channels=[channels_list[3], channels_list[2]], out_channels=channels_list[5])  # [512, 256] -> 256
        self.Rep_p4 = RepBlock(in_channels=channels_list[5], out_channels=channels_list[5], n=num_repeats[5], block=block)  # 256 -> 256
        self.reduce_layer1 = ConvBNReLU(in_channels=channels_list[5], out_channels=channels_list[6], kernel_size=1, stride=1)  # 256 -> 128
        self.Bifusion1 = BiFusion(in_channels=[channels_list[2], channels_list[1]], out_channels=channels_list[6])  # [256, 128] -> 128
        self.Rep_p3 = RepBlock(in_channels=channels_list[6], out_channels=channels_list[6], n=num_repeats[6], block=block)  # 128 -> 128
        self.downsample2 = ConvBNReLU(in_channels=channels_list[6], out_channels=channels_list[7], kernel_size=3, stride=2)  # 128 -> 128
        self.Rep_n3 = RepBlock(in_channels=channels_list[6] + channels_list[7], out_channels=channels_list[8], n=num_repeats[7], block=block)  # 128 + 128 -> 256
        self.downsample1 = ConvBNReLU(in_channels=channels_list[8], out_channels=channels_list[9], kernel_size=3, stride=2)  # 256 -> 256
        self.Rep_n4 = RepBlock(in_channels=channels_list[5] + channels_list[9], out_channels=channels_list[10], n=num_repeats[8], block=block)  # 256 + 256 -> 512

    def forward(self, input):
        # Takes four backbone levels; each BiFusion fuses three adjacent scales at once.
        (x3, x2, x1, x0) = input
        fpn_out0 = self.reduce_layer0(x0)
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        f_out0 = self.Rep_p4(f_concat_layer0)
        fpn_out1 = self.reduce_layer1(f_out0)
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        pan_out2 = self.Rep_p3(f_concat_layer1)
        down_feat1 = self.downsample2(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n3(p_concat_layer1)
        down_feat0 = self.downsample1(pan_out1)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n4(p_concat_layer2)
        outputs = [pan_out2, pan_out1, pan_out0]
        return outputs
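As a quick sanity check of the interface, the sketch below instantiates RepBiFPANNeck and runs dummy feature maps through it. The channel widths follow the comments above; the spatial sizes assume a 640x640 input with backbone strides 4/8/16/32, and the num_repeats values are illustrative (real configs also apply width/depth multipliers). It assumes the YOLOv6 repo is on PYTHONPATH.

import torch
from yolov6.models.reppan import RepBiFPANNeck

# Backbone channels [64, 128, 256, 512, 1024] followed by neck channels [256, 128, 128, 256, 256, 512].
channels_list = [64, 128, 256, 512, 1024, 256, 128, 128, 256, 256, 512]
num_repeats = [1, 6, 12, 18, 6, 12, 12, 12, 12]  # illustrative; indices 5-8 are used by the neck

neck = RepBiFPANNeck(channels_list=channels_list, num_repeats=num_repeats)

x3 = torch.randn(1, 128, 160, 160)   # stride 4
x2 = torch.randn(1, 256, 80, 80)     # stride 8
x1 = torch.randn(1, 512, 40, 40)     # stride 16
x0 = torch.randn(1, 1024, 20, 20)    # stride 32

p3, p4, p5 = neck((x3, x2, x1, x0))
print(p3.shape, p4.shape, p5.shape)
# Expected: (1, 128, 80, 80), (1, 256, 40, 40), (1, 512, 20, 20)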
4. class RepPANNeck6(nn.Module):
class RepPANNeck6(nn.Module):
    """RepPANNeck+P6 Module
    EfficientRep is the default backbone of this model.
    RepPANNeck has the balance of feature fusion ability and hardware efficiency.
    """
    # channels_list: [64, 128, 256, 512, 768, 1024] (backbone) + [512, 256, 128, 256, 512, 1024] (neck)

    def __init__(self, channels_list=None, num_repeats=None, block=RepVGGBlock):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None
        self.reduce_layer0 = ConvBNReLU(in_channels=channels_list[5], out_channels=channels_list[6], kernel_size=1, stride=1)  # 1024 -> 512
        self.upsample0 = Transpose(in_channels=channels_list[6], out_channels=channels_list[6])  # 512 -> 512
        self.Rep_p5 = RepBlock(in_channels=channels_list[4] + channels_list[6], out_channels=channels_list[6], n=num_repeats[6], block=block)  # 768 + 512 -> 512
        self.reduce_layer1 = ConvBNReLU(in_channels=channels_list[6], out_channels=channels_list[7], kernel_size=1, stride=1)  # 512 -> 256
        self.upsample1 = Transpose(in_channels=channels_list[7], out_channels=channels_list[7])  # 256 -> 256
        self.Rep_p4 = RepBlock(in_channels=channels_list[3] + channels_list[7], out_channels=channels_list[7], n=num_repeats[7], block=block)  # 512 + 256 -> 256
        self.reduce_layer2 = ConvBNReLU(in_channels=channels_list[7], out_channels=channels_list[8], kernel_size=1, stride=1)  # 256 -> 128
        self.upsample2 = Transpose(in_channels=channels_list[8], out_channels=channels_list[8])  # 128 -> 128
        self.Rep_p3 = RepBlock(in_channels=channels_list[2] + channels_list[8], out_channels=channels_list[8], n=num_repeats[8], block=block)  # 256 + 128 -> 128
        self.downsample2 = ConvBNReLU(in_channels=channels_list[8], out_channels=channels_list[8], kernel_size=3, stride=2)  # 128 -> 128
        self.Rep_n4 = RepBlock(in_channels=channels_list[8] + channels_list[8], out_channels=channels_list[9], n=num_repeats[9], block=block)  # 128 + 128 -> 256
        self.downsample1 = ConvBNReLU(in_channels=channels_list[9], out_channels=channels_list[9], kernel_size=3, stride=2)  # 256 -> 256
        self.Rep_n5 = RepBlock(in_channels=channels_list[7] + channels_list[9], out_channels=channels_list[10], n=num_repeats[10], block=block)  # 256 + 256 -> 512
        self.downsample0 = ConvBNReLU(in_channels=channels_list[10], out_channels=channels_list[10], kernel_size=3, stride=2)  # 512 -> 512
        self.Rep_n6 = RepBlock(in_channels=channels_list[6] + channels_list[10], out_channels=channels_list[11], n=num_repeats[11], block=block)  # 512 + 512 -> 1024

    def forward(self, input):
        (x3, x2, x1, x0) = input
        fpn_out0 = self.reduce_layer0(x0)
        upsample_feat0 = self.upsample0(fpn_out0)
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)
        f_out0 = self.Rep_p5(f_concat_layer0)
        fpn_out1 = self.reduce_layer1(f_out0)
        upsample_feat1 = self.upsample1(fpn_out1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        f_out1 = self.Rep_p4(f_concat_layer1)
        fpn_out2 = self.reduce_layer2(f_out1)
        upsample_feat2 = self.upsample2(fpn_out2)
        f_concat_layer2 = torch.cat([upsample_feat2, x3], 1)
        pan_out3 = self.Rep_p3(f_concat_layer2)  # P3
        down_feat2 = self.downsample2(pan_out3)
        p_concat_layer2 = torch.cat([down_feat2, fpn_out2], 1)
        pan_out2 = self.Rep_n4(p_concat_layer2)  # P4
        down_feat1 = self.downsample1(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n5(p_concat_layer1)  # P5
        down_feat0 = self.downsample0(pan_out1)
        p_concat_layer0 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n6(p_concat_layer0)  # P6
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]
        return outputs
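The P6 variant consumes four backbone levels and returns four scales (P3 through P6) instead of three. A minimal shape check, assuming a 1280x1280 input, the channel layout from the comments above, and illustrative num_repeats values:

import torch
from yolov6.models.reppan import RepPANNeck6

channels_list = [64, 128, 256, 512, 768, 1024, 512, 256, 128, 256, 512, 1024]
num_repeats = [1, 6, 12, 18, 6, 6, 12, 12, 12, 12, 12, 12]  # illustrative; indices 6-11 are used

neck = RepPANNeck6(channels_list=channels_list, num_repeats=num_repeats)

x3 = torch.randn(1, 256, 160, 160)   # stride 8
x2 = torch.randn(1, 512, 80, 80)     # stride 16
x1 = torch.randn(1, 768, 40, 40)     # stride 32
x0 = torch.randn(1, 1024, 20, 20)    # stride 64

p3, p4, p5, p6 = neck((x3, x2, x1, x0))
# Expected: (1, 128, 160, 160), (1, 256, 80, 80), (1, 512, 40, 40), (1, 1024, 20, 20)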
5. class RepBiFPANNeck6(nn.Module):
class RepBiFPANNeck6(nn.Module):
    """RepBiFPANNeck_P6 Module"""
    # channels_list: [64, 128, 256, 512, 768, 1024] (backbone) + [512, 256, 128, 256, 512, 1024] (neck)

    def __init__(self, channels_list=None, num_repeats=None, block=RepVGGBlock):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None
        self.reduce_layer0 = ConvBNReLU(in_channels=channels_list[5], out_channels=channels_list[6], kernel_size=1, stride=1)  # 1024 -> 512
        self.Bifusion0 = BiFusion(in_channels=[channels_list[4], channels_list[6]], out_channels=channels_list[6])  # [768, 512] -> 512
        self.Rep_p5 = RepBlock(in_channels=channels_list[6], out_channels=channels_list[6], n=num_repeats[6], block=block)  # 512 -> 512
        self.reduce_layer1 = ConvBNReLU(in_channels=channels_list[6], out_channels=channels_list[7], kernel_size=1, stride=1)  # 512 -> 256
        self.Bifusion1 = BiFusion(in_channels=[channels_list[3], channels_list[7]], out_channels=channels_list[7])  # [512, 256] -> 256
        self.Rep_p4 = RepBlock(in_channels=channels_list[7], out_channels=channels_list[7], n=num_repeats[7], block=block)  # 256 -> 256
        self.reduce_layer2 = ConvBNReLU(in_channels=channels_list[7], out_channels=channels_list[8], kernel_size=1, stride=1)  # 256 -> 128
        self.Bifusion2 = BiFusion(in_channels=[channels_list[2], channels_list[8]], out_channels=channels_list[8])  # [256, 128] -> 128
        self.Rep_p3 = RepBlock(in_channels=channels_list[8], out_channels=channels_list[8], n=num_repeats[8], block=block)  # 128 -> 128
        self.downsample2 = ConvBNReLU(in_channels=channels_list[8], out_channels=channels_list[8], kernel_size=3, stride=2)  # 128 -> 128
        self.Rep_n4 = RepBlock(in_channels=channels_list[8] + channels_list[8], out_channels=channels_list[9], n=num_repeats[9], block=block)  # 128 + 128 -> 256
        self.downsample1 = ConvBNReLU(in_channels=channels_list[9], out_channels=channels_list[9], kernel_size=3, stride=2)  # 256 -> 256
        self.Rep_n5 = RepBlock(in_channels=channels_list[7] + channels_list[9], out_channels=channels_list[10], n=num_repeats[10], block=block)  # 256 + 256 -> 512
        self.downsample0 = ConvBNReLU(in_channels=channels_list[10], out_channels=channels_list[10], kernel_size=3, stride=2)  # 512 -> 512
        self.Rep_n6 = RepBlock(in_channels=channels_list[6] + channels_list[10], out_channels=channels_list[11], n=num_repeats[11], block=block)  # 512 + 512 -> 1024

    def forward(self, input):
        # Needs five backbone levels: each BiFusion pulls in an extra shallower scale.
        (x4, x3, x2, x1, x0) = input
        fpn_out0 = self.reduce_layer0(x0)
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        f_out0 = self.Rep_p5(f_concat_layer0)
        fpn_out1 = self.reduce_layer1(f_out0)
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        f_out1 = self.Rep_p4(f_concat_layer1)
        fpn_out2 = self.reduce_layer2(f_out1)
        f_concat_layer2 = self.Bifusion2([fpn_out2, x3, x4])
        pan_out3 = self.Rep_p3(f_concat_layer2)  # P3
        down_feat2 = self.downsample2(pan_out3)
        p_concat_layer2 = torch.cat([down_feat2, fpn_out2], 1)
        pan_out2 = self.Rep_n4(p_concat_layer2)  # P4
        down_feat1 = self.downsample1(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n5(p_concat_layer1)  # P5
        down_feat0 = self.downsample0(pan_out1)
        p_concat_layer0 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n6(p_concat_layer0)  # P6
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]
        return outputs
6. class CSPRepPANNeck(nn.Module):
class CSPRepPANNeck(nn.Module):
    """CSPRepPANNeck module."""

    def __init__(self, channels_list=None, num_repeats=None, block=BottleRep, csp_e=float(1)/2):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None
        self.Rep_p4 = BepC3(in_channels=channels_list[3] + channels_list[5], out_channels=channels_list[5], n=num_repeats[5], e=csp_e, block=block)  # 512 + 256 -> 256
        self.Rep_p3 = BepC3(in_channels=channels_list[2] + channels_list[6], out_channels=channels_list[6], n=num_repeats[6], e=csp_e, block=block)  # 256 + 128 -> 128
        self.Rep_n3 = BepC3(in_channels=channels_list[6] + channels_list[7], out_channels=channels_list[8], n=num_repeats[7], e=csp_e, block=block)  # 128 + 128 -> 256
        self.Rep_n4 = BepC3(in_channels=channels_list[5] + channels_list[9], out_channels=channels_list[10], n=num_repeats[8], e=csp_e, block=block)  # 256 + 256 -> 512
        self.reduce_layer0 = ConvBNReLU(in_channels=channels_list[4], out_channels=channels_list[5], kernel_size=1, stride=1)  # 1024 -> 256
        self.upsample0 = Transpose(in_channels=channels_list[5], out_channels=channels_list[5])  # 256 -> 256
        self.reduce_layer1 = ConvBNReLU(in_channels=channels_list[5], out_channels=channels_list[6], kernel_size=1, stride=1)  # 256 -> 128
        self.upsample1 = Transpose(in_channels=channels_list[6], out_channels=channels_list[6])  # 128 -> 128
        self.downsample2 = ConvBNReLU(in_channels=channels_list[6], out_channels=channels_list[7], kernel_size=3, stride=2)  # 128 -> 128
        self.downsample1 = ConvBNReLU(in_channels=channels_list[8], out_channels=channels_list[9], kernel_size=3, stride=2)  # 256 -> 256

    def forward(self, input):
        (x2, x1, x0) = input
        fpn_out0 = self.reduce_layer0(x0)
        upsample_feat0 = self.upsample0(fpn_out0)
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)
        f_out0 = self.Rep_p4(f_concat_layer0)
        fpn_out1 = self.reduce_layer1(f_out0)
        upsample_feat1 = self.upsample1(fpn_out1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        pan_out2 = self.Rep_p3(f_concat_layer1)
        down_feat1 = self.downsample2(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n3(p_concat_layer1)
        down_feat0 = self.downsample1(pan_out1)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n4(p_concat_layer2)
        outputs = [pan_out2, pan_out1, pan_out0]
        return outputs
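The only structural change from RepPANNeck is that each fusion stage is a CSP-style BepC3 block, with csp_e controlling the hidden width. The sketch below is a conceptual CSP composition (a hedged stand-in, not the repo's BepC3) to show what e=1/2 means: the input is split through two 1x1 branches of int(out_channels * e) channels, only one branch runs the repeated block stack, and the branches are concatenated and projected back.

import torch
from torch import nn

class MiniCSP(nn.Module):
    # Conceptual CSP-style block (illustrative, not yolov6's BepC3): two 1x1
    # branches at a reduced width e * out_channels; only one branch runs the
    # repeated bottleneck stack; concat + 1x1 projection recombines them.
    def __init__(self, in_channels, out_channels, n=1, e=0.5):
        super().__init__()
        c_ = int(out_channels * e)                     # hidden width controlled by e (csp_e)
        self.cv1 = nn.Conv2d(in_channels, c_, 1)       # branch that feeds the heavy stack
        self.cv2 = nn.Conv2d(in_channels, c_, 1)       # lightweight shortcut branch
        self.m = nn.Sequential(*[nn.Conv2d(c_, c_, 3, padding=1) for _ in range(n)])  # stand-in for the RepBlock stack
        self.cv3 = nn.Conv2d(2 * c_, out_channels, 1)  # fuse both branches

    def forward(self, x):
        return self.cv3(torch.cat([self.m(self.cv1(x)), self.cv2(x)], dim=1))

x = torch.randn(1, 384, 40, 40)                 # e.g. a 256 + 128 channel concat from the neck
print(MiniCSP(384, 128, n=2, e=0.5)(x).shape)   # -> torch.Size([1, 128, 40, 40])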
7. class CSPRepBiFPANNeck(nn.Module):
class CSPRepBiFPANNeck(nn.Module):
    """CSPRepBiFPANNeck module."""

    def __init__(self, channels_list=None, num_repeats=None, block=BottleRep, csp_e=float(1)/2, stage_block_type="BepC3"):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None
        # Select the stage block implementation by name.
        if stage_block_type == "BepC3":
            stage_block = BepC3
        elif stage_block_type == "MBLABlock":
            stage_block = MBLABlock
        else:
            raise NotImplementedError
        self.reduce_layer0 = ConvBNReLU(in_channels=channels_list[4], out_channels=channels_list[5], kernel_size=1, stride=1)  # 1024 -> 256
        self.Bifusion0 = BiFusion(in_channels=[channels_list[3], channels_list[2]], out_channels=channels_list[5])  # [512, 256] -> 256
        self.Rep_p4 = stage_block(in_channels=channels_list[5], out_channels=channels_list[5], n=num_repeats[5], e=csp_e, block=block)  # 256 -> 256
        self.reduce_layer1 = ConvBNReLU(in_channels=channels_list[5], out_channels=channels_list[6], kernel_size=1, stride=1)  # 256 -> 128
        self.Bifusion1 = BiFusion(in_channels=[channels_list[2], channels_list[1]], out_channels=channels_list[6])  # [256, 128] -> 128
        self.Rep_p3 = stage_block(in_channels=channels_list[6], out_channels=channels_list[6], n=num_repeats[6], e=csp_e, block=block)  # 128 -> 128
        self.downsample2 = ConvBNReLU(in_channels=channels_list[6], out_channels=channels_list[7], kernel_size=3, stride=2)  # 128 -> 128
        self.Rep_n3 = stage_block(in_channels=channels_list[6] + channels_list[7], out_channels=channels_list[8], n=num_repeats[7], e=csp_e, block=block)  # 128 + 128 -> 256
        self.downsample1 = ConvBNReLU(in_channels=channels_list[8], out_channels=channels_list[9], kernel_size=3, stride=2)  # 256 -> 256
        self.Rep_n4 = stage_block(in_channels=channels_list[5] + channels_list[9], out_channels=channels_list[10], n=num_repeats[8], e=csp_e, block=block)  # 256 + 256 -> 512

    def forward(self, input):
        (x3, x2, x1, x0) = input
        fpn_out0 = self.reduce_layer0(x0)
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        f_out0 = self.Rep_p4(f_concat_layer0)
        fpn_out1 = self.reduce_layer1(f_out0)
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        pan_out2 = self.Rep_p3(f_concat_layer1)
        down_feat1 = self.downsample2(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n3(p_concat_layer1)
        down_feat0 = self.downsample1(pan_out1)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n4(p_concat_layer2)
        outputs = [pan_out2, pan_out1, pan_out0]
        return outputs
8. class CSPRepPANNeck_P6(nn.Module):
class CSPRepPANNeck_P6(nn.Module):
    """CSPRepPANNeck_P6 Module"""
    # channels_list: [64, 128, 256, 512, 768, 1024] (backbone) + [512, 256, 128, 256, 512, 1024] (neck)

    def __init__(self, channels_list=None, num_repeats=None, block=BottleRep, csp_e=float(1)/2):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None
        self.reduce_layer0 = ConvBNReLU(in_channels=channels_list[5], out_channels=channels_list[6], kernel_size=1, stride=1)  # 1024 -> 512
        self.upsample0 = Transpose(in_channels=channels_list[6], out_channels=channels_list[6])  # 512 -> 512
        self.Rep_p5 = BepC3(in_channels=channels_list[4] + channels_list[6], out_channels=channels_list[6], n=num_repeats[6], e=csp_e, block=block)  # 768 + 512 -> 512
        self.reduce_layer1 = ConvBNReLU(in_channels=channels_list[6], out_channels=channels_list[7], kernel_size=1, stride=1)  # 512 -> 256
        self.upsample1 = Transpose(in_channels=channels_list[7], out_channels=channels_list[7])  # 256 -> 256
        self.Rep_p4 = BepC3(in_channels=channels_list[3] + channels_list[7], out_channels=channels_list[7], n=num_repeats[7], e=csp_e, block=block)  # 512 + 256 -> 256
        self.reduce_layer2 = ConvBNReLU(in_channels=channels_list[7], out_channels=channels_list[8], kernel_size=1, stride=1)  # 256 -> 128
        self.upsample2 = Transpose(in_channels=channels_list[8], out_channels=channels_list[8])  # 128 -> 128
        self.Rep_p3 = BepC3(in_channels=channels_list[2] + channels_list[8], out_channels=channels_list[8], n=num_repeats[8], e=csp_e, block=block)  # 256 + 128 -> 128
        self.downsample2 = ConvBNReLU(in_channels=channels_list[8], out_channels=channels_list[8], kernel_size=3, stride=2)  # 128 -> 128
        self.Rep_n4 = BepC3(in_channels=channels_list[8] + channels_list[8], out_channels=channels_list[9], n=num_repeats[9], e=csp_e, block=block)  # 128 + 128 -> 256
        self.downsample1 = ConvBNReLU(in_channels=channels_list[9], out_channels=channels_list[9], kernel_size=3, stride=2)  # 256 -> 256
        self.Rep_n5 = BepC3(in_channels=channels_list[7] + channels_list[9], out_channels=channels_list[10], n=num_repeats[10], e=csp_e, block=block)  # 256 + 256 -> 512
        self.downsample0 = ConvBNReLU(in_channels=channels_list[10], out_channels=channels_list[10], kernel_size=3, stride=2)  # 512 -> 512
        self.Rep_n6 = BepC3(in_channels=channels_list[6] + channels_list[10], out_channels=channels_list[11], n=num_repeats[11], e=csp_e, block=block)  # 512 + 512 -> 1024

    def forward(self, input):
        (x3, x2, x1, x0) = input
        fpn_out0 = self.reduce_layer0(x0)
        upsample_feat0 = self.upsample0(fpn_out0)
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)
        f_out0 = self.Rep_p5(f_concat_layer0)
        fpn_out1 = self.reduce_layer1(f_out0)
        upsample_feat1 = self.upsample1(fpn_out1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        f_out1 = self.Rep_p4(f_concat_layer1)
        fpn_out2 = self.reduce_layer2(f_out1)
        upsample_feat2 = self.upsample2(fpn_out2)
        f_concat_layer2 = torch.cat([upsample_feat2, x3], 1)
        pan_out3 = self.Rep_p3(f_concat_layer2)  # P3
        down_feat2 = self.downsample2(pan_out3)
        p_concat_layer2 = torch.cat([down_feat2, fpn_out2], 1)
        pan_out2 = self.Rep_n4(p_concat_layer2)  # P4
        down_feat1 = self.downsample1(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n5(p_concat_layer1)  # P5
        down_feat0 = self.downsample0(pan_out1)
        p_concat_layer0 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n6(p_concat_layer0)  # P6
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]
        return outputs
9. class CSPRepBiFPANNeck_P6(nn.Module):
class CSPRepBiFPANNeck_P6(nn.Module):
    """CSPRepBiFPANNeck_P6 Module"""
    # channels_list: [64, 128, 256, 512, 768, 1024] (backbone) + [512, 256, 128, 256, 512, 1024] (neck)

    def __init__(self, channels_list=None, num_repeats=None, block=BottleRep, csp_e=float(1)/2):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None
        self.reduce_layer0 = ConvBNReLU(in_channels=channels_list[5], out_channels=channels_list[6], kernel_size=1, stride=1)  # 1024 -> 512
        self.Bifusion0 = BiFusion(in_channels=[channels_list[4], channels_list[6]], out_channels=channels_list[6])  # [768, 512] -> 512
        self.Rep_p5 = BepC3(in_channels=channels_list[6], out_channels=channels_list[6], n=num_repeats[6], e=csp_e, block=block)  # 512 -> 512
        self.reduce_layer1 = ConvBNReLU(in_channels=channels_list[6], out_channels=channels_list[7], kernel_size=1, stride=1)  # 512 -> 256
        self.Bifusion1 = BiFusion(in_channels=[channels_list[3], channels_list[7]], out_channels=channels_list[7])  # [512, 256] -> 256
        self.Rep_p4 = BepC3(in_channels=channels_list[7], out_channels=channels_list[7], n=num_repeats[7], e=csp_e, block=block)  # 256 -> 256
        self.reduce_layer2 = ConvBNReLU(in_channels=channels_list[7], out_channels=channels_list[8], kernel_size=1, stride=1)  # 256 -> 128
        self.Bifusion2 = BiFusion(in_channels=[channels_list[2], channels_list[8]], out_channels=channels_list[8])  # [256, 128] -> 128
        self.Rep_p3 = BepC3(in_channels=channels_list[8], out_channels=channels_list[8], n=num_repeats[8], e=csp_e, block=block)  # 128 -> 128
        self.downsample2 = ConvBNReLU(in_channels=channels_list[8], out_channels=channels_list[8], kernel_size=3, stride=2)  # 128 -> 128
        self.Rep_n4 = BepC3(in_channels=channels_list[8] + channels_list[8], out_channels=channels_list[9], n=num_repeats[9], e=csp_e, block=block)  # 128 + 128 -> 256
        self.downsample1 = ConvBNReLU(in_channels=channels_list[9], out_channels=channels_list[9], kernel_size=3, stride=2)  # 256 -> 256
        self.Rep_n5 = BepC3(in_channels=channels_list[7] + channels_list[9], out_channels=channels_list[10], n=num_repeats[10], e=csp_e, block=block)  # 256 + 256 -> 512
        self.downsample0 = ConvBNReLU(in_channels=channels_list[10], out_channels=channels_list[10], kernel_size=3, stride=2)  # 512 -> 512
        self.Rep_n6 = BepC3(in_channels=channels_list[6] + channels_list[10], out_channels=channels_list[11], n=num_repeats[11], e=csp_e, block=block)  # 512 + 512 -> 1024

    def forward(self, input):
        (x4, x3, x2, x1, x0) = input
        fpn_out0 = self.reduce_layer0(x0)
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        f_out0 = self.Rep_p5(f_concat_layer0)
        fpn_out1 = self.reduce_layer1(f_out0)
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        f_out1 = self.Rep_p4(f_concat_layer1)
        fpn_out2 = self.reduce_layer2(f_out1)
        f_concat_layer2 = self.Bifusion2([fpn_out2, x3, x4])
        pan_out3 = self.Rep_p3(f_concat_layer2)  # P3
        down_feat2 = self.downsample2(pan_out3)
        p_concat_layer2 = torch.cat([down_feat2, fpn_out2], 1)
        pan_out2 = self.Rep_n4(p_concat_layer2)  # P4
        down_feat1 = self.downsample1(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n5(p_concat_layer1)  # P5
        down_feat0 = self.downsample0(pan_out1)
        p_concat_layer0 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n6(p_concat_layer0)  # P6
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]
        return outputs
10. class Lite_EffiNeck(nn.Module):
class Lite_EffiNeck(nn.Module):

    def __init__(self, in_channels, unified_channels):
        super().__init__()
        # Project all three backbone outputs to a single unified channel count.
        self.reduce_layer0 = ConvBNHS(in_channels=in_channels[0], out_channels=unified_channels, kernel_size=1, stride=1, padding=0)
        self.reduce_layer1 = ConvBNHS(in_channels=in_channels[1], out_channels=unified_channels, kernel_size=1, stride=1, padding=0)
        self.reduce_layer2 = ConvBNHS(in_channels=in_channels[2], out_channels=unified_channels, kernel_size=1, stride=1, padding=0)
        # torch.nn.Upsample(size=None, scale_factor=None, mode='nearest', align_corners=None)
        #   size: target output size for the given input type.
        #   scale_factor: multiplier for the input size; must be a tuple if the input size is a tuple.
        #   mode: upsampling algorithm: 'nearest', 'linear', 'bilinear', 'bicubic', or 'trilinear';
        #     default 'nearest'.
        #   align_corners: if True, the corner pixels of the input and output are aligned so their
        #     values are preserved; only valid for 'linear', 'bilinear', or 'trilinear'; default False.
        self.upsample0 = nn.Upsample(scale_factor=2, mode='nearest')
        self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
        self.Csp_p4 = CSPBlock(in_channels=unified_channels * 2, out_channels=unified_channels, kernel_size=5)
        self.Csp_p3 = CSPBlock(in_channels=unified_channels * 2, out_channels=unified_channels, kernel_size=5)
        self.Csp_n3 = CSPBlock(in_channels=unified_channels * 2, out_channels=unified_channels, kernel_size=5)
        self.Csp_n4 = CSPBlock(in_channels=unified_channels * 2, out_channels=unified_channels, kernel_size=5)
        self.downsample2 = DPBlock(in_channel=unified_channels, out_channel=unified_channels, kernel_size=5, stride=2)
        self.downsample1 = DPBlock(in_channel=unified_channels, out_channel=unified_channels, kernel_size=5, stride=2)
        self.p6_conv_1 = DPBlock(in_channel=unified_channels, out_channel=unified_channels, kernel_size=5, stride=2)
        self.p6_conv_2 = DPBlock(in_channel=unified_channels, out_channel=unified_channels, kernel_size=5, stride=2)

    def forward(self, input):
        (x2, x1, x0) = input
        fpn_out0 = self.reduce_layer0(x0)  # C5
        x1 = self.reduce_layer1(x1)        # C4
        x2 = self.reduce_layer2(x2)        # C3
        upsample_feat0 = self.upsample0(fpn_out0)
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)
        f_out1 = self.Csp_p4(f_concat_layer0)
        upsample_feat1 = self.upsample1(f_out1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        pan_out3 = self.Csp_p3(f_concat_layer1)  # P3
        down_feat1 = self.downsample2(pan_out3)
        p_concat_layer1 = torch.cat([down_feat1, f_out1], 1)
        pan_out2 = self.Csp_n3(p_concat_layer1)  # P4
        down_feat0 = self.downsample1(pan_out2)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out1 = self.Csp_n4(p_concat_layer2)  # P5
        # P6 is built from two strided depthwise convs: both operands land at the
        # same (half-P5) resolution, so they can be summed elementwise.
        top_features = self.p6_conv_1(fpn_out0)
        pan_out0 = top_features + self.p6_conv_2(pan_out1)  # P6
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]
        return outputs
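Unlike the Rep* necks, which upsample with learned transposed convolutions (Transpose), Lite_EffiNeck uses parameter-free nearest-neighbor interpolation, which is cheaper for lite/mobile deployment. A quick self-contained demo of the nn.Upsample settings used above:

import torch
from torch import nn

up = nn.Upsample(scale_factor=2, mode='nearest')  # parameter-free: no weights to train or fold
x = torch.tensor([[[[1., 2.],
                    [3., 4.]]]])                  # shape (1, 1, 2, 2)
print(up(x))
# Each input pixel is replicated into a 2x2 block:
# tensor([[[[1., 1., 2., 2.],
#           [1., 1., 2., 2.],
#           [3., 3., 4., 4.],
#           [3., 3., 4., 4.]]]])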