YOLOv6-4.0部分代码阅读笔记-reppan.py

reppan.py

yolov6\models\reppan.py

目录

reppan.py

1.所需的库和模块

2.class RepPANNeck(nn.Module): 

3.class RepBiFPANNeck(nn.Module): 

4.class RepPANNeck6(nn.Module): 

5.class RepBiFPANNeck6(nn.Module): 

6.class CSPRepPANNeck(nn.Module): 

7.class CSPRepBiFPANNeck(nn.Module): 

8.class CSPRepPANNeck_P6(nn.Module): 

9.class CSPRepBiFPANNeck_P6(nn.Module): 

10.class Lite_EffiNeck(nn.Module): 


1.所需的库和模块

import torch
from torch import nn
from yolov6.layers.common import RepBlock, RepVGGBlock, BottleRep, BepC3, ConvBNReLU, Transpose, BiFusion, \MBLABlock, ConvBNHS, CSPBlock, DPBlock

2.class RepPANNeck(nn.Module): 

# _QUANT=False
class RepPANNeck(nn.Module):
    """RepPANNeck Module.

    EfficientRep is the default backbone of this model.
    RepPANNeck has the balance of feature fusion ability and hardware
    efficiency.
    """

    def __init__(
        self,
        channels_list=None,
        num_repeats=None,
        block=RepVGGBlock
    ):
        # channels_list: per-stage channel widths (backbone + neck), indexed below.
        # num_repeats:   number of basic blocks per RepBlock stage.
        # block:         basic block class used inside every RepBlock.
        super().__init__()

        # Both configuration lists are mandatory.
        assert channels_list is not None
        assert num_repeats is not None

        # --- top-down (FPN) fusion stages ---
        self.Rep_p4 = RepBlock(
            in_channels=channels_list[3] + channels_list[5],
            out_channels=channels_list[5],
            n=num_repeats[5],
            block=block
        )

        self.Rep_p3 = RepBlock(
            in_channels=channels_list[2] + channels_list[6],
            out_channels=channels_list[6],
            n=num_repeats[6],
            block=block
        )

        # --- bottom-up (PAN) fusion stages ---
        self.Rep_n3 = RepBlock(
            in_channels=channels_list[6] + channels_list[7],
            out_channels=channels_list[8],
            n=num_repeats[7],
            block=block
        )

        self.Rep_n4 = RepBlock(
            in_channels=channels_list[5] + channels_list[9],
            out_channels=channels_list[10],
            n=num_repeats[8],
            block=block
        )

        # 1x1 convs that shrink channels before each upsample.
        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[4],
            out_channels=channels_list[5],
            kernel_size=1,
            stride=1
        )

        # Learned (transposed-conv) 2x upsampling.
        self.upsample0 = Transpose(
            in_channels=channels_list[5],
            out_channels=channels_list[5],
        )

        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[5],
            out_channels=channels_list[6],
            kernel_size=1,
            stride=1
        )

        self.upsample1 = Transpose(
            in_channels=channels_list[6],
            out_channels=channels_list[6]
        )

        # Strided 3x3 convs for the bottom-up (downsampling) path.
        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[6],
            out_channels=channels_list[7],
            kernel_size=3,
            stride=2
        )

        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[8],
            out_channels=channels_list[9],
            kernel_size=3,
            stride=2
        )

    def upsample_enable_quant(self, num_bits, calib_method):
        """Insert fake-quantization nodes after the two upsample ops.

        Used when preparing a quantized (QAT/PTQ) model for TensorRT engine
        building, where tensors feeding a concat need their own quantizers.

        Args:
            num_bits: bit width used for quantization.
            calib_method: calibration method used to derive quant ranges.
        """
        print("Insert fakequant after upsample")
        # Insert fakequant after upsample op to build TensorRT engine.
        # pytorch_quantization is imported lazily so the plain float model
        # does not require the toolkit to be installed.
        from pytorch_quantization import nn as quant_nn
        from pytorch_quantization.tensor_quant import QuantDescriptor
        # QuantDescriptor describes how a tensor should be quantized
        # (bit width + calibration method).
        conv2d_input_default_desc = QuantDescriptor(num_bits=num_bits, calib_method=calib_method)
        # One TensorQuantizer per upsampled feature map, sharing the descriptor.
        self.upsample_feat0_quant = quant_nn.TensorQuantizer(conv2d_input_default_desc)
        self.upsample_feat1_quant = quant_nn.TensorQuantizer(conv2d_input_default_desc)
        # global _QUANT
        # Flag checked in forward() to route tensors through the quantizers.
        self._QUANT = True

    def forward(self, input):
        # input: tuple of three backbone features, shallow (x2) to deep (x0).
        (x2, x1, x0) = input

        # --- top-down path ---
        fpn_out0 = self.reduce_layer0(x0)
        upsample_feat0 = self.upsample0(fpn_out0)
        # Apply fake quantization only when upsample_enable_quant() was called.
        if hasattr(self, '_QUANT') and self._QUANT is True:
            upsample_feat0 = self.upsample_feat0_quant(upsample_feat0)
        # Concatenate along the channel dimension with the next-shallower level.
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)
        f_out0 = self.Rep_p4(f_concat_layer0)

        fpn_out1 = self.reduce_layer1(f_out0)
        upsample_feat1 = self.upsample1(fpn_out1)
        if hasattr(self, '_QUANT') and self._QUANT is True:
            upsample_feat1 = self.upsample_feat1_quant(upsample_feat1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        pan_out2 = self.Rep_p3(f_concat_layer1)

        # --- bottom-up path ---
        down_feat1 = self.downsample2(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n3(p_concat_layer1)

        down_feat0 = self.downsample1(pan_out1)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n4(p_concat_layer2)

        # Multi-scale outputs, finest scale first.
        outputs = [pan_out2, pan_out1, pan_out0]

        return outputs

3.class RepBiFPANNeck(nn.Module): 

class RepBiFPANNeck(nn.Module):
    """RepBiFPANNeck Module.

    PAN-style neck whose top-down fusion uses BiFusion blocks (merging three
    scales at once) instead of a plain upsample + concat.
    """
    # channels_list layout: [64, 128, 256, 512, 1024] (backbone)
    #                       [256, 128, 128, 256, 256, 512] (neck)
    def __init__(
        self,
        channels_list=None,
        num_repeats=None,
        block=RepVGGBlock
    ):
        super().__init__()

        assert channels_list is not None
        assert num_repeats is not None

        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[4], # 1024
            out_channels=channels_list[5], # 256
            kernel_size=1,
            stride=1
        )

        # Fuses the reduced deep feature with the two neighbouring backbone levels.
        self.Bifusion0 = BiFusion(
            in_channels=[channels_list[3], channels_list[2]], # 512, 256
            out_channels=channels_list[5], # 256
        )

        self.Rep_p4 = RepBlock(
            in_channels=channels_list[5], # 256
            out_channels=channels_list[5], # 256
            n=num_repeats[5],
            block=block
        )

        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[5], # 256
            out_channels=channels_list[6], # 128
            kernel_size=1,
            stride=1
        )

        self.Bifusion1 = BiFusion(
            in_channels=[channels_list[2], channels_list[1]], # 256, 128
            out_channels=channels_list[6], # 128
        )

        self.Rep_p3 = RepBlock(
            in_channels=channels_list[6], # 128
            out_channels=channels_list[6], # 128
            n=num_repeats[6],
            block=block
        )

        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[6], # 128
            out_channels=channels_list[7], # 128
            kernel_size=3,
            stride=2
        )

        self.Rep_n3 = RepBlock(
            in_channels=channels_list[6] + channels_list[7], # 128 + 128
            out_channels=channels_list[8], # 256
            n=num_repeats[7],
            block=block
        )

        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[8], # 256
            out_channels=channels_list[9], # 256
            kernel_size=3,
            stride=2
        )

        self.Rep_n4 = RepBlock(
            in_channels=channels_list[5] + channels_list[9], # 256 + 256
            out_channels=channels_list[10], # 512
            n=num_repeats[8],
            block=block
        )

    def forward(self, input):
        # Four backbone scales, shallow (x3) to deep (x0); BiFusion needs one
        # extra shallow level compared to plain RepPANNeck.
        (x3, x2, x1, x0) = input

        # --- top-down path (BiFusion takes [deep, mid, shallow]) ---
        fpn_out0 = self.reduce_layer0(x0)
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        f_out0 = self.Rep_p4(f_concat_layer0)

        fpn_out1 = self.reduce_layer1(f_out0)
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        pan_out2 = self.Rep_p3(f_concat_layer1)

        # --- bottom-up path ---
        down_feat1 = self.downsample2(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n3(p_concat_layer1)

        down_feat0 = self.downsample1(pan_out1)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n4(p_concat_layer2)

        # Multi-scale outputs, finest scale first.
        outputs = [pan_out2, pan_out1, pan_out0]

        return outputs

4.class RepPANNeck6(nn.Module): 

class RepPANNeck6(nn.Module):
    """RepPANNeck+P6 Module.

    Four-scale (P3-P6) variant of RepPANNeck.
    EfficientRep is the default backbone of this model.
    RepPANNeck has the balance of feature fusion ability and hardware
    efficiency.
    """
    # channels_list layout: [64, 128, 256, 512, 768, 1024] (backbone)
    #                       [512, 256, 128, 256, 512, 1024] (neck)
    def __init__(
        self,
        channels_list=None,
        num_repeats=None,
        block=RepVGGBlock
    ):
        super().__init__()

        assert channels_list is not None
        assert num_repeats is not None

        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[5], # 1024
            out_channels=channels_list[6], # 512
            kernel_size=1,
            stride=1
        )

        self.upsample0 = Transpose(
            in_channels=channels_list[6],  # 512
            out_channels=channels_list[6], # 512
        )

        self.Rep_p5 = RepBlock(
            in_channels=channels_list[4] + channels_list[6], # 768 + 512
            out_channels=channels_list[6], # 512
            n=num_repeats[6],
            block=block
        )

        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[6],  # 512
            out_channels=channels_list[7], # 256
            kernel_size=1,
            stride=1
        )

        self.upsample1 = Transpose(
            in_channels=channels_list[7], # 256
            out_channels=channels_list[7] # 256
        )

        self.Rep_p4 = RepBlock(
            in_channels=channels_list[3] + channels_list[7], # 512 + 256
            out_channels=channels_list[7], # 256
            n=num_repeats[7],
            block=block
        )

        self.reduce_layer2 = ConvBNReLU(
            in_channels=channels_list[7],  # 256
            out_channels=channels_list[8], # 128
            kernel_size=1,
            stride=1
        )

        self.upsample2 = Transpose(
            in_channels=channels_list[8], # 128
            out_channels=channels_list[8] # 128
        )

        self.Rep_p3 = RepBlock(
            in_channels=channels_list[2] + channels_list[8], # 256 + 128
            out_channels=channels_list[8], # 128
            n=num_repeats[8],
            block=block
        )

        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[8],  # 128
            out_channels=channels_list[8], # 128
            kernel_size=3,
            stride=2
        )

        self.Rep_n4 = RepBlock(
            in_channels=channels_list[8] + channels_list[8], # 128 + 128
            out_channels=channels_list[9], # 256
            n=num_repeats[9],
            block=block
        )

        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[9],  # 256
            out_channels=channels_list[9], # 256
            kernel_size=3,
            stride=2
        )

        self.Rep_n5 = RepBlock(
            in_channels=channels_list[7] + channels_list[9], # 256 + 256
            out_channels=channels_list[10], # 512
            n=num_repeats[10],
            block=block
        )

        self.downsample0 = ConvBNReLU(
            in_channels=channels_list[10],  # 512
            out_channels=channels_list[10], # 512
            kernel_size=3,
            stride=2
        )

        self.Rep_n6 = RepBlock(
            in_channels=channels_list[6] + channels_list[10], # 512 + 512
            out_channels=channels_list[11], # 1024
            n=num_repeats[11],
            block=block
        )

    def forward(self, input):
        # Four backbone scales, shallow (x3) to deep (x0).
        (x3, x2, x1, x0) = input

        # --- top-down (FPN) path ---
        fpn_out0 = self.reduce_layer0(x0)
        upsample_feat0 = self.upsample0(fpn_out0)
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)
        f_out0 = self.Rep_p5(f_concat_layer0)

        fpn_out1 = self.reduce_layer1(f_out0)
        upsample_feat1 = self.upsample1(fpn_out1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        f_out1 = self.Rep_p4(f_concat_layer1)

        fpn_out2 = self.reduce_layer2(f_out1)
        upsample_feat2 = self.upsample2(fpn_out2)
        f_concat_layer2 = torch.cat([upsample_feat2, x3], 1)
        pan_out3 = self.Rep_p3(f_concat_layer2) # P3

        # --- bottom-up (PAN) path ---
        down_feat2 = self.downsample2(pan_out3)
        p_concat_layer2 = torch.cat([down_feat2, fpn_out2], 1)
        pan_out2 = self.Rep_n4(p_concat_layer2) # P4

        down_feat1 = self.downsample1(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n5(p_concat_layer1) # P5

        down_feat0 = self.downsample0(pan_out1)
        p_concat_layer0 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n6(p_concat_layer0) # P6

        # Multi-scale outputs, finest scale first.
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]

        return outputs

5.class RepBiFPANNeck6(nn.Module): 

class RepBiFPANNeck6(nn.Module):
    """RepBiFPANNeck_P6 Module.

    Four-scale (P3-P6) variant of RepBiFPANNeck: top-down fusion uses
    BiFusion blocks, bottom-up fusion uses strided convs + RepBlocks.
    """
    # channels_list layout: [64, 128, 256, 512, 768, 1024] (backbone)
    #                       [512, 256, 128, 256, 512, 1024] (neck)
    def __init__(
        self,
        channels_list=None,
        num_repeats=None,
        block=RepVGGBlock
    ):
        super().__init__()

        assert channels_list is not None
        assert num_repeats is not None

        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[5], # 1024
            out_channels=channels_list[6], # 512
            kernel_size=1,
            stride=1
        )

        self.Bifusion0 = BiFusion(
            in_channels=[channels_list[4], channels_list[6]], # 768, 512
            out_channels=channels_list[6], # 512
        )

        self.Rep_p5 = RepBlock(
            in_channels=channels_list[6], # 512
            out_channels=channels_list[6], # 512
            n=num_repeats[6],
            block=block
        )

        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[6],  # 512
            out_channels=channels_list[7], # 256
            kernel_size=1,
            stride=1
        )

        self.Bifusion1 = BiFusion(
            in_channels=[channels_list[3], channels_list[7]], # 512, 256
            out_channels=channels_list[7], # 256
        )

        self.Rep_p4 = RepBlock(
            in_channels=channels_list[7], # 256
            out_channels=channels_list[7], # 256
            n=num_repeats[7],
            block=block
        )

        self.reduce_layer2 = ConvBNReLU(
            in_channels=channels_list[7],  # 256
            out_channels=channels_list[8], # 128
            kernel_size=1,
            stride=1
        )

        self.Bifusion2 = BiFusion(
            in_channels=[channels_list[2], channels_list[8]], # 256, 128
            out_channels=channels_list[8], # 128
        )

        self.Rep_p3 = RepBlock(
            in_channels=channels_list[8], # 128
            out_channels=channels_list[8], # 128
            n=num_repeats[8],
            block=block
        )

        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[8],  # 128
            out_channels=channels_list[8], # 128
            kernel_size=3,
            stride=2
        )

        self.Rep_n4 = RepBlock(
            in_channels=channels_list[8] + channels_list[8], # 128 + 128
            out_channels=channels_list[9], # 256
            n=num_repeats[9],
            block=block
        )

        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[9],  # 256
            out_channels=channels_list[9], # 256
            kernel_size=3,
            stride=2
        )

        self.Rep_n5 = RepBlock(
            in_channels=channels_list[7] + channels_list[9], # 256 + 256
            out_channels=channels_list[10], # 512
            n=num_repeats[10],
            block=block
        )

        self.downsample0 = ConvBNReLU(
            in_channels=channels_list[10],  # 512
            out_channels=channels_list[10], # 512
            kernel_size=3,
            stride=2
        )

        self.Rep_n6 = RepBlock(
            in_channels=channels_list[6] + channels_list[10], # 512 + 512
            out_channels=channels_list[11], # 1024
            n=num_repeats[11],
            block=block
        )

    def forward(self, input):
        # Five backbone scales, shallow (x4) to deep (x0); BiFusion needs one
        # extra shallow level compared to RepPANNeck6.
        (x4, x3, x2, x1, x0) = input

        # --- top-down path (BiFusion takes [deep, mid, shallow]) ---
        fpn_out0 = self.reduce_layer0(x0)
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        f_out0 = self.Rep_p5(f_concat_layer0)

        fpn_out1 = self.reduce_layer1(f_out0)
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        f_out1 = self.Rep_p4(f_concat_layer1)

        fpn_out2 = self.reduce_layer2(f_out1)
        f_concat_layer2 = self.Bifusion2([fpn_out2, x3, x4])
        pan_out3 = self.Rep_p3(f_concat_layer2) # P3

        # --- bottom-up path ---
        down_feat2 = self.downsample2(pan_out3)
        p_concat_layer2 = torch.cat([down_feat2, fpn_out2], 1)
        pan_out2 = self.Rep_n4(p_concat_layer2) # P4

        down_feat1 = self.downsample1(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n5(p_concat_layer1) # P5

        down_feat0 = self.downsample0(pan_out1)
        p_concat_layer0 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n6(p_concat_layer0) # P6

        # Multi-scale outputs, finest scale first.
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]

        return outputs

6.class CSPRepPANNeck(nn.Module): 

class CSPRepPANNeck(nn.Module):
    """CSPRepPANNeck module.

    Same topology as RepPANNeck, but the fusion stages use CSP-style BepC3
    blocks (with expansion ratio ``csp_e``) instead of plain RepBlocks.
    """

    def __init__(
        self,
        channels_list=None,
        num_repeats=None,
        block=BottleRep,
        csp_e=float(1)/2
    ):
        # csp_e: CSP hidden-channel expansion ratio passed to every BepC3 stage.
        super().__init__()

        assert channels_list is not None
        assert num_repeats is not None

        # --- top-down (FPN) fusion stages ---
        self.Rep_p4 = BepC3(
            in_channels=channels_list[3] + channels_list[5], # 512 + 256
            out_channels=channels_list[5], # 256
            n=num_repeats[5],
            e=csp_e,
            block=block
        )

        self.Rep_p3 = BepC3(
            in_channels=channels_list[2] + channels_list[6], # 256 + 128
            out_channels=channels_list[6], # 128
            n=num_repeats[6],
            e=csp_e,
            block=block
        )

        # --- bottom-up (PAN) fusion stages ---
        self.Rep_n3 = BepC3(
            in_channels=channels_list[6] + channels_list[7], # 128 + 128
            out_channels=channels_list[8], # 256
            n=num_repeats[7],
            e=csp_e,
            block=block
        )

        self.Rep_n4 = BepC3(
            in_channels=channels_list[5] + channels_list[9], # 256 + 256
            out_channels=channels_list[10], # 512
            n=num_repeats[8],
            e=csp_e,
            block=block
        )

        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[4], # 1024
            out_channels=channels_list[5], # 256
            kernel_size=1,
            stride=1
        )

        self.upsample0 = Transpose(
            in_channels=channels_list[5], # 256
            out_channels=channels_list[5], # 256
        )

        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[5], # 256
            out_channels=channels_list[6], # 128
            kernel_size=1,
            stride=1
        )

        self.upsample1 = Transpose(
            in_channels=channels_list[6], # 128
            out_channels=channels_list[6] # 128
        )

        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[6], # 128
            out_channels=channels_list[7], # 128
            kernel_size=3,
            stride=2
        )

        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[8], # 256
            out_channels=channels_list[9], # 256
            kernel_size=3,
            stride=2
        )

    def forward(self, input):
        # Three backbone scales, shallow (x2) to deep (x0).
        (x2, x1, x0) = input

        # --- top-down path ---
        fpn_out0 = self.reduce_layer0(x0)
        upsample_feat0 = self.upsample0(fpn_out0)
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)
        f_out0 = self.Rep_p4(f_concat_layer0)

        fpn_out1 = self.reduce_layer1(f_out0)
        upsample_feat1 = self.upsample1(fpn_out1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        pan_out2 = self.Rep_p3(f_concat_layer1)

        # --- bottom-up path ---
        down_feat1 = self.downsample2(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n3(p_concat_layer1)

        down_feat0 = self.downsample1(pan_out1)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n4(p_concat_layer2)

        # Multi-scale outputs, finest scale first.
        outputs = [pan_out2, pan_out1, pan_out0]

        return outputs

7.class CSPRepBiFPANNeck(nn.Module): 

class CSPRepBiFPANNeck(nn.Module):
    """CSPRepBiFPANNeck module.

    BiFusion top-down path combined with CSP-style stage blocks; the stage
    block class is selectable via ``stage_block_type`` ("BepC3" or
    "MBLABlock").
    """

    def __init__(
        self,
        channels_list=None,
        num_repeats=None,
        block=BottleRep,
        csp_e=float(1)/2,
        stage_block_type="BepC3"
    ):
        # csp_e: CSP hidden-channel expansion ratio for every stage block.
        super().__init__()

        assert channels_list is not None
        assert num_repeats is not None

        # Resolve the stage block class from its configured name.
        if stage_block_type == "BepC3":
            stage_block = BepC3
        elif stage_block_type == "MBLABlock":
            stage_block = MBLABlock
        else:
            raise NotImplementedError

        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[4], # 1024
            out_channels=channels_list[5], # 256
            kernel_size=1,
            stride=1
        )

        self.Bifusion0 = BiFusion(
            in_channels=[channels_list[3], channels_list[2]], # 512, 256
            out_channels=channels_list[5], # 256
        )

        self.Rep_p4 = stage_block(
            in_channels=channels_list[5], # 256
            out_channels=channels_list[5], # 256
            n=num_repeats[5],
            e=csp_e,
            block=block
        )

        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[5], # 256
            out_channels=channels_list[6], # 128
            kernel_size=1,
            stride=1
        )

        self.Bifusion1 = BiFusion(
            in_channels=[channels_list[2], channels_list[1]], # 256, 128
            out_channels=channels_list[6], # 128
        )

        self.Rep_p3 = stage_block(
            in_channels=channels_list[6], # 128
            out_channels=channels_list[6], # 128
            n=num_repeats[6],
            e=csp_e,
            block=block
        )

        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[6], # 128
            out_channels=channels_list[7], # 128
            kernel_size=3,
            stride=2
        )

        self.Rep_n3 = stage_block(
            in_channels=channels_list[6] + channels_list[7], # 128 + 128
            out_channels=channels_list[8], # 256
            n=num_repeats[7],
            e=csp_e,
            block=block
        )

        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[8], # 256
            out_channels=channels_list[9], # 256
            kernel_size=3,
            stride=2
        )

        self.Rep_n4 = stage_block(
            in_channels=channels_list[5] + channels_list[9], # 256 + 256
            out_channels=channels_list[10], # 512
            n=num_repeats[8],
            e=csp_e,
            block=block
        )

    def forward(self, input):
        # Four backbone scales, shallow (x3) to deep (x0).
        (x3, x2, x1, x0) = input

        # --- top-down path (BiFusion takes [deep, mid, shallow]) ---
        fpn_out0 = self.reduce_layer0(x0)
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        f_out0 = self.Rep_p4(f_concat_layer0)

        fpn_out1 = self.reduce_layer1(f_out0)
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        pan_out2 = self.Rep_p3(f_concat_layer1)

        # --- bottom-up path ---
        down_feat1 = self.downsample2(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n3(p_concat_layer1)

        down_feat0 = self.downsample1(pan_out1)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n4(p_concat_layer2)

        # Multi-scale outputs, finest scale first.
        outputs = [pan_out2, pan_out1, pan_out0]

        return outputs

8.class CSPRepPANNeck_P6(nn.Module): 

class CSPRepPANNeck_P6(nn.Module):
    """CSPRepPANNeck_P6 Module.

    Four-scale (P3-P6) variant of CSPRepPANNeck: RepPANNeck6 topology with
    CSP-style BepC3 fusion stages.
    """
    # channels_list layout: [64, 128, 256, 512, 768, 1024] (backbone)
    #                       [512, 256, 128, 256, 512, 1024] (neck)
    def __init__(
        self,
        channels_list=None,
        num_repeats=None,
        block=BottleRep,
        csp_e=float(1)/2
    ):
        # csp_e: CSP hidden-channel expansion ratio passed to every BepC3 stage.
        super().__init__()

        assert channels_list is not None
        assert num_repeats is not None

        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[5], # 1024
            out_channels=channels_list[6], # 512
            kernel_size=1,
            stride=1
        )

        self.upsample0 = Transpose(
            in_channels=channels_list[6],  # 512
            out_channels=channels_list[6], # 512
        )

        self.Rep_p5 = BepC3(
            in_channels=channels_list[4] + channels_list[6], # 768 + 512
            out_channels=channels_list[6], # 512
            n=num_repeats[6],
            e=csp_e,
            block=block
        )

        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[6],  # 512
            out_channels=channels_list[7], # 256
            kernel_size=1,
            stride=1
        )

        self.upsample1 = Transpose(
            in_channels=channels_list[7], # 256
            out_channels=channels_list[7] # 256
        )

        self.Rep_p4 = BepC3(
            in_channels=channels_list[3] + channels_list[7], # 512 + 256
            out_channels=channels_list[7], # 256
            n=num_repeats[7],
            e=csp_e,
            block=block
        )

        self.reduce_layer2 = ConvBNReLU(
            in_channels=channels_list[7],  # 256
            out_channels=channels_list[8], # 128
            kernel_size=1,
            stride=1
        )

        self.upsample2 = Transpose(
            in_channels=channels_list[8], # 128
            out_channels=channels_list[8] # 128
        )

        self.Rep_p3 = BepC3(
            in_channels=channels_list[2] + channels_list[8], # 256 + 128
            out_channels=channels_list[8], # 128
            n=num_repeats[8],
            e=csp_e,
            block=block
        )

        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[8],  # 128
            out_channels=channels_list[8], # 128
            kernel_size=3,
            stride=2
        )

        self.Rep_n4 = BepC3(
            in_channels=channels_list[8] + channels_list[8], # 128 + 128
            out_channels=channels_list[9], # 256
            n=num_repeats[9],
            e=csp_e,
            block=block
        )

        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[9],  # 256
            out_channels=channels_list[9], # 256
            kernel_size=3,
            stride=2
        )

        self.Rep_n5 = BepC3(
            in_channels=channels_list[7] + channels_list[9], # 256 + 256
            out_channels=channels_list[10], # 512
            n=num_repeats[10],
            e=csp_e,
            block=block
        )

        self.downsample0 = ConvBNReLU(
            in_channels=channels_list[10],  # 512
            out_channels=channels_list[10], # 512
            kernel_size=3,
            stride=2
        )

        self.Rep_n6 = BepC3(
            in_channels=channels_list[6] + channels_list[10], # 512 + 512
            out_channels=channels_list[11], # 1024
            n=num_repeats[11],
            e=csp_e,
            block=block
        )

    def forward(self, input):
        # Four backbone scales, shallow (x3) to deep (x0).
        (x3, x2, x1, x0) = input

        # --- top-down (FPN) path ---
        fpn_out0 = self.reduce_layer0(x0)
        upsample_feat0 = self.upsample0(fpn_out0)
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)
        f_out0 = self.Rep_p5(f_concat_layer0)

        fpn_out1 = self.reduce_layer1(f_out0)
        upsample_feat1 = self.upsample1(fpn_out1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        f_out1 = self.Rep_p4(f_concat_layer1)

        fpn_out2 = self.reduce_layer2(f_out1)
        upsample_feat2 = self.upsample2(fpn_out2)
        f_concat_layer2 = torch.cat([upsample_feat2, x3], 1)
        pan_out3 = self.Rep_p3(f_concat_layer2) # P3

        # --- bottom-up (PAN) path ---
        down_feat2 = self.downsample2(pan_out3)
        p_concat_layer2 = torch.cat([down_feat2, fpn_out2], 1)
        pan_out2 = self.Rep_n4(p_concat_layer2) # P4

        down_feat1 = self.downsample1(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n5(p_concat_layer1) # P5

        down_feat0 = self.downsample0(pan_out1)
        p_concat_layer0 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n6(p_concat_layer0) # P6

        # Multi-scale outputs, finest scale first.
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]

        return outputs

9.class CSPRepBiFPANNeck_P6(nn.Module): 

class CSPRepBiFPANNeck_P6(nn.Module):
    """CSPRepBiFPANNeck_P6 Module.

    Four-scale (P3-P6) variant of CSPRepBiFPANNeck: BiFusion top-down path
    with CSP-style BepC3 fusion stages.
    """
    # channels_list layout: [64, 128, 256, 512, 768, 1024] (backbone)
    #                       [512, 256, 128, 256, 512, 1024] (neck)
    def __init__(
        self,
        channels_list=None,
        num_repeats=None,
        block=BottleRep,
        csp_e=float(1)/2
    ):
        # csp_e: CSP hidden-channel expansion ratio passed to every BepC3 stage.
        super().__init__()

        assert channels_list is not None
        assert num_repeats is not None

        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[5], # 1024
            out_channels=channels_list[6], # 512
            kernel_size=1,
            stride=1
        )

        self.Bifusion0 = BiFusion(
            in_channels=[channels_list[4], channels_list[6]], # 768, 512
            out_channels=channels_list[6], # 512
        )

        self.Rep_p5 = BepC3(
            in_channels=channels_list[6], # 512
            out_channels=channels_list[6], # 512
            n=num_repeats[6],
            e=csp_e,
            block=block
        )

        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[6],  # 512
            out_channels=channels_list[7], # 256
            kernel_size=1,
            stride=1
        )

        self.Bifusion1 = BiFusion(
            in_channels=[channels_list[3], channels_list[7]], # 512, 256
            out_channels=channels_list[7], # 256
        )

        self.Rep_p4 = BepC3(
            in_channels=channels_list[7], # 256
            out_channels=channels_list[7], # 256
            n=num_repeats[7],
            e=csp_e,
            block=block
        )

        self.reduce_layer2 = ConvBNReLU(
            in_channels=channels_list[7],  # 256
            out_channels=channels_list[8], # 128
            kernel_size=1,
            stride=1
        )

        self.Bifusion2 = BiFusion(
            in_channels=[channels_list[2], channels_list[8]], # 256, 128
            out_channels=channels_list[8], # 128
        )

        self.Rep_p3 = BepC3(
            in_channels=channels_list[8], # 128
            out_channels=channels_list[8], # 128
            n=num_repeats[8],
            e=csp_e,
            block=block
        )

        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[8],  # 128
            out_channels=channels_list[8], # 128
            kernel_size=3,
            stride=2
        )

        self.Rep_n4 = BepC3(
            in_channels=channels_list[8] + channels_list[8], # 128 + 128
            out_channels=channels_list[9], # 256
            n=num_repeats[9],
            e=csp_e,
            block=block
        )

        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[9],  # 256
            out_channels=channels_list[9], # 256
            kernel_size=3,
            stride=2
        )

        self.Rep_n5 = BepC3(
            in_channels=channels_list[7] + channels_list[9], # 256 + 256
            out_channels=channels_list[10], # 512
            n=num_repeats[10],
            e=csp_e,
            block=block
        )

        self.downsample0 = ConvBNReLU(
            in_channels=channels_list[10],  # 512
            out_channels=channels_list[10], # 512
            kernel_size=3,
            stride=2
        )

        self.Rep_n6 = BepC3(
            in_channels=channels_list[6] + channels_list[10], # 512 + 512
            out_channels=channels_list[11], # 1024
            n=num_repeats[11],
            e=csp_e,
            block=block
        )

    def forward(self, input):
        # Five backbone scales, shallow (x4) to deep (x0).
        (x4, x3, x2, x1, x0) = input

        # --- top-down path (BiFusion takes [deep, mid, shallow]) ---
        fpn_out0 = self.reduce_layer0(x0)
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        f_out0 = self.Rep_p5(f_concat_layer0)

        fpn_out1 = self.reduce_layer1(f_out0)
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        f_out1 = self.Rep_p4(f_concat_layer1)

        fpn_out2 = self.reduce_layer2(f_out1)
        f_concat_layer2 = self.Bifusion2([fpn_out2, x3, x4])
        pan_out3 = self.Rep_p3(f_concat_layer2) # P3

        # --- bottom-up path ---
        down_feat2 = self.downsample2(pan_out3)
        p_concat_layer2 = torch.cat([down_feat2, fpn_out2], 1)
        pan_out2 = self.Rep_n4(p_concat_layer2) # P4

        down_feat1 = self.downsample1(pan_out2)
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        pan_out1 = self.Rep_n5(p_concat_layer1) # P5

        down_feat0 = self.downsample0(pan_out1)
        p_concat_layer0 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out0 = self.Rep_n6(p_concat_layer0) # P6

        # Multi-scale outputs, finest scale first.
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]

        return outputs

10.class Lite_EffiNeck(nn.Module): 

class Lite_EffiNeck(nn.Module):
    """Lightweight neck for YOLOv6-Lite.

    Projects all backbone levels to one unified channel width, fuses them
    with CSP blocks, and derives an extra P6 level from strided depthwise
    (DP) convolutions.
    """

    def __init__(
        self,
        in_channels,
        unified_channels,
    ):
        # in_channels: channel widths of the three backbone levels [c5, c4, c3].
        # unified_channels: common channel width used throughout the neck.
        super().__init__()
        # 1x1 projections to the unified channel width.
        self.reduce_layer0 = ConvBNHS(
            in_channels=in_channels[0],
            out_channels=unified_channels,
            kernel_size=1,
            stride=1,
            padding=0
        )
        self.reduce_layer1 = ConvBNHS(
            in_channels=in_channels[1],
            out_channels=unified_channels,
            kernel_size=1,
            stride=1,
            padding=0
        )
        self.reduce_layer2 = ConvBNHS(
            in_channels=in_channels[2],
            out_channels=unified_channels,
            kernel_size=1,
            stride=1,
            padding=0
        )
        # Parameter-free nearest-neighbour 2x upsampling (cheaper than a
        # transposed conv, which matters for the Lite model).
        self.upsample0 = nn.Upsample(scale_factor=2, mode='nearest')
        self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
        # CSP fusion blocks; inputs are always a 2x-channel concat.
        self.Csp_p4 = CSPBlock(
            in_channels=unified_channels*2,
            out_channels=unified_channels,
            kernel_size=5
        )
        self.Csp_p3 = CSPBlock(
            in_channels=unified_channels*2,
            out_channels=unified_channels,
            kernel_size=5
        )
        self.Csp_n3 = CSPBlock(
            in_channels=unified_channels*2,
            out_channels=unified_channels,
            kernel_size=5
        )
        self.Csp_n4 = CSPBlock(
            in_channels=unified_channels*2,
            out_channels=unified_channels,
            kernel_size=5
        )
        # Strided depthwise blocks for the bottom-up path and the P6 branch.
        self.downsample2 = DPBlock(
            in_channel=unified_channels,
            out_channel=unified_channels,
            kernel_size=5,
            stride=2
        )
        self.downsample1 = DPBlock(
            in_channel=unified_channels,
            out_channel=unified_channels,
            kernel_size=5,
            stride=2
        )
        self.p6_conv_1 = DPBlock(
            in_channel=unified_channels,
            out_channel=unified_channels,
            kernel_size=5,
            stride=2
        )
        self.p6_conv_2 = DPBlock(
            in_channel=unified_channels,
            out_channel=unified_channels,
            kernel_size=5,
            stride=2
        )

    def forward(self, input):
        # Three backbone scales, shallow (x2) to deep (x0).
        (x2, x1, x0) = input

        # Project every level to the unified channel width.
        fpn_out0 = self.reduce_layer0(x0) #c5
        x1 = self.reduce_layer1(x1)       #c4
        x2 = self.reduce_layer2(x2)       #c3

        # --- top-down path ---
        upsample_feat0 = self.upsample0(fpn_out0)
        f_concat_layer0 = torch.cat([upsample_feat0, x1], 1)
        f_out1 = self.Csp_p4(f_concat_layer0)

        upsample_feat1 = self.upsample1(f_out1)
        f_concat_layer1 = torch.cat([upsample_feat1, x2], 1)
        pan_out3 = self.Csp_p3(f_concat_layer1) #p3

        # --- bottom-up path ---
        down_feat1 = self.downsample2(pan_out3)
        p_concat_layer1 = torch.cat([down_feat1, f_out1], 1)
        pan_out2 = self.Csp_n3(p_concat_layer1)  #p4

        down_feat0 = self.downsample1(pan_out2)
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        pan_out1 = self.Csp_n4(p_concat_layer2)  #p5

        # Extra P6 level: sum of two strided DP branches (from the reduced c5
        # feature and from the P5 output).
        top_features = self.p6_conv_1(fpn_out0)
        pan_out0 = top_features + self.p6_conv_2(pan_out1)  #p6

        # Multi-scale outputs, finest scale first.
        outputs = [pan_out3, pan_out2, pan_out1, pan_out0]

        return outputs

 

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.xdnf.cn/news/8820.html

如若内容造成侵权/违法违规/事实不符,请联系一条长河网进行投诉反馈,一经查实,立即删除!

相关文章

leetcode 693.交替位二进制数

1.题目要求： 2.题目代码: class Solution { public: bool hasAlternatingBits(int n) { int num = n; // 设置数组存入二进制位 vector&lt;int&gt; array; while(num){ array.push_back(num % 2); num = num / 2; } // 把数组颠倒就能得到此数真正二进制位 reverse(array.begin…

IP协议知识点总结

IP协议主要分为三个 1. 地址管理 每个网络上的设备, 要能分配一个唯一的地址 2. 路由选择 小A 给小B 发消息, 具体应该走什么路线 3. 地址管理 IP 地址. 本质上是一个 32 位的整数 通常将, 32 位的整数使用点分十进制来表示, 如 192.168.1.1 一共可以表示 42 亿 9 千万个地址…

【重学 MySQL】八十二、深入探索 CASE 语句的应用

【重学 MySQL】八十二、深入探索 CASE 语句的应用 CASE语句的两种形式CASE语句的应用场景数据分类动态排序条件计算在 SELECT 子句中使用在 WHERE子句中使用在 ORDER BY 子句中使用 注意事项 在MySQL中&#xff0c;CASE 语句提供了一种强大的方式来实现条件分支逻辑&#xff0c…

机器学习1_机器学习定义——MOOC

一、机器学习定义 定义一 1959年Arthur Samuel提出机器学习的定义&#xff1a; Machine Learning is Fields of study that gives computers the ability to learn without being explicitly programmed. 译文&#xff1a;机器学习是这样的领域&#xff0c;它赋予计算机学习的…

充电桩--OCPP 充电通讯协议介绍

一、OCPP协议介绍 OCPP的全称是 Open Charge Point Protocol 即开放充电点协议&#xff0c; 它是免费开放的协议&#xff0c;该协议由位于荷兰的组织 OCA&#xff08;开放充电联盟&#xff09;进行制定。Open Charge Point Protocol (OCPP) 开放充电点协议用于充电站(CS)和任何…

如何制作公司小程序

我是【码云数智】平台的黄导&#xff0c;今天分享&#xff1a;如何制作公司小程序 企业小程序怎么制作&#xff0c;企业小程序制作不仅成为了连接消费者与品牌的桥梁&#xff0c;更是企业数字化转型的重要一环。 01、小程序制作流程 02、微信小程序开发多少钱 03、微信小程…

明道云正式发布国际品牌Nocoly

在2024年明道云伙伴大会上&#xff0c;明道云正式发布了其国际品牌Nocoly以及国际版产品Nocoly HAP。这标志着公司正式开启了海外业务。明道云的海外业务由全资拥有的Nocoly.com Limited经营&#xff0c;该公司注册在香港特别行政区。总部位于上海的明道云已经将围绕HAP超级应用…

如何构建一个可扩展的测试自动化框架?

以下为作者观点&#xff1a; 假设你是测试自动化方面的新手&#xff0c;想参与构建一个框架。在这种情况下&#xff0c;重要的是要了解框架所需的组件&#xff0c;以及它们是如何组合的。思考项目的具体需求和目标&#xff0c;以及可能遇到的困难和挑战。 假如你是一个测试架…

C++builder中的人工智能(11):双曲正切激活函数(ANN函数)?

在这篇文章中&#xff0c;我们将探讨双曲正切函数&#xff08;tanh&#xff09;是什么&#xff0c;以及如何在C中使用这个函数。让我们来回答这些问题。 在AI中激活函数意味着什么&#xff1f; 激活函数&#xff08;phi()&#xff09;&#xff0c;也称为转移函数或阈值函数&a…

基于SSM+VUE宠物医院后台管理系统JAVA|VUE|Springboot计算机毕业设计源代码+数据库+LW文档+开题报告+答辩稿+部署教+代码讲解

源代码数据库LW文档&#xff08;1万字以上&#xff09;开题报告答辩稿 部署教程代码讲解代码时间修改教程 一、开发工具、运行环境、开发技术 开发工具 1、操作系统&#xff1a;Window操作系统 2、开发工具&#xff1a;IntelliJ IDEA或者Eclipse 3、数据库存储&#xff1a…

二、SSM框架制作CRM系统案例

一、搭建框架 1、首先创建下面的目录结构 2、添加相关依赖&#xff1a; <?xml version"1.0" encoding"UTF-8"?> <project xmlns"http://maven.apache.org/POM/4.0.0"xmlns:xsi"http://www.w3.org/2001/XMLSchema-inst…

【GPTs】Email Responder Pro:高效生成专业回复邮件

博客主页&#xff1a; [小ᶻZ࿆] 本文专栏: AIGC | GPTs应用实例 文章目录 &#x1f4af;GPTs指令&#x1f4af;前言&#x1f4af;Email Responder Pro主要功能适用场景优点缺点 &#x1f4af;小结 &#x1f4af;GPTs指令 Email Craft is a specialized assistant for cra…

知识课堂之域名系统中实现动态代理

怎么在域名系统中解析动态ip&#xff0c;这一直是一个需要解决的问题&#xff0c;人们对与网络的稳定连接与灵活运用已经成为生活和工作中不可或缺的一部分&#xff0c;因此这样的问题的解决迫在眉睫。 大家对于动态ip是什么&#xff0c;应该都有所了解了&#xff0c;所谓的动…

【Go语言】| 第1课:Golang安装+环境配置+Goland下载

&#x1f60e; 作者介绍&#xff1a;我是程序员洲洲&#xff0c;一个热爱写作的非著名程序员。CSDN全栈优质领域创作者、华为云博客社区云享专家、阿里云博客社区专家博主。 &#x1f913; 同时欢迎大家关注其他专栏&#xff0c;我将分享Web前后端开发、人工智能、机器学习、深…

程序猿要失业了,一行代码没写,1小时嘴搓了一个图片分割插件(好看又好用)

如题&#xff0c;一行代码没写&#xff0c;使用 AI 编程工具实现了一个浏览器图片分割插件的开发&#xff0c;先看效果吧&#xff08; Chrome商店上架审核中~ &#xff09; 支持点击&#xff0c;拖拽&#xff0c;直接粘贴&#xff0c;还支持预览&#xff0c;次数统计&#xff0…

基于SpringBoot+Vue实现新零售商城系统

作者主页&#xff1a;编程千纸鹤 作者简介&#xff1a;Java领域优质创作者、CSDN博客专家 、CSDN内容合伙人、掘金特邀作者、阿里云博客专家、51CTO特邀作者、多年架构师设计经验、多年校企合作经验&#xff0c;被多个学校常年聘为校外企业导师&#xff0c;指导学生毕业设计并参…

【湖南】《湖南省省直单位政府投资信息化项目预算编制与财政评审工作指南(试行)》湘财办〔2024〕10号-省市费用标准解读系列06

2024年4月12日&#xff0c;湖南省财政厅发布实施《湖南省省直单位政府投资信息化项目预算编制与财政评审工作指南&#xff08;试行&#xff09;》湘财办〔2024〕10号&#xff08;以下简称“10号文”&#xff09;&#xff0c;该文件旨在指导提高湖南省直单位政府投资信息化项目预…

攻防靶场(28):通过SNMP进行信息收集 JOY

目录 1.侦查 1.1 获取目标网络信息&#xff1a;IP地址 1.2 主动扫描&#xff1a;扫描IP地址块 1.3 收集受害者主机信息&#xff1a;软件 2. 数据窃取 2.1 通过备用协议窃取&#xff1a;通过未加密的非C2协议窃取 2.2 通过备用协议窃取&#xff1a;通过未加密的非C2协议窃取 3. …

DCDC-LLC谐振电路Q值与系统增益变化相反的原因

1.谐振电路的Q值定义 LLC电路的Q值定义: 它表述的是整个电路的能量存储与耗散的关系。损耗越小&#xff0c;Q值越大&#xff0c;损耗越大&#xff0c;Q值越小。 Q的另一种写法是&#xff1a; 这个公式来由&#xff0c;因为谐振频率&#xff1a; 所以&#xff1a; 所以&#…