
ResNet v1 backbone
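
The snippets below walk through keras_cv's ResNet v1 backbone. They assume roughly the following imports and constants (a sketch; the exact module paths and BN_EPSILON value may differ between keras_cv versions):

import copy

import tensorflow as tf

from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone_presets import (
    backbone_presets,
    backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty

# Batch-norm settings used throughout (channels_last layout)
BN_AXIS = 3
BN_EPSILON = 1.001e-5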

# Basic residual block (two 3x3 convs)
def apply_basic_block(
    x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None
):
    # Default name prefix for the block
    if name is None:
        name = f"v1_basic_block_{keras.backend.get_uid('v1_basic_block_')}"
    # Build the shortcut branch.
    # If conv_shortcut is True, a 1x1 conv adjusts the channel count (usually with
    # downsampling), followed by batch normalization.
    if conv_shortcut:
        shortcut = keras.layers.Conv2D(
            filters,
            1,
            strides=stride,
            use_bias=False,
            name=name + "_0_conv",
        )(x)
        shortcut = keras.layers.BatchNormalization(
            axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_0_bn"
        )(shortcut)
    else:  # otherwise the shortcut is the identity
        shortcut = x
    # First 3x3 conv; downsamples when strides=2
    x = keras.layers.Conv2D(
        filters,
        kernel_size,
        padding="SAME",
        strides=stride,
        use_bias=False,
        name=name + "_1_conv",
    )(x)
    # Batch norm + ReLU
    x = keras.layers.BatchNormalization(
        axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn"
    )(x)
    x = keras.layers.Activation("relu", name=name + "_1_relu")(x)
    # Second 3x3 conv, stride 1
    x = keras.layers.Conv2D(
        filters,
        kernel_size,
        padding="SAME",
        use_bias=False,
        name=name + "_2_conv",
    )(x)
    x = keras.layers.BatchNormalization(  # batch normalization
        axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_2_bn"
    )(x)
    # Note: the residual branch ends with batch normalization only, with no activation;
    # applying ReLU before the add would restrict the residual to non-negative values,
    # so the activation comes after the add instead.
    x = keras.layers.Add(name=name + "_add")([shortcut, x])
    # Activation after the residual add
    x = keras.layers.Activation("relu", name=name + "_out")(x)
    return x
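
As a quick sanity check of the basic block's shape behavior (a minimal sketch; the input size and filter count below are arbitrary), with conv_shortcut=True and stride=2 the block halves the spatial size and sets the channel count to filters:

# Hypothetical shape check for apply_basic_block
inp = keras.Input(shape=(56, 56, 64))
out = apply_basic_block(inp, filters=128, stride=2, conv_shortcut=True)
print(out.shape)  # expected: (None, 28, 28, 128)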
# Bottleneck residual block (wide -> narrow -> wide)
def apply_block(
    x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None
):
    # Default name prefix, e.g. v1_block_1
    if name is None:
        name = f"v1_block_{keras.backend.get_uid('v1_block')}"
    # Build the shortcut branch: if conv_shortcut is True, a 1x1 conv maps to
    # 4 * filters channels (usually with downsampling), followed by batch normalization.
    if conv_shortcut:
        shortcut = keras.layers.Conv2D(
            4 * filters,
            1,
            strides=stride,
            use_bias=False,
            name=name + "_0_conv",
        )(x)
        shortcut = keras.layers.BatchNormalization(
            axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_0_bn"
        )(shortcut)
    else:  # otherwise the shortcut is the input x
        shortcut = x
    # 1x1 conv to reduce channels; downsamples when strides=2
    x = keras.layers.Conv2D(
        filters, 1, strides=stride, use_bias=False, name=name + "_1_conv"
    )(x)
    # Batch norm + ReLU
    x = keras.layers.BatchNormalization(
        axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn"
    )(x)
    x = keras.layers.Activation("relu", name=name + "_1_relu")(x)
    # 3x3 conv with the default stride of 1
    x = keras.layers.Conv2D(
        filters,
        kernel_size,
        padding="SAME",
        use_bias=False,
        name=name + "_2_conv",
    )(x)
    # Batch norm + ReLU
    x = keras.layers.BatchNormalization(
        axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_2_bn"
    )(x)
    x = keras.layers.Activation("relu", name=name + "_2_relu")(x)
    # 1x1 conv to expand channels back to 4 * filters
    x = keras.layers.Conv2D(
        4 * filters, 1, use_bias=False, name=name + "_3_conv"
    )(x)
    x = keras.layers.BatchNormalization(  # batch normalization
        axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_3_bn"
    )(x)
    # Residual add; no activation before the add (see the note in apply_basic_block)
    x = keras.layers.Add(name=name + "_add")([shortcut, x])
    # Activation after the add (the output now has 4 * filters channels)
    x = keras.layers.Activation("relu", name=name + "_out")(x)
    return x
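
Unlike the basic block, the bottleneck block always ends with 4 * filters channels; a minimal sketch of the same kind of shape check (arbitrary sizes):

# Hypothetical shape check for apply_block (bottleneck)
inp = keras.Input(shape=(56, 56, 256))
out = apply_block(inp, filters=128, stride=2, conv_shortcut=True)
print(out.shape)  # expected: (None, 28, 28, 512), i.e. 4 * filters channels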
# Stack of residual blocks sharing one filter configuration
def apply_stack(
    x,
    filters,
    blocks,
    stride=2,
    name=None,
    block_type="block",
    first_shortcut=True,
):
    # Default name prefix
    if name is None:
        name = "v1_stack"
    # Select the block function based on block_type
    if block_type == "basic_block":
        block_fn = apply_basic_block  # basic residual block
    elif block_type == "block":
        block_fn = apply_block  # bottleneck residual block
    else:
        raise ValueError(
            """`block_type` must be either "basic_block" or "block". """
            f"Received block_type={block_type}."
        )
    # First block of the stack, usually downsampling
    x = block_fn(
        x,
        filters,
        stride=stride,
        name=name + "_block1",
        conv_shortcut=first_shortcut,
    )
    # Remaining blocks use stride 1 (no downsampling) and identity shortcuts
    for i in range(2, blocks + 1):
        x = block_fn(
            x, filters, conv_shortcut=False, name=name + "_block" + str(i)
        )
    return x
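
Putting it together, one stack runs a (usually downsampling) first block followed by blocks - 1 identity-shortcut blocks; a minimal sketch with arbitrary sizes:

# Hypothetical stack of 3 bottleneck blocks
inp = keras.Input(shape=(56, 56, 256))
out = apply_stack(inp, filters=128, blocks=3, stride=2, block_type="block")
print(out.shape)  # expected: (None, 28, 28, 512)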
# keras_cv_export registers the public import path for this class
@keras_cv_export("keras_cv.models.ResNetBackbone")
class ResNetBackbone(Backbone):  # ResNet v1 backbone
    def __init__(
        self,
        *,
        stackwise_filters,  # channels for each stack
        stackwise_blocks,  # number of blocks in each stack
        stackwise_strides,  # stride for each stack
        include_rescaling,  # whether to rescale inputs internally
        input_shape=(None, None, 3),  # input shape
        input_tensor=None,  # optional input tensor
        block_type="block",  # "basic_block" or "block" (bottleneck)
        **kwargs,
    ):
        # Model inputs, e.g. (224, 224, 3)
        inputs = utils.parse_model_inputs(input_shape, input_tensor)
        x = inputs  # running tensor
        # Optionally rescale pixel values to [0, 1]
        if include_rescaling:
            x = keras.layers.Rescaling(1 / 255.0)(x)  # rescale
        # Stem: 7x7 conv, stride 2, e.g. (224, 224, 3) -> (112, 112, 64)
        x = keras.layers.Conv2D(
            64, 7, strides=2, use_bias=False, padding="same", name="conv1_conv"
        )(x)
        # Batch norm + ReLU
        x = keras.layers.BatchNormalization(
            axis=BN_AXIS, epsilon=BN_EPSILON, name="conv1_bn"
        )(x)
        x = keras.layers.Activation("relu", name="conv1_relu")(x)
        # Max pooling, stride 2, e.g. (112, 112, 64) -> (56, 56, 64)
        x = keras.layers.MaxPooling2D(
            3, strides=2, padding="same", name="pool1_pool"
        )(x)
        # Number of stacks (pyramid levels)
        num_stacks = len(stackwise_filters)
        # Layer names feeding each pyramid level
        pyramid_level_inputs = {}
        # Build each stack
        for stack_index in range(num_stacks):
            # Apply one stack of residual blocks
            x = apply_stack(
                x,
                filters=stackwise_filters[stack_index],
                blocks=stackwise_blocks[stack_index],  # depth of this stack
                stride=stackwise_strides[stack_index],
                block_type=block_type,  # selects basic or bottleneck blocks
                # Despite its name, first_shortcut controls whether the first block of the
                # stack projects the shortcut with a 1x1 conv so its shape matches the main
                # branch for the residual add. That projection is needed when
                # block_type == "block" (the bottleneck expands channels to 4 * filters) or
                # when stack_index > 0 (the basic block downsamples).
                first_shortcut=(block_type == "block" or stack_index > 0),
                name=f"v2_stack_{stack_index}",
            )
            # Record the layer that feeds this pyramid level (P2..P5)
            pyramid_level_inputs[f"P{stack_index + 2}"] = (
                utils.get_tensor_input_name(x)
            )

        # Create model.
        super().__init__(inputs=inputs, outputs=x, **kwargs)
        # Store the configuration as instance attributes
        self.pyramid_level_inputs = pyramid_level_inputs
        self.stackwise_filters = stackwise_filters
        self.stackwise_blocks = stackwise_blocks
        self.stackwise_strides = stackwise_strides
        self.include_rescaling = include_rescaling
        self.input_tensor = input_tensor
        self.block_type = block_type
    
    def get_config(self):
        config = super().get_config()  # parent class config dict
        config.update(  # add this subclass's configuration
            {
                "stackwise_filters": self.stackwise_filters,
                "stackwise_blocks": self.stackwise_blocks,
                "stackwise_strides": self.stackwise_strides,
                "include_rescaling": self.include_rescaling,
                # Remove batch dimension from `input_shape`
                "input_shape": self.input_shape[1:],
                "input_tensor": self.input_tensor,
                "block_type": self.block_type,
            }
        )
        return config
    # Class property: preset names and configurations
    @classproperty
    def presets(cls):
        """Dictionary of preset names and configurations."""
        return copy.deepcopy(backbone_presets)
    # Class property: presets that include pretrained weights
    @classproperty
    def presets_with_weights(cls):
        return copy.deepcopy(backbone_presets_with_weights)
# Randomly initialize a backbone from a custom configuration
model = ResNetBackbone(
    input_shape=(224, 224, 3),
    stackwise_filters=[64, 128, 256, 512],  # channels per stack
    stackwise_blocks=[2, 2, 2, 2],  # blocks per stack
    stackwise_strides=[1, 2, 2, 2],  # stride per stack
    include_rescaling=False,
)
len(model.layers)
model.pyramid_level_inputs
[model.get_layer(i).output for i in model.pyramid_level_inputs.values()]
model.summary()
input_data = tf.ones(shape=(8, 224, 224, 3))
output = model(input_data)
output.shape
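
Since pyramid_level_inputs maps pyramid level names (P2..P5) to layer names, a multi-output feature extractor can be built on top of the backbone; a minimal sketch using the model constructed above:

# Build a feature extractor returning every pyramid-level feature map
features = keras.Model(
    inputs=model.inputs,
    outputs={
        level: model.get_layer(layer_name).output
        for level, layer_name in model.pyramid_level_inputs.items()
    },
)
feature_maps = features(input_data)
for level, feat in feature_maps.items():
    print(level, feat.shape)
# For a (224, 224, 3) input with strides [1, 2, 2, 2] and bottleneck blocks, the
# expected shapes are roughly P2: (8, 56, 56, 256), P3: (8, 28, 28, 512),
# P4: (8, 14, 14, 1024), P5: (8, 7, 7, 2048).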

# Decorator registering the public import path for this class
@keras_cv_export("keras_cv.models.ResNet18Backbone")
class ResNet18Backbone(ResNetBackbone):
    def __new__(
        cls,
        include_rescaling=True,
        input_shape=(None, None, 3),
        input_tensor=None,
        **kwargs,
    ):
        # Pack the explicit args into kwargs
        kwargs.update(
            {
                "include_rescaling": include_rescaling,
                "input_shape": input_shape,
                "input_tensor": input_tensor,
            }
        )
        # Build the ResNet18 backbone from its preset
        return ResNetBackbone.from_preset("resnet18", **kwargs)

    @classproperty
    def presets(cls):
        return {}

    @classproperty
    def presets_with_weights(cls):
        return {}
model1 = ResNet18Backbone(input_shape=(224, 224, 3))
model1.summary()
model1.pyramid_level_inputs
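
These size-specific classes are thin wrappers that delegate to ResNetBackbone.from_preset. For reference, the "resnet18" preset should correspond to the standard ResNet18 layout, so a roughly equivalent (randomly initialized) backbone can be built directly; a sketch under that assumption:

# Hypothetical manual equivalent of the resnet18 preset (basic blocks, depths 2-2-2-2)
resnet18_manual = ResNetBackbone(
    stackwise_filters=[64, 128, 256, 512],
    stackwise_blocks=[2, 2, 2, 2],
    stackwise_strides=[1, 2, 2, 2],
    include_rescaling=True,
    input_shape=(224, 224, 3),
    block_type="basic_block",
)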
@keras_cv_export("keras_cv.models.ResNet34Backbone")
class ResNet34Backbone(ResNetBackbone):
    def __new__(
        cls,
        include_rescaling=True,
        input_shape=(None, None, 3),
        input_tensor=None,
        **kwargs,
    ):
        # Pack args in kwargs
        kwargs.update(
            {
                "include_rescaling": include_rescaling,
                "input_shape": input_shape,
                "input_tensor": input_tensor,
            }
        )
        return ResNetBackbone.from_preset("resnet34", **kwargs)

    @classproperty
    def presets(cls):
        """Dictionary of preset names and configurations."""
        return {}

    @classproperty
    def presets_with_weights(cls):
        """Dictionary of preset names and configurations that include
        weights."""
        return {}
@keras_cv_export("keras_cv.models.ResNet50Backbone")
class ResNet50Backbone(ResNetBackbone):
    def __new__(
        cls,
        include_rescaling=True,
        input_shape=(None, None, 3),
        input_tensor=None,
        **kwargs,
    ):
        # Pack args in kwargs
        kwargs.update(
            {
                "include_rescaling": include_rescaling,
                "input_shape": input_shape,
                "input_tensor": input_tensor,
            }
        )
        return ResNetBackbone.from_preset("resnet50", **kwargs)

    @classproperty
    def presets(cls):
        """Dictionary of preset names and configurations."""
        return {
            "resnet50_imagenet": copy.deepcopy(
                backbone_presets["resnet50_imagenet"]
            ),
        }

    @classproperty
    def presets_with_weights(cls):
        """Dictionary of preset names and configurations that include
        weights."""
        return cls.presets

@keras_cv_export("keras_cv.models.ResNet101Backbone")
class ResNet101Backbone(ResNetBackbone):
    def __new__(
        cls,
        include_rescaling=True,
        input_shape=(None, None, 3),
        input_tensor=None,
        **kwargs,
    ):
        # Pack args in kwargs
        kwargs.update(
            {
                "include_rescaling": include_rescaling,
                "input_shape": input_shape,
                "input_tensor": input_tensor,
            }
        )
        return ResNetBackbone.from_preset("resnet101", **kwargs)

    @classproperty
    def presets(cls):
        """Dictionary of preset names and configurations."""
        return {}

    @classproperty
    def presets_with_weights(cls):
        """Dictionary of preset names and configurations that include
        weights."""
        return {}
@keras_cv_export("keras_cv.models.ResNet152Backbone")
class ResNet152Backbone(ResNetBackbone):
    def __new__(
        cls,
        include_rescaling=True,
        input_shape=(None, None, 3),
        input_tensor=None,
        **kwargs,
    ):
        # Pack args in kwargs
        kwargs.update(
            {
                "include_rescaling": include_rescaling,
                "input_shape": input_shape,
                "input_tensor": input_tensor,
            }
        )
        return ResNetBackbone.from_preset("resnet152", **kwargs)

    @classproperty
    def presets(cls):
        """Dictionary of preset names and configurations."""
        return {}

    @classproperty
    def presets_with_weights(cls):
        """Dictionary of preset names and configurations that include
        weights."""
        return {}

model2 = ResNet152Backbone(input_shape=(224, 224, 3))
len(model2.layers)
[model2.get_layer(i).output for i in model2.pyramid_level_inputs.values()]
model2.get_config()

{'name': 'res_net_backbone',
 'trainable': True,
 'stackwise_filters': [64, 128, 256, 512],
 'stackwise_blocks': [3, 8, 36, 3],
 'stackwise_strides': [1, 2, 2, 2],
 'include_rescaling': True,
 'input_shape': (224, 224, 3),
 'input_tensor': None,
 'block_type': 'block'}
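
Because get_config returns a plain dictionary of constructor arguments, an equivalent (randomly initialized) backbone can be rebuilt from it; a minimal sketch:

# Rebuild an equivalent backbone from the saved config (weights are re-initialized)
config = model2.get_config()
rebuilt = ResNetBackbone(
    stackwise_filters=config["stackwise_filters"],
    stackwise_blocks=config["stackwise_blocks"],
    stackwise_strides=config["stackwise_strides"],
    include_rescaling=config["include_rescaling"],
    input_shape=config["input_shape"],
    block_type=config["block_type"],
)
rebuilt.summary()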
