The network architecture is admittedly odd; the point here is mainly to practice the operations involved (parallel dilated convolutions and 1x1 convolutions used in place of fully connected layers).
import keras.backend as K
from keras.layers import Input, Conv2D, BatchNormalization, MaxPooling2D, concatenate
from keras.models import Model


def build_model():
    # Classification network, freely improvised: stacked convolutions,
    # parallel dilated convolutions, then 1x1 convolutions acting as
    # fully connected layers.
    kernel = 3
    input_tensor = Input(shape=(64, 64, 3))

    # Block 1
    x = Conv2D(64, (kernel, kernel), activation='relu', padding='same', name='conv1_1',
               kernel_initializer='he_normal', bias_initializer='zeros')(input_tensor)
    x = BatchNormalization()(x)
    x = Conv2D(64, (kernel, kernel), activation='relu', padding='same', name='conv1_2',
               kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Block 2
    x = Conv2D(128, (kernel, kernel), activation='relu', padding='same', name='conv2_1',
               kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (kernel, kernel), activation='relu', padding='same', name='conv2_2',
               kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Block 3
    x = Conv2D(256, (kernel, kernel), activation='relu', padding='same', name='conv3_1',
               kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (kernel, kernel), activation='relu', padding='same', name='conv3_2',
               kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (kernel, kernel), activation='relu', padding='same', name='conv3_3',
               kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Parallel dilated (atrous) convolutions with increasing dilation rates
    x1 = Conv2D(256, (kernel, kernel), activation='relu', padding='same', dilation_rate=(2, 2),
                name='conv4_1', kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x1_1 = BatchNormalization()(x1)
    x2 = Conv2D(256, (kernel, kernel), activation='relu', padding='same', dilation_rate=(6, 6),
                name='conv4_2', kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x2_1 = BatchNormalization()(x2)
    x3 = Conv2D(256, (kernel, kernel), activation='relu', padding='same', dilation_rate=(12, 12),
                name='conv4_3', kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x3_1 = BatchNormalization()(x3)

    # Fuse the three 256-channel feature maps along the channel axis
    x_s = concatenate([x1_1, x2_1, x3_1], axis=3)

    # 8x8 pooling collapses the spatial dimensions, then 1x1 convolutions
    # act as fully connected layers
    x = MaxPooling2D((8, 8), strides=(1, 1))(x_s)
    x = Conv2D(1024, (1, 1), activation='relu', padding='valid', name='fc5',
               kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(1024, (1, 1), activation='relu', padding='valid', name='fc6',
               kernel_initializer='he_normal', bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    outputs = Conv2D(2, (1, 1), activation='softmax', padding='valid', name='fc7',
                     kernel_initializer='he_normal', bias_initializer='zeros')(x)

    model = Model(inputs=input_tensor, outputs=outputs, name="selfmodel")
    return model


if __name__ == '__main__':
    encoder_model = build_model()
    encoder_model.summary()
    K.clear_session()  # release the memory held by the model
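For reference, here is a minimal usage sketch of my own (not part of the original post): the optimizer and loss are illustrative choices, and the dummy forward pass only serves to confirm that the convolutional "fully connected" head collapses a 64x64 input into a 1x1x2 softmax map.

import numpy as np

model = build_model()
# Optimizer and loss are illustrative assumptions, not specified in the original post.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Random dummy batch just to check the output shape of the conv-as-FC head.
dummy = np.random.rand(4, 64, 64, 3).astype('float32')
print(model.predict(dummy).shape)  # expected: (4, 1, 1, 2)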
If this helped, please give the post a like!