Verified Commit f745c1ab authored by Kiryuu Sakuya 🎵

Add exam.10

parent 5771ab16
import sys
import tensorflow as tf
import os
os.environ[\"TF_CPP_MIN_LOG_LEVEL\"]='3'
import warnings
warnings.filterwarnings('ignore')
sys.path.append('step3')
sys.path.append('step9')
from generatorCompleted import batchGenerator
from outputsUtilsCompleted import softmax, returnOneHot, computeAccuracy
from prevModules import (Inception_traditional, Inception_parallelAsymmetricConv,
Inception_AsymmetricConv,InitialPart,reduction,ResNetBlock)
#********** Begin **********#
# Define placeholders (begin)
keeProb = tf.placeholder(tf.float32, shape=(),name='dropout_keep_prob')
batchImgInput = tf.placeholder(tf.float32, shape=(None, 224, 224, 3),name='batchImgInput')
labels = tf.placeholder(tf.float32, shape=(None, 4),name='Labels')
# Define placeholders (end)
# First conv layer + LRN + max pooling (begin)
conv1 = tf.layers.Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4), padding='valid', activation=tf.nn.relu)(
batchImgInput)
lrn1 = tf.nn.local_response_normalization(conv1, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0)
pool1 = tf.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(lrn1)
# First conv layer + LRN + max pooling (end)
# Second conv layer + LRN + max pooling (begin)
conv2 = tf.layers.Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1), padding='same', activation=tf.nn.relu)(
pool1)
lrn2 = tf.nn.local_response_normalization(conv2, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0)
pool2 = tf.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(lrn2)
# Second conv layer + LRN + max pooling (end)
# Directly stacked conv layers (begin)
conv3 = tf.layers.Conv2D(filters=192, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=tf.nn.relu)(
pool2)
conv4 = tf.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=tf.nn.relu)(
conv3)
# Directly stacked conv layers (end)
# Pool and flatten to 1-D (begin)
pool3 = tf.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(conv4)
flatten = tf.layers.Flatten()(pool3)
# Pool and flatten to 1-D (end)
# First fully connected layer + dropout (begin)
dense1 = tf.layers.Dense(units=256, activation=tf.nn.relu)(flatten)
dropout1 = tf.nn.dropout(dense1, keeProb)
# First fully connected layer + dropout (end)
# Third fully connected layer + dropout (begin)
# dense3 = tf.layers.Dense(units=256, activation=tf.nn.relu)(dropout1)
# dropout3 = tf.nn.dropout(dense3, keeProb)
# Third fully connected layer + dropout (end)
# Extra fully connected layer whose output size is the number of classes (begin)
outPuts = tf.layers.Dense(units=4, activation=None, name='model_outputs')(dropout1)
# Extra fully connected layer whose output size is the number of classes (end)
# Define the loss (begin)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=outPuts, labels=labels))
# Define the loss (end)
# Define the training op (begin)
train = tf.train.AdamOptimizer(learning_rate=0.00001).minimize(loss)
# Define the training op (end)
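# Optional sketch, not part of the original solution: the validation accuracy computed
# later with softmax/returnOneHot/computeAccuracy could also be obtained in-graph.
# The two tensors below are illustrative and are not used anywhere else.
correct_prediction = tf.equal(tf.argmax(outPuts, axis=1), tf.argmax(labels, axis=1))
accuracy_in_graph = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))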
saver = tf.train.Saver()
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    G_Train = batchGenerator(batchSize=8)
    G_Valid = batchGenerator(batchSize=8, basePath='data/processed/valid_224')
    acc_Train = []
    max_acc = 0
    for i in range(24):
        X, Y = G_Train.getBatch()
        cur_BatchSize = X.shape[0]
        _, cur_loss = sess.run([train, loss],
                               feed_dict={batchImgInput: X, labels: Y, keeProb: 0.8})
        print(i)
        # print(i, end=': loss: ')
        # print(cur_loss)
    acc_v = 0
    # Validation set
    for i in range(10):
        X_v, Y_v = G_Valid.getBatch()
        output_v = softmax(
            sess.run(outPuts,
                     feed_dict={batchImgInput: X_v, labels: Y_v, keeProb: 1.}))
        output_v = returnOneHot(output_v)
        acc_v += computeAccuracy(output_v, Y_v)
    acc_v /= 10
    print('current accuracy: ', str(acc_v))
    # if acc_v > 0.7 and acc_v > max_acc:
    #     max_acc = acc_v
    #     saver.save(sess, "step10/Model/FinalNet")
#********** End **********#
import sys
import tensorflow as tf
sys.path.append('step3')
sys.path.append('step7')
sys.path.append('step8')
sys.path.append('step9')
from generatorCompleted import batchGenerator
from outputsUtilsCompleted import softmax, returnOneHot, computeAccuracy
from InceptionCompleted import (Inception_traditional, Inception_parallelAsymmetricConv,
Inception_AsymmetricConv,InitialPart,reduction)
from ResNetCompleted import ResNetBlock
# Define placeholders (begin)
Input = tf.placeholder(shape=(None, 224, 224, 3), dtype=tf.float32, name='Imgs')
keep_prob = tf.placeholder(tf.float32, shape=(), name='dropout_keep_prob')
Labels = tf.placeholder(shape=(None, 4), dtype=tf.float32, name='Labels')
# Define placeholders (end)
# Initial part of the model
processedInitially = InitialPart(Input)
Inception_traditional_1 = Inception_traditional(processedInitially)
reduction_1 = reduction(Inception_traditional_1)
Inception_Asymmetric_1 = Inception_AsymmetricConv(reduction_1)
reduction_2 = reduction(Inception_Asymmetric_1)
Inception_parallelAsymmetric_1 = Inception_parallelAsymmetricConv(reduction_2)
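# Average pooling with a window as large as the remaining feature map. Note that with
# padding='same' and stride 1 the spatial size is preserved; padding='valid' would
# reduce it to 1x1 (true global average pooling).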
featureSize = Inception_parallelAsymmetric_1.get_shape()[1].value
averagePool1 = tf.layers.average_pooling2d(Inception_parallelAsymmetric_1, pool_size=featureSize, strides=1,
padding='same')
flattened = tf.layers.flatten(averagePool1)
dropout = tf.nn.dropout(flattened, keep_prob)
outputs = tf.layers.dense(dropout, units=4)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=outputs, labels=Labels))
train = tf.train.AdamOptimizer().minimize(loss)
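# No learning rate is passed, so AdamOptimizer falls back to its default of 0.001.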
saver = tf.train.Saver()
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    G_Train = batchGenerator(batchSize=128)
    G_Valid = batchGenerator(batchSize=80, basePath='data/processed/valid_224')
    acc_Train = []
    acc_Val = []
    max_acc = 0
    for i in range(256):
        X, Y = G_Train.getBatch()
        cur_BatchSize = X.shape[0]
        _, cur_loss = sess.run([train, loss],
                               feed_dict={Input: X, Labels: Y, keep_prob: 0.8})
        if i % 1 == 0:
            print(i, end=': loss: ')
            print(cur_loss)
            # Validation set
            X_v, Y_v = G_Valid.getBatch()
            output_v = softmax(
                sess.run(outputs,
                         feed_dict={Input: X_v, Labels: Y_v, keep_prob: 1.}))
            output_v = returnOneHot(output_v)
            acc_v = computeAccuracy(output_v, Y_v)
            acc_Val.append(acc_v)
            print('current accuracy: ', str(acc_v))
            if acc_v > 0.7 and acc_v > max_acc:
                max_acc = acc_v
                saver.save(sess, "step10/Model/FinalNet")
import sys
import tensorflow as tf
sys.path.append('step3')
sys.path.append('step7')
sys.path.append('step8')
sys.path.append('step9')
from generatorCompleted import batchGenerator
from outputsUtilsCompleted import softmax, returnOneHot, computeAccuracy
from InceptionCompleted import (Inception_traditional, Inception_parallelAsymmetricConv,
Inception_AsymmetricConv,InitialPart,reduction)
from ResNetCompleted import ResNetBlock
BNTraining = tf.placeholder(tf.bool,name='BNTraining')
keeProb = tf.placeholder(tf.float32, shape=(),name='dropout_keep_prob')
batchImgInput = tf.placeholder(tf.float32, shape=(None, 224, 224, 3),name='batchImgInput')
labels = tf.placeholder(tf.float32, shape=(None, 4),name='Labels')
InputBatchSize = tf.placeholder(tf.int32,name='InputBatchSize')
conv1 = tf.layers.conv2d(batchImgInput, filters=96, kernel_size=11, strides=4,padding='same',
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(conv1, pool_size=2, strides=2, padding='same')
conv2 = tf.layers.conv2d(pool1, filters=128, kernel_size=3, strides=1,padding='same',
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(conv2, pool_size=3, strides=2, padding='same')
resBlock1 = ResNetBlock(pool2, batchNormTraining=BNTraining, batchSize=InputBatchSize)
conv3 = tf.layers.conv2d(resBlock1, filters=128, kernel_size=3,strides=1,padding='same',
activation=tf.nn.relu)
resBlock2 = ResNetBlock(conv3, batchNormTraining=BNTraining, batchSize=InputBatchSize)
conv4 = tf.layers.conv2d(resBlock2, filters=64, kernel_size=3,strides=1,padding='same',
activation=tf.nn.relu)
resBlock3 = ResNetBlock(conv4, batchNormTraining=BNTraining, batchSize=InputBatchSize)
conv5 = tf.layers.conv2d(resBlock3, filters=64, kernel_size=3,strides=1,padding='same',
activation=tf.nn.relu)
pool3 = tf.layers.max_pooling2d(conv5, pool_size=2, strides=2, padding='same')
flattened = tf.layers.flatten(pool3)
dense1 = tf.layers.dense(flattened, units=256)
dropout1 = tf.nn.dropout(dense1, keeProb)
dense2 = tf.layers.dense(dropout1, units=4,name='model_outputs')
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=dense2, labels=labels))
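# Batch normalization keeps its moving mean/variance updates in UPDATE_OPS; making the
# train op depend on them ensures the statistics are refreshed on every training step.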
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train = tf.train.AdamOptimizer().minimize(loss)
saver = tf.train.Saver()
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    G_Train = batchGenerator(batchSize=256)
    G_Valid = batchGenerator(batchSize=80, basePath='data/processed/valid_224')
    acc_Train = []
    acc_Val = []
    max_acc = 0
    for i in range(256):
        X, Y = G_Train.getBatch()
        cur_BatchSize = X.shape[0]
        _, cur_loss = sess.run([train, loss],
                               feed_dict={batchImgInput: X, labels: Y, keeProb: 0.8, BNTraining: True,
                                          InputBatchSize: cur_BatchSize})
        if i % 1 == 0:
            print(i, end=': loss: ')
            print(cur_loss)
            # Validation set
            X_v, Y_v = G_Valid.getBatch()
            output_v = softmax(
                sess.run(dense2,
                         feed_dict={batchImgInput: X_v, labels: Y_v, keeProb: 1., BNTraining: False,
                                    InputBatchSize: 80}))
            output_v = returnOneHot(output_v)
            acc_v = computeAccuracy(output_v, Y_v)
            acc_Val.append(acc_v)
            print('current accuracy: ' + str(acc_v))
            if acc_v > 0.7 and acc_v > max_acc:
                max_acc = acc_v
                saver.save(sess, "step10/Model/FinalNet")
import os
os.environ[\"TF_CPP_MIN_LOG_LEVEL\"]='3'
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
import sys
sys.path.append('step3')
sys.path.append('step9')
from generatorCompleted import batchGenerator
from outputsUtilsCompleted import softmax, returnOneHot, computeAccuracy
from prevModules import (Inception_traditional, Inception_parallelAsymmetricConv,
Inception_AsymmetricConv,InitialPart,reduction,ResNetBlock)
#********** Begin **********#
# Free exercise: build and train any model you like, and save it under the PATH 'step10/Model/FinalNet'.
# Optional placeholders and the required values of their name arguments:
# input X: batchImgInput / input Y: Labels / dropout keep probability: dropout_keep_prob
# batch-norm layer training flag: BNTraining / batch size used by the ResNet implementation described earlier: InputBatchSize
#********** End **********#
import os
os.environ[\"TF_CPP_MIN_LOG_LEVEL\"]='3'
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
import sys
sys.path.append('step3')
sys.path.append('step9')
from generatorCompleted import batchGenerator
from outputsUtilsCompleted import softmax, returnOneHot, computeAccuracy
import TrainSaveLoadForUsers
tf.reset_default_graph()
with tf.Session() as sess:
    saver = tf.train.import_meta_graph('step10/Model/FinalNet.meta')
    saver.restore(sess, tf.train.latest_checkpoint('step10/Model'))
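    # import_meta_graph rebuilds the saved graph structure; restore() then loads the
    # weights from the latest checkpoint under step10/Model.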
    graph = tf.get_default_graph()
    # for op in graph.get_operations():
    #     print(op.name)
    # mm = graph.get_tensor_by_name("batch_normalization/moving_mean:0")
    # print(sess.run(mm))
    # exit()
    G_Valid = batchGenerator(batchSize=8, basePath='data/processed/valid_224')
    X_v, Y_v = G_Valid.getBatch()
    batchImgInput = graph.get_tensor_by_name("batchImgInput:0")
    labels = graph.get_tensor_by_name("Labels:0")
    keeProb = graph.get_tensor_by_name("dropout_keep_prob:0")
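    # A model trained without batch norm will not define these two placeholders, so fall
    # back to a feed_dict without them.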
    try:
        BNTraining = graph.get_tensor_by_name("BNTraining:0")
        batchSize = graph.get_tensor_by_name("InputBatchSize:0")
    except:
        BNTraining, batchSize = None, None
    acc_v = 0
    for i in range(10):
        X_v, Y_v = G_Valid.getBatch()
        if BNTraining is not None and batchSize is not None:
            feed_dict = {batchImgInput: X_v, labels: Y_v, keeProb: 1., BNTraining: False, batchSize: 8}
        else:
            feed_dict = {batchImgInput: X_v, labels: Y_v, keeProb: 1.}
        out = graph.get_tensor_by_name("model_outputs/BiasAdd:0")
        output_v = softmax(
            sess.run(out,
                     feed_dict=feed_dict))
        output_v = returnOneHot(output_v)
        acc_v += computeAccuracy(output_v, Y_v)
    acc_v /= 10
    # print(acc_v)
    if acc_v > 0.35:
        print('Training finished within 40 s and prediction accuracy is above 35%. Congratulations, you passed this stage!', end='')
    else:
        print('Training finished within 40 s but prediction accuracy is below 35%. Unfortunately, you did not pass this stage.', end='')
import tensorflow as tf
# Inception module 1
def Inception_traditional(Inputs, nfilters_11=64, nfilters_11Before33=64,
                          nfilters_11Before55=48, nfilters_11After33Pool=32,
                          nfilters_33=96, nfilters_55=64, name=None):
    '''
    The most basic Inception module: concatenate convolution results with different receptive fields.
    The parameters could be even more fine-grained; here every convolution defaults to stride 1 and 'same' padding.
    :param Inputs: output of the previous layer, input of this layer
    :param nfilters_11: number of filters of the 1x1 conv layer
    :param nfilters_11Before33: number of filters of the dimensionality-reducing 1x1 conv before the 3x3 conv
    :param nfilters_11Before55: number of filters of the dimensionality-reducing 1x1 conv before the 5x5 conv
    :param nfilters_11After33Pool: number of 1x1 filters after the 3x3 pooling
    :param nfilters_33: number of filters of the 3x3 conv layer
    :param nfilters_55: number of filters of the 5x5 conv layer (the implementation below replaces the 5x5 with two 3x3 convs, both using this filter count)
    :param name: name of this layer
    :return:
    '''
    # 1x1 conv layer
    conv1 = tf.layers.conv2d(inputs=Inputs, filters=nfilters_11, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # 3x3 conv layer
    conv2_1 = tf.layers.conv2d(inputs=Inputs, filters=nfilters_11Before33, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_2 = tf.layers.conv2d(inputs=conv2_1, filters=nfilters_33, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    # 5x5 conv layer (factorised into two 3x3 convs)
    conv3_1 = tf.layers.conv2d(inputs=Inputs, filters=nfilters_11Before55, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_2 = tf.layers.conv2d(inputs=conv3_1, filters=nfilters_55, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_3 = tf.layers.conv2d(inputs=conv3_2, filters=nfilters_55, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    # pooling + conv
    pool = tf.layers.average_pooling2d(inputs=Inputs, pool_size=3, strides=1, padding='same')
    conv4 = tf.layers.conv2d(inputs=pool, filters=nfilters_11After33Pool, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # concatenate the branch outputs along the channel dimension
    outputs = tf.concat([conv1, conv2_2, conv3_3, conv4], axis=-1)
    return outputs
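# With the default filter counts, the concatenation yields 64 + 96 + 64 + 32 = 256
# output channels while the spatial size of Inputs is preserved.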
# Inception module 2: with asymmetric convolutions
def Inception_AsymmetricConv(Inputs, nfilters_11=192, nfilters_11Before7=128,
                             nfilters_11Before77=128, nfilters_11After33Pool=192,
                             nfilters_7=128, nfilters_77=128, name=None):
    '''
    Replace an nxn convolution with consecutive 1xn and nx1 convolutions.
    This layer actually has more tunable parameters than listed here; this captures the general idea,
    and interested readers can make the parameters describe the module in more detail.
    All strides default to 1.
    :param Inputs: input
    :param nfilters_11: number of filters of the 1x1 conv layer
    :param nfilters_11Before7: number of 1x1 filters before the 1x7 then 7x1 convs
    :param nfilters_11Before77: number of 1x1 filters before the 7x1, 1x7 then again 7x1, 1x7 convs
    :param nfilters_11After33Pool: number of 1x1 filters after the 3x3 pooling
    :param nfilters_7: number of filters of the 1x7 then 7x1 convs
    :param nfilters_77: number of filters of the 7x1, 1x7 then again 7x1, 1x7 convs
    :param name: name of this layer
    :return:
    '''
    # 1x1 conv layer
    conv1 = tf.layers.conv2d(Inputs, filters=nfilters_11, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # 1x7 then 7x1 conv layers
    conv2_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before7, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_2 = tf.layers.conv2d(conv2_1, filters=nfilters_7, kernel_size=(1, 7), strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_3 = tf.layers.conv2d(conv2_2, filters=nfilters_7, kernel_size=(7, 1), strides=1, padding='same',
                               activation=tf.nn.relu)
    # 7x1, 1x7 then again 7x1, 1x7 conv layers
    conv3_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before77, kernel_size=1, strides=1)
    conv3_2 = tf.layers.conv2d(conv3_1, filters=nfilters_77, kernel_size=(7, 1), strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_3 = tf.layers.conv2d(conv3_2, filters=nfilters_77, kernel_size=(1, 7), strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_4 = tf.layers.conv2d(conv3_3, filters=nfilters_77, kernel_size=(7, 1), strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_5 = tf.layers.conv2d(conv3_4, filters=nfilters_77, kernel_size=(1, 7), strides=1, padding='same',
                               activation=tf.nn.relu)
    # pooling + conv
    pool = tf.layers.average_pooling2d(Inputs, pool_size=3, strides=1, padding='same')
    conv4 = tf.layers.conv2d(pool, filters=nfilters_11After33Pool, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # concatenate the branch outputs along the channel dimension
    outputs = tf.concat([conv1, conv2_3, conv3_5, conv4], axis=-1)
    return outputs
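# With the default filter counts, the concatenation yields 192 + 128 + 128 + 192 = 640
# output channels; every branch keeps the spatial size of Inputs.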
# Inception module 3: parallel asymmetric convolutions
def Inception_parallelAsymmetricConv(Inputs, nfilters_11=320, nfilters_11Before33=384,
                                     nfilters_11Before55=448, nfilters_11After33Pool=192,
                                     nfilters_33=384, nfilters_55=384, name=None):
    '''
    Run the 1xn and nx1 convolutions in parallel and concatenate their results.
    :param Inputs: input
    :param nfilters_11: number of filters of the 1x1 conv layer
    :param nfilters_11Before33: number of filters of the dimensionality-reducing 1x1 conv before the 3x3 conv
    :param nfilters_11Before55: number of filters of the dimensionality-reducing 1x1 conv before the 5x5 conv
    :param nfilters_11After33Pool: number of 1x1 filters after the 3x3 pooling
    :param nfilters_33: number of filters of the parallel 1x3 and 3x1 convolutions
    :param nfilters_55: filter count of the two stacked 3x3 convs, where the second 3x3 is done as parallel 1x3 and 3x1 convolutions
    :param name:
    :return:
    '''
    # 1x1 conv layer
    conv1 = tf.layers.conv2d(Inputs, filters=nfilters_11, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # 3x3 conv layer
    conv2_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before33, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_21 = tf.layers.conv2d(conv2_1, filters=nfilters_33, kernel_size=(1, 3), strides=1, padding='same',
                                activation=tf.nn.relu)
    conv2_22 = tf.layers.conv2d(conv2_1, filters=nfilters_33, kernel_size=(3, 1), strides=1, padding='same',
                                activation=tf.nn.relu)
    conv2_3 = tf.concat([conv2_21, conv2_22], axis=-1)
    # two stacked 3x3 conv layers
    conv3_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before55, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_2 = tf.layers.conv2d(conv3_1, filters=nfilters_55, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_31 = tf.layers.conv2d(conv3_2, filters=nfilters_55, kernel_size=(1, 3), strides=1, padding='same',
                                activation=tf.nn.relu)
    conv3_32 = tf.layers.conv2d(conv3_2, filters=nfilters_55, kernel_size=(3, 1), strides=1, padding='same',
                                activation=tf.nn.relu)
    conv3_4 = tf.concat([conv3_31, conv3_32], axis=-1)
    # pooling + conv
    pool = tf.layers.average_pooling2d(Inputs, pool_size=3, strides=1, padding='same')
    conv4 = tf.layers.conv2d(pool, filters=nfilters_11After33Pool, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # concatenate the branch outputs along the channel dimension
    outputs = tf.concat([conv1, conv2_3, conv3_4, conv4], axis=-1)
    return outputs
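# With the default filter counts, the concatenation yields 320 + (384 + 384) + (384 + 384)
# + 192 = 2048 output channels.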
# Grid-size reduction: pooling and convolution run in parallel to shrink the feature map
def reduction(Inputs, nfilters_11Before33=192, nfilters_11Before55=192,
              nfilters_33=320, nfilters_55=192):
    '''
    Note that the last convolution before the concatenation switches to stride 2.
    :param Inputs: input
    :param nfilters_11Before33: number of 1x1 filters before the 3x3 conv
    :param nfilters_11Before55: number of 1x1 filters before the two 3x3 convs
    :param nfilters_33: number of 3x3 filters
    :param nfilters_55: filter count of the two 3x3 convs
    :return:
    '''
    # 3x3 conv
    conv1_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before33, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv1_2 = tf.layers.conv2d(conv1_1, filters=nfilters_33, kernel_size=3, strides=2, padding='same',
                               activation=tf.nn.relu)
    # two 3x3 convs
    conv2_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before55, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_2 = tf.layers.conv2d(conv2_1, filters=nfilters_55, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_3 = tf.layers.conv2d(conv2_2, filters=nfilters_55, kernel_size=3, strides=2, padding='same',
                               activation=tf.nn.relu)
    # pooling
    pool = tf.layers.average_pooling2d(Inputs, pool_size=3, strides=2, padding='same')
    # concatenate
    outputs = tf.concat([conv1_2, conv2_3, pool], axis=-1)
    return outputs
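# All three branches use stride 2, so the module halves the spatial dimensions and
# concatenates 320 + 192 channels from the conv branches with the pooled input's
# original channel count.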
# Initial part of the model
def InitialPart(Inputs):
    '''
    In the paper's model, a few ordinary convolutions and poolings are applied before any Inception module is used.
    :param Inputs: the initial image input
    :return:
    '''
    conv1 = tf.layers.conv2d(Inputs, filters=32, kernel_size=3, strides=2, padding='same',
                             activation=tf.nn.relu)
    conv2 = tf.layers.conv2d(conv1, filters=32, kernel_size=3, strides=1, padding='same',
                             activation=tf.nn.relu)
    conv3 = tf.layers.conv2d(conv2, filters=64, kernel_size=3, strides=1, padding='same',
                             activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(conv3, pool_size=3, strides=2, padding='same')
    conv4 = tf.layers.conv2d(pool1, filters=80, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    conv5 = tf.layers.conv2d(conv4, filters=192, kernel_size=3, strides=1, padding='same',
                             activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(conv5, pool_size=3, strides=2, padding='same')
    return pool2
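# For a 224x224x3 input, InitialPart applies three stride-2 stages (conv1, pool1, pool2),
# so its output is a 28x28 feature map with 192 channels. A minimal optional smoke test
# follows; it is a sketch that only runs when this module is executed directly, and
# _demo_imgs is an illustrative name not used anywhere else.
if __name__ == '__main__':
    _demo_imgs = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
    print(InitialPart(_demo_imgs).get_shape())  # expected: (?, 28, 28, 192)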
def multiChannelWeightLayer(Inputs, batchNormTraining, batchSize):
    '''
    Apply BatchNorm + relu + Wx_plus_b to the input.
    :param Inputs: input tensor
    :param batchNormTraining: the batchNorm layer's training flag; feed different values during training and inference
    :return:
    '''
    batchNorm = tf.layers.batch_normalization(Inputs, training=batchNormTraining)
    relu = tf.nn.relu(batchNorm)
    transposed = tf.transpose(relu, [0, 3, 1, 2])
    num_channels = Inputs.get_shape()[-1].value
    size = Inputs.get_shape()[1].value
    weight = tf.Variable(tf.truncated_normal(shape=(size, size)), dtype=tf.float32, trainable=True)
    weight_expand = tf.expand_dims(weight, axis=0)
    weight_nchannels = tf.tile(weight_expand, tf.constant([