Kiryuu Sakuya / TensorFlow-Homework · Commits

Unverified commit c9a3d7d5, authored Apr 14, 2020 by Kiryuu Sakuya 🎵
Add exam.6
parent fd2724c9
Changes: 3 files
exam/step6/VGGNetCompleted.py (new file, 0 → 100644)
import tensorflow as tf

# def generateModel():
#---- Answer section begin ----#
# Define placeholders - begin
Input = tf.placeholder(shape=(None, 224, 224, 3), dtype=tf.float32)
keep_prob = tf.placeholder(tf.float32, shape=())
Labels = tf.placeholder(shape=(None, 4), dtype=tf.float32)
# Define placeholders - end

# Part 1
Part1_Conv1 = tf.layers.conv2d(inputs=Input, filters=64, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part1_Conv2 = tf.layers.conv2d(inputs=Part1_Conv1, filters=64, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part1_pool1 = tf.layers.max_pooling2d(Part1_Conv2, pool_size=2, strides=2, padding='same')

# Part 2
Part2_Conv1 = tf.layers.conv2d(inputs=Part1_pool1, filters=128, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part2_Conv2 = tf.layers.conv2d(inputs=Part2_Conv1, filters=128, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part2_pool1 = tf.layers.max_pooling2d(Part2_Conv2, pool_size=2, strides=2, padding='same')

# Part 3
Part3_Conv1 = tf.layers.conv2d(inputs=Part2_pool1, filters=256, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part3_Conv2 = tf.layers.conv2d(inputs=Part3_Conv1, filters=256, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part3_Conv3 = tf.layers.conv2d(inputs=Part3_Conv2, filters=256, kernel_size=1, strides=1, padding='same', activation=tf.nn.relu)
Part3_pool1 = tf.layers.max_pooling2d(Part3_Conv3, pool_size=2, strides=2, padding='same')

# Part 4
Part4_Conv1 = tf.layers.conv2d(inputs=Part3_pool1, filters=512, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part4_Conv2 = tf.layers.conv2d(inputs=Part4_Conv1, filters=512, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part4_Conv3 = tf.layers.conv2d(inputs=Part4_Conv2, filters=512, kernel_size=1, strides=1, padding='same', activation=tf.nn.relu)
Part4_pool1 = tf.layers.max_pooling2d(Part4_Conv3, pool_size=2, strides=2, padding='same')

# Part 5
Part5_Conv1 = tf.layers.conv2d(inputs=Part4_pool1, filters=512, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part5_Conv2 = tf.layers.conv2d(inputs=Part5_Conv1, filters=512, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)
Part5_Conv3 = tf.layers.conv2d(inputs=Part5_Conv2, filters=512, kernel_size=1, strides=1, padding='same', activation=tf.nn.relu)
Part5_pool1 = tf.layers.max_pooling2d(Part5_Conv3, pool_size=2, strides=2, padding='same')

# Fully connected section
# Flatten
flattened = tf.layers.flatten(Part5_pool1)
# The network should not be too wide for this task; this layer is required to use 512 neurons
dense1 = tf.layers.dense(flattened, units=512, activation=tf.nn.relu)
dropout1 = tf.nn.dropout(dense1, keep_prob)
# This layer is required to use 256 neurons
dense2 = tf.layers.dense(dropout1, units=256, activation=tf.nn.relu)
dropout2 = tf.nn.dropout(dense2, keep_prob)
outputs = tf.layers.dense(dropout2, units=4)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=outputs, labels=Labels))
train = tf.train.AdamOptimizer().minimize(loss)
#---- Answer section end ----#

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # saver.save(sess, "modelInfo/VGGNet")
    tf.train.export_meta_graph(filename="step6/modelInfo/VGGNet", graph=tf.get_default_graph())
tf.reset_default_graph()

# if __name__ == '__main__':
#     generateModel()
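The script above only builds the graph and exports its meta graph; it never feeds the placeholders or runs the training op. As a minimal sketch of how this graph would be trained, assuming the lines were added inside the session block before tf.reset_default_graph() and using random NumPy arrays purely as stand-ins for real data:

import numpy as np

# Hypothetical toy batch: 8 RGB images of size 224x224 and one-hot labels over the 4 classes.
batch_images = np.random.rand(8, 224, 224, 3).astype('float32')
batch_labels = np.eye(4, dtype='float32')[np.random.randint(0, 4, size=8)]

# One optimization step; keep_prob < 1.0 enables dropout during training.
_, batch_loss = sess.run([train, loss],
                         feed_dict={Input: batch_images, Labels: batch_labels, keep_prob: 0.5})
print('loss after one step:', batch_loss)

Feeding keep_prob through a scalar placeholder is what lets the same graph run with dropout during training and with keep_prob = 1.0 at evaluation time.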
exam/step6/VGGNetForUsers.py (new file, 0 → 100644)
import tensorflow as tf

# All convolutional layers: kernel size 3, stride 1, 'same' padding, ReLU activation
# All pooling layers: max pooling, pool size 2, stride 2, 'same' padding
# Hidden fully connected layers: ReLU activation
# The two hidden fully connected layers use 512 and 256 neurons respectively
# Apply dropout between the fully connected layers
# Use tf.nn for dropout and tf.layers for every other layer
# Loss: tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2())
# Optimizer: tf.train.AdamOptimizer()
#********** Begin **********#

#********** End **********#

#--- Do not modify the code below ---#
#--- otherwise the evaluation will be affected ---#
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tf.train.export_meta_graph(filename='step6/userModelInfo/VGGNet', graph=tf.get_default_graph())
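The comment block above is the specification that VGGNetCompleted.py implements layer by layer. Purely as an illustration, and not part of the graded answer, the five convolutional blocks could also be written with a small helper loop; the helper name vgg_block and the filter/kernel lists are assumptions that mirror the completed answer, and the sketch assumes the same Input placeholder defined in VGGNetCompleted.py:

def vgg_block(x, filters, kernel_sizes):
    # One VGG block: a run of conv2d layers followed by 2x2 max pooling.
    for k in kernel_sizes:
        x = tf.layers.conv2d(x, filters=filters, kernel_size=k, strides=1,
                             padding='same', activation=tf.nn.relu)
    return tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='same')

net = Input
for filters, kernels in [(64, [3, 3]), (128, [3, 3]),
                         (256, [3, 3, 1]), (512, [3, 3, 1]), (512, [3, 3, 1])]:
    net = vgg_block(net, filters, kernels)
# net now corresponds to Part5_pool1 in VGGNetCompleted.py.

Whether such a refactoring would pass the size-based check in VGGNetTest.py depends on it producing exactly the same operations as the reference graph, so the explicit layer-by-layer version remains the safer answer for the exercise.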
exam/step6/VGGNetTest.py (new file, 0 → 100644)
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
import warnings
warnings.filterwarnings('ignore')

import VGGNetCompleted
import VGGNetForUsers

rightModelPath = 'step6/modelInfo/VGGNet'
userModelPath = 'step6/userModelInfo/VGGNet'

# print(os.path.exists(rightModelPath))
# print(os.path.exists(userModelPath))
# print(os.path.getsize(rightModelPath))
# print(os.path.getsize(userModelPath))
isRight = os.path.getsize(rightModelPath) == os.path.getsize(userModelPath)
# print(isRight)
if isRight:
    print('Congratulations, you have passed this test! You have mastered the structure of VGGNet!', end='')
else:
    print('You did not pass this test: the model structure is incorrect!')
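The check above treats the two exported meta graphs as equivalent when the files have the same size, which is a quick proxy rather than a structural comparison. As an illustration only, and not part of the exercise, the graphs could instead be re-imported and compared operation by operation; the helper name op_types below is hypothetical:

import tensorflow as tf

def op_types(meta_path):
    # Load a meta graph into a fresh graph and list its operation types in order.
    graph = tf.Graph()
    with graph.as_default():
        tf.train.import_meta_graph(meta_path)
    return [op.type for op in graph.get_operations()]

same_structure = op_types(rightModelPath) == op_types(userModelPath)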