Unverified Commit f11427cb authored by Kiryuu Sakuya 🎵

Update 02:

 - Finish (actually not) the TensorFlow v2 version
 - Add a usable TensorFlow v1 version
parent 0773a570
@@ -3,6 +3,7 @@
import tensorflow as tf
import numpy
import matplotlib.pyplot as pyplot
+from datetime import datetime
with tf.compat.v1.Session() as sess:
@@ -43,7 +44,8 @@ with tf.compat.v1.Session() as sess:
    # Initialize the gradient-descent optimizer here
    # https://stackoverflow.com/questions/55682718/module-tensorflow-api-v2-train-has-no-attribute-gradientdescentoptimizer
    # https://stackoverflow.com/questions/58722591/typeerror-minimize-missing-1-required-positional-argument-var-list
-    optimizer = tf.optimizers.SGD(learning_rate).minimize(loss_function, var_list=[w, b])
+    # optimizer = tf.keras.optimizers.SGD(learning_rate).minimize(loss_function, var_list=[w, b])
+    optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
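    # A minimal TF2-native sketch (an assumption for eager mode, not the compat.v1
    # graph flow kept above): the Keras optimizer's minimize() wants either a
    # callable loss plus var_list, or a Tensor loss plus a GradientTape, e.g.
    #   sgd = tf.keras.optimizers.SGD(learning_rate)
    #   sgd.minimize(lambda: tf.reduce_mean(tf.square(ys - (w * xs + b))), var_list=[w, b])
    # where xs and ys stand in for the per-sample values fed in the loop below.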
    # Start training
    # Track the number of training steps
@@ -51,18 +53,44 @@ with tf.compat.v1.Session() as sess:
    # List used to store the loss values
    loss_list = []
    # Set the number of training epochs
-    for epoch in range(train_rounds):
+    for rounds in range(train_rounds):
        for xs, ys in zip(x_data, y_data):
-            _, loss = sess.run([optimizer, loss_function], feed_dict={x: xs, y: ys})
+            _, loss = sess.run([optimizer, loss_function], feed_dict={x: tf.keras.layers.Reshape(xs), y: tf.keras.layers.Reshape(ys)})
            # Append the loss of every step to the list
            loss_list.append(loss)
            step = step + 1
            # Print the loss once every 20 training samples
            display_step = 20
            if step % display_step == 0:
-                print("Train Epoch: %02d" % (epoch + 1), "Step: %04d" % step, "loss = ", "{:.9f}".format(loss))
+                print("Train Epoch: %02d" % (rounds + 1), "Step: %04d" % step, "loss = ", "{:.9f}".format(loss))
        # After each epoch there is an updated w and b
        b0temp = b.eval(session=sess)
        w0temp = w.eval(session=sess)
        # Plot the model learned so far
-        pyplot.plot(x_data, w0temp * x_data + b0temp)
+        # pyplot.plot(x_data, w0temp * x_data + b0temp)
    pyplot.scatter(x_data, y_data, label="Original data")
    # Show the trained model
    pyplot.plot(x_data, x_data * sess.run(w) + sess.run(b), label="Fitted line", color="r", linewidth=1)
    # Use the loc parameter to set the legend position
    pyplot.legend(loc=2)
    pyplot.show()
    # pyplot.plot(loss_list)
    print('Final values of w and b:')
    print('w:', sess.run(w))
    print('b:', sess.run(b))
    # Step 4: use the model to make a prediction
    x_test = 5.79
    predict = sess.run(predict, feed_dict={x: x_test})
    print('Predicted value: %f' % predict)
    target = 3.1234 * x_test + 2.98
    print('Target value: %f' % target)
    # Create a writer for logging
    logdir = "logs" + datetime.now().strftime("%Y%m%d-%H%M%S")
    writer = tf.summary.create_file_writer(logdir)
    with writer.as_default():
        # Still TensorFlow v1 here
        writer = tf.compat.v1.summary.FileWriter(logdir, tf.compat.v1.get_default_graph())
        writer.close()
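For comparison, the v2 summary API alone could record the per-step loss for TensorBoard. The sketch below is an illustration only: it assumes eager execution and reuses the loss_list collected above, rather than the graph-mode flow of this script.

from datetime import datetime
import tensorflow as tf

logdir = "logs/" + datetime.now().strftime("%Y%m%d-%H%M%S")
loss_writer = tf.summary.create_file_writer(logdir)
with loss_writer.as_default():
    # Write one scalar per training step; TensorBoard plots it under the "loss" tag
    for step, loss_value in enumerate(loss_list):
        tf.summary.scalar("loss", float(loss_value), step=step)
    loss_writer.flush()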
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Clear the default graph and its accumulated nodes
tf.reset_default_graph()
# Set the log directory
logdir = "log"
# Set the random seed
np.random.seed(5)
# Generate an evenly spaced sequence
x_data = np.linspace(0, 100, 500)
# y = 3.1234x + 2.98 + noise
y_data = 3.1234 * x_data + 2.98 + np.random.randn(*x_data.shape) * 0.4
# Scatter plot of the randomly generated data
plt.scatter(x_data, y_data)
# Plot the true linear function y = 3.1234x + 2.98
plt.plot(x_data, 3.1234 * x_data + 2.98, color='red', linewidth=3)
# Define placeholders
x = tf.placeholder("float", name="x")
y = tf.placeholder("float", name="y")
# Define the model function
def model(x, w, b):
    """Return the model output."""
    return tf.multiply(x, w) + b
# Slope of the linear function, variable w
w = tf.Variable(1.0, name="w0")
# Intercept, variable b
b = tf.Variable(0.0, name="b0")
# pred is the predicted value
pred = model(x, w, b)
# Set the hyperparameters
# Number of training epochs
train_epoches = 10
# Learning rate
learn_rate = 0.00001
# Define the loss
# Use mean squared error as the loss function
loss_function = tf.reduce_mean(tf.square(y - pred))
# Define the optimizer
# Create a gradient-descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learn_rate).minimize(loss_function)
# Create the session
sess = tf.Session()
# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)
# Iterative training
# Start training
step = 0  # Track the step count
loss_list = []  # List used to store the loss values
disp_step = 20  # Print once every 20 steps
for epoch in range(train_epoches):
    for xs, ys in zip(x_data, y_data):
        _, loss = sess.run([optimizer, loss_function], feed_dict={x: xs, y: ys})
        loss_list.append(loss)
        step += 1
        if step % disp_step == 0:
            print("Train Epoch:", "%02d" % (epoch + 1), "Step: %03d" % step, "loss=", "{:.9f}".format(loss))
    b0temp = b.eval(session=sess)
    w0temp = w.eval(session=sess)
    plt.plot(x_data, w0temp * x_data + b0temp)
writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
writer.close()
plt.plot(loss_list)
# Print the results
print("w:", sess.run(w))  # should be close to 3.1234
print("b:", sess.run(b))  # should be close to 2.98
# Visualize the results
plt.scatter(x_data, y_data, label="Original data")
plt.plot(x_data, x_data * sess.run(w) + sess.run(b), label="Fitted line", color="r", linewidth=3)
plt.legend(loc=2)
# Use the model to make a prediction
x_test = 5.79
predict = sess.run(pred, feed_dict={x: x_test})
print("Predicted value: %f" % predict)
target = 3.1234 * x_test + 2.98
print("Target value: %f" % target)
\ No newline at end of file
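Because the placeholder x was created without a fixed shape, the same open session can also score a batch of test points in one call; the values below are illustrative only.

x_batch = [1.0, 5.79, 10.0]
batch_predictions = sess.run(pred, feed_dict={x: x_batch})
for x_value, y_value in zip(x_batch, batch_predictions):
    # Compare each prediction with the noise-free target 3.1234 * x + 2.98
    print("x = %.2f  predicted = %.4f  target = %.4f" % (x_value, y_value, 3.1234 * x_value + 2.98))

The graph dumped by the FileWriter above can then be inspected by pointing TensorBoard at the same directory, e.g. tensorboard --logdir=log.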