Unverified Commit c6c7d713 authored by Kiryuu Sakuya's avatar Kiryuu Sakuya 🎵
Browse files

Update 03 code

parent ef913bad
...@@ -81,6 +81,17 @@ with tf.compat.v1.Session() as sess: ...@@ -81,6 +81,17 @@ with tf.compat.v1.Session() as sess:
# TypeError: 'Tensor' object is not callable # TypeError: 'Tensor' object is not callable
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss_function) optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
# TensorBoard visualization: log the training loss for later inspection.
logdir = "03. predicting-boston-house-price/log"
# Op that records the scalar loss value on each evaluation.
sum_loss_op = tf.summary.scalar("loss", loss_function)
# Merge all summary ops so a single run() call collects every log entry.
merged = tf.compat.v1.summary.merge_all()
# Writer that streams summaries (and the session graph) into the log directory.
writer = tf.compat.v1.summary.FileWriter(logdir, sess.graph)
loss_list = []
# Model training: one SGD step per sample, logging loss to TensorBoard.
for epoch in range(train_epochs):
    loss_sum = 0.0
    for xs, ys in zip(x_data, y_data):
        # Feed values must match the placeholders' shapes.
        xs = xs.reshape(1, 12)
        ys = ys.reshape(1, 1)
        _, summary_str, loss = sess.run([optimizer, sum_loss_op, loss_function], feed_dict = {x: xs, y: ys})
        writer.add_summary(summary_str, epoch)
        loss_sum += loss
    # Reshuffle the samples between epochs.
    x_data, y_data = shuffle(x_data, y_data)

    b0temp = b.eval(session = sess)
    w0temp = w.eval(session = sess)
    loss_average = loss_sum / len(y_data)
    # BUG FIX: record the per-epoch average loss. loss_list was created but
    # never populated, so the later pyplot.plot(loss_list) drew an empty chart.
    loss_list.append(loss_average)
    print("epoch = ", epoch + 1, "loss = ", loss_average, "b = ", b0temp, " w = ", w0temp)
\ No newline at end of file
# Model prediction: score one randomly chosen sample against its true label.
n = numpy.random.randint(len(x_data[:, 0]))
# print(n)
x_test = x_data[n]
# Reshape to the placeholder's (1, 12) shape before feeding.
x_test = x_test.reshape([1, 12])
prediction = sess.run(predict, feed_dict={x: x_test})
print("预测值:%f" %prediction)
target = y_data[n]
print("真实值:%f" %target)

# Visualize the loss curve.
pyplot.plot(loss_list)
# BUG FIX: without show() the figure is never rendered when the file is run
# as a plain script (plot() only stages the data).
pyplot.show()
\ No newline at end of file
...@@ -8,115 +8,113 @@ import numpy ...@@ -8,115 +8,113 @@ import numpy
import pandas as panda import pandas as panda
from sklearn.utils import shuffle from sklearn.utils import shuffle
# Load the Boston housing dataset (row 0 of the CSV holds the column names).
read_data = panda.read_csv("03. predicting-boston-house-price/data/boston.csv", header = 0)

# Show a statistical summary of the data.
# print(read_data.describe())
# Extract the raw values from the DataFrame.
# read_data = read_data.values
# 2-D array: 506 rows by 13 columns.
# print(read_data)
# Convert to a numpy array.
read_data = numpy.array(read_data)
# Min-max scale the 12 feature columns (0..11) into [0, 1].
# BUG FIX: the original divided by (max - min) without first subtracting the
# column minimum, so any column whose minimum is nonzero was NOT mapped into
# [0, 1] as the comment claimed. Proper min-max scaling is (x - min) / (max - min).
for i in range(12):
    col_min = read_data[:, i].min()
    col_max = read_data[:, i].max()
    read_data[:, i] = (read_data[:, i] - col_min) / (col_max - col_min)

# x_data: the first 12 columns (features); 2-D array, 506 rows x 12 columns.
x_data = read_data[:, :12]
# print(x_data, "\n shape = ", x_data.shape)
# y_data: the last column (label); 1-D array with 506 elements.
y_data = read_data[:, 12]
# print(y_data, "\n shape = ", y_data.shape)
# Placeholders for feature and label data; shapes must agree with the
# arrays that are actually fed in.
# x: any number of rows, 12 feature columns each.
x = tf.compat.v1.placeholder(tf.float32, [None, 12], name = "x")
# y: any number of rows, a single label column.
y = tf.compat.v1.placeholder(tf.float32, [None, 1], name = "y")
# Model definition.
with tf.name_scope("Model"):
    # w: a 12x1 column vector of weights (w1..w12), initialized with small
    # random values (stddev 0.01).
    # BUG FIX: name = "w" was previously passed to tf.random.normal, which
    # names the initializer op instead of the variable; it belongs on
    # tf.Variable so the variable itself shows up as "w" in the graph.
    w = tf.Variable(tf.random.normal([12, 1], stddev = 0.01), name = "w")
    # b: scalar bias, initialized to 1.0.
    b = tf.Variable(1.0, name = "b")

    # Forward pass: y = x1*w1 + ... + x12*w12 + b.
    # x is a row vector, so this must be a matrix product (matmul),
    # not an element-wise multiply.
    def model(x, w, b):
        return tf.matmul(x, w) + b

    # Prediction op (forward-computation node).
    predict = model(x, w, b)
# Training hyperparameters.
train_epochs = 50       # number of passes over the dataset
learning_rate = 0.0251  # gradient-descent step size

# Mean-squared-error loss.
with tf.name_scope("LossFunction"):
    loss_function = tf.reduce_mean(tf.pow(y - predict, 2))

# Gradient-descent optimizer that minimizes the loss.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
# Session configuration.
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction = 0.7)
# BUG FIX: gpu_options was built but never attached to the config, so the
# 0.7 per-process GPU memory fraction had no effect; pass it into ConfigProto.
config = tf.compat.v1.ConfigProto(allow_soft_placement = True, gpu_options = gpu_options)
config.gpu_options.allow_growth = True

# Create the session and the variable-initializer op.
sess = tf.compat.v1.Session(config = config)
init = tf.compat.v1.global_variables_initializer()

# TensorBoard visualization.
logdir = "03. predicting-boston-house-price/log"
# Op that records the scalar loss value on each evaluation.
sum_loss_op = tf.summary.scalar("loss", loss_function)
# Merge all summary ops so a single run() call collects every log entry.
merged = tf.compat.v1.summary.merge_all()

sess.run(init)
# Writer that streams summaries (and the session graph) into the log directory.
writer = tf.compat.v1.summary.FileWriter(logdir, sess.graph)

loss_list = []
# Model training: one SGD step per sample, logging loss to TensorBoard.
for epoch in range(train_epochs):
    loss_sum = 0.0
    for xs, ys in zip(x_data, y_data):
        # Feed values must match the placeholders' shapes.
        xs = xs.reshape(1, 12)
        ys = ys.reshape(1, 1)
        _, summary_str, loss = sess.run([optimizer, sum_loss_op, loss_function], feed_dict = {x: xs, y: ys})
        writer.add_summary(summary_str, epoch)
        loss_sum += loss
    # Reshuffle the samples between epochs.
    x_data, y_data = shuffle(x_data, y_data)

    b0temp = b.eval(session = sess)
    w0temp = w.eval(session = sess)
    loss_average = loss_sum / len(y_data)
    # BUG FIX: record the per-epoch average loss. loss_list was created but
    # never populated, so any later plot of loss_list rendered an empty chart.
    loss_list.append(loss_average)
    print("epoch = ", epoch + 1, "loss = ", loss_average, "b = ", b0temp, " w = ", w0temp)
# 模型预测 # 模型预测
n = numpy.random.randint(len(x_data[:, 0])) n = numpy.random.randint(len(x_data[:, 0]))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment