Unverified Commit c6c7d713 authored by Kiryuu Sakuya's avatar Kiryuu Sakuya 🎵
Browse files

Update 03 code

parent ef913bad
......@@ -81,6 +81,17 @@ with tf.compat.v1.Session() as sess:
# TypeError: 'Tensor' object is not callable
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
# Tensorboard 可视化
logdir = "03. predicting-boston-house-price/log"
# 创建一个用于记录损失值的操作
sum_loss_op = tf.summary.scalar("loss", loss_function)
# 把所有日志文件中内容合并,方便写入
merged = tf.compat.v1.summary.merge_all()
# 创建 Tensorboard 的写入器
writer = tf.compat.v1.summary.FileWriter(logdir, sess.graph)
loss_list = []
# 模型训练
for epoch in range(train_epochs):
loss_sum = 0.0
......@@ -89,9 +100,9 @@ with tf.compat.v1.Session() as sess:
xs = xs.reshape(1, 12)
ys = ys.reshape(1, 1)
_, loss = sess.run([optimizer, loss_function], feed_dict = {x: xs, y: ys})
loss_sum = loss_sum + loss
_, summary_str, loss = sess.run([optimizer, sum_loss_op, loss_function], feed_dict = {x: xs, y: ys})
writer.add_summary(summary_str, epoch)
loss_sum += loss
# 打乱数据顺序
x_data, y_data = shuffle(x_data, y_data)
......@@ -101,3 +112,16 @@ with tf.compat.v1.Session() as sess:
loss_average = loss_sum / len(y_data)
print("epoch = ", epoch + 1, "loss = ", loss_average, "b = ", b0temp, " w = ", w0temp)
# 模型预测
n = numpy.random.randint(len(x_data[:, 0]))
# print(n)
x_test = x_data[n]
x_test = x_test.reshape([1, 12])
prediction = sess.run(predict, feed_dict={x: x_test})
print("预测值:%f" %prediction)
target = y_data[n]
print("真实值:%f" %target)
# 可视化损失值
pyplot.plot(loss_list)
\ No newline at end of file
......@@ -8,45 +8,43 @@ import numpy
import pandas as panda
from sklearn.utils import shuffle
with tf.compat.v1.Session() as sess:
# 读取配置文件
read_data = panda.read_csv("03. predicting-boston-house-price/data/boston.csv", header = 0)
# 显示数据摘要描述信息
# print(read_data.describe())
# 获取配置文件的值
read_data = read_data.values
# 二维数组,13 列 506 行
# print(read_data)
# 转换为 numpy 的数组格式
read_data = numpy.array(read_data)
# 对特征数据(0 到 11 列)做(0 - 1)的归一化
for i in range(12):
# 读取配置文件
read_data = panda.read_csv("03. predicting-boston-house-price/data/boston.csv", header = 0)
# 显示数据摘要描述信息
# print(read_data.describe())
# 获取配置文件的值
# read_data = read_data.values
# 二维数组,13 列 506 行
# print(read_data)
# 转换为 numpy 的数组格式
read_data = numpy.array(read_data)
# 对特征数据(0 到 11 列)做(0 - 1)的归一化
for i in range(12):
read_data[:, i] = read_data[:, i] / (read_data[:, i].max() - read_data[:, i].min())
# x_data 为前 12 列特征数据
# 前半部分是所有数据,右半部分是列(0 - 11)
x_data = read_data[:, :12]
# y_data 为最后 1 列标签数据
y_data = read_data[:, 12]
# 是一个二维数组,506 行,12 列
# print(x_data, "\n shape = ", x_data.shape)
# 是一个一维数组,有 506 个元素/单元
# print(y_data, "\n shape = ", y_data.shape)
# 定义特征数据和标签数据的占位符(placeholder)
# 具有 12 个特征,shape 要和实际上的特征数据相吻合
# 12 个特征数据
# 即行不管,列有 12 列
x = tf.compat.v1.placeholder(tf.float32, [None, 12], name = "x")
# 1 个标签数据
# 有 1 列
y = tf.compat.v1.placeholder(tf.float32, [None, 1], name = "y")
# 定义命名空间
with tf.name_scope("Model"):
# x_data 为前 12 列特征数据
# 前半部分是所有数据,右半部分是列(0 - 11)
x_data = read_data[:, :12]
# y_data 为最后 1 列标签数据
y_data = read_data[:, 12]
# 是一个二维数组,506 行,12 列
# print(x_data, "\n shape = ", x_data.shape)
# 是一个一维数组,有 506 个元素/单元
# print(y_data, "\n shape = ", y_data.shape)
# 定义特征数据和标签数据的占位符(placeholder)
# 具有 12 个特征,shape 要和实际上的特征数据相吻合
# 12 个特征数据
# 即行不管,列有 12 列
x = tf.compat.v1.placeholder(tf.float32, [None, 12], name = "x")
# 1 个标签数据
# 有 1 列
y = tf.compat.v1.placeholder(tf.float32, [None, 1], name = "y")
# 定义命名空间
with tf.name_scope("Model"):
# 12 行 1 列的列向量,w1、w2、w3...
# w 初始化为 shape = (12, 1) 的随机数,标准差设置为 0.01
w = tf.Variable(tf.random.normal([12, 1], stddev = 0.01, name = "w"))
......@@ -62,43 +60,43 @@ with tf.compat.v1.Session() as sess:
# 预测计算操作,前向计算节点
predict = model(x, w, b)
# 模型训练
# 设置模型训练超参数
# 迭代轮次
train_epochs = 50
# 学习率
learning_rate = 0.0251
# 定义均方差损失函数
with tf.name_scope("LossFunction"):
# 模型训练
# 设置模型训练超参数
# 迭代轮次
train_epochs = 50
# 学习率
learning_rate = 0.0251
# 定义均方差损失函数
with tf.name_scope("LossFunction"):
# 均方误差
loss_function = tf.reduce_mean(tf.pow(y - predict, 2))
# 创建优化器
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
# 创建优化器
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
config = tf.compat.v1.ConfigProto(allow_soft_placement = True)
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction = 0.7)
config.gpu_options.allow_growth = True
config = tf.compat.v1.ConfigProto(allow_soft_placement = True)
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction = 0.7)
config.gpu_options.allow_growth = True
# 声明会话
sess = tf.compat.v1.Session(config = config)
init = tf.compat.v1.global_variables_initializer()
# 声明会话
sess = tf.compat.v1.Session(config = config)
init = tf.compat.v1.global_variables_initializer()
# Tensorboard 可视化
logdir = "03. predicting-boston-house-price/log"
# 创建一个用于记录损失值的操作
sum_loss_op = tf.summary.scalar("loss", loss_function)
# 把所有日志文件中内容合并,方便写入
merged = tf.compat.v1.summary.merge_all()
# Tensorboard 可视化
logdir = "03. predicting-boston-house-price/log"
# 创建一个用于记录损失值的操作
sum_loss_op = tf.summary.scalar("loss", loss_function)
# 把所有日志文件中内容合并,方便写入
merged = tf.compat.v1.summary.merge_all()
sess.run(init)
sess.run(init)
# 创建 Tensorboard 的写入器
writer = tf.compat.v1.summary.FileWriter(logdir, sess.graph)
# 创建 Tensorboard 的写入器
writer = tf.compat.v1.summary.FileWriter(logdir, sess.graph)
loss_list = []
loss_list = []
# 模型训练
for epoch in range(train_epochs):
# 模型训练
for epoch in range(train_epochs):
loss_sum = 0.0
for xs, ys in zip(x_data, y_data):
# Feed 数据必须和 Placeholder 的 shape 一致
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment