TFboy养成记: Multilayer Perceptron (MLP)

Date: 2023-12-31 10:40:26

This post summarizes Mofan's (莫烦) tutorial videos.

The multilayer-perceptron code here builds a simple three-layer neural network: an input layer, one hidden layer, and an output layer. Its goal is to fit a quadratic curve. To keep the data realistic, Gaussian noise with mean 0 and stddev 0.05 is added.
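Concretely, the training data is generated like this (this snippet is excerpted from the full script at the end of the post):

import numpy as np

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]   # 300 points in [-1, 1], shape (300, 1)
noise = np.random.normal(0, 0.05, x_data.shape)   # Gaussian noise: mean 0, stddev 0.05
y_data = np.square(x_data) - 0.5 + noise          # quadratic target y = x^2 - 0.5, plus noise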

Code for adding a layer:

def addLayer(inputs, inSize, outSize, activ_func=None):
    # inSize/outSize are the input/output dimensions; inputs is the input tensor.
    # activ_func is the activation function (default None); the output layer has no activation.
    with tf.name_scope(name="layer"):
        with tf.name_scope("weights"):
            Weights = tf.Variable(tf.random_normal([inSize, outSize]), name="W")
        bias = tf.Variable(tf.zeros([1, outSize]), name="bias")
        W_plus_b = tf.matmul(inputs, Weights) + bias
        if activ_func is None:
            return W_plus_b
        else:
            return activ_func(W_plus_b)
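As a quick sanity check on the shapes (a NumPy sketch, not part of the original code): a batch of inputs with shape (N, inSize) times an (inSize, outSize) weight matrix gives (N, outSize), and the (1, outSize) bias broadcasts across the batch dimension.

import numpy as np

inputs = np.random.rand(300, 1)      # N = 300 samples, inSize = 1
Weights = np.random.randn(1, 10)     # [inSize, outSize] = [1, 10]
bias = np.zeros((1, 10))             # broadcasts over all 300 rows
hidden = inputs @ Weights + bias
print(hidden.shape)                  # (300, 10)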

Inputs:

with tf.name_scope(name="inputs"):  # name_scope is mainly so these nodes group nicely in TensorBoard
    xs = tf.placeholder(tf.float32, [None, 1], name="x_input")  # note: None, not -1
    ys = tf.placeholder(tf.float32, [None, 1], name="y_input")
l1 = addLayer(xs, 1, 10, activ_func=tf.nn.relu)
y_pre = addLayer(l1, 10, 1, activ_func=None)
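Because the leading dimension of the placeholders is None rather than a fixed number, the same graph accepts any batch size at run time. A minimal sketch, assuming the graph above has already been built:

import numpy as np

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    small_batch = np.random.rand(5, 1)                          # 5 samples instead of 300
    print(sess.run(y_pre, feed_dict={xs: small_batch}).shape)   # (5, 1)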

Other parts:

A few points to note here:

 with tf.name_scope("loss"):
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-y_pre),
reduction_indices=[1]))#这里reduction_indices=[1]类似于numpy中的那种用法,是指横向还是竖向,reduce_sum函数貌似主要是用于矩阵的,向量可以不使用
with tf.name_scope("train"):
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
#在以后的版本中,这里的initialize_all_variable()可能被逐步抛弃使用global_variable_init(大概是这么写的)那个函数。欢迎指正。
init = tf.initialize_all_variables()#init这一步很重要,在训练前一定要是使用sess.run(init)操作(只要是你用到了Variable)
writer = tf.summary.FileWriter("logs/",sess.graph)
with tf.Session() as sess: sess.run(init) for i in range(1000):
sess.run(train_step,feed_dict = {xs:x_data,ys:y_data})
if i % 50 == 0:
print(sess.run(loss,feed_dict = {xs:x_data,ys:y_data}))#只要是你的操作中有涉及到placeholder一定要记得使用feed_dict
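To make the reduction_indices comment concrete, here is the NumPy counterpart (an illustrative sketch, not from the original code):

import numpy as np

err = np.array([[1.0, 4.0],
                [9.0, 16.0]])            # squared errors, shape (2, 2)
print(np.sum(err, axis=1))               # [ 5. 25.] -- per-sample sum, like reduction_indices=[1]
print(np.mean(np.sum(err, axis=1)))      # 15.0 -- what reduce_mean(reduce_sum(...)) computes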

Full code:

# -*- coding: utf-8 -*-
"""
Created on Tue Jun 13 15:41:23 2017

@author: Jarvis
"""
import tensorflow as tf
import numpy as np

def addLayer(inputs, inSize, outSize, activ_func=None):
    with tf.name_scope(name="layer"):
        with tf.name_scope("weights"):
            Weights = tf.Variable(tf.random_normal([inSize, outSize]), name="W")
        bias = tf.Variable(tf.zeros([1, outSize]), name="bias")
        W_plus_b = tf.matmul(inputs, Weights) + bias
        if activ_func is None:
            return W_plus_b
        else:
            return activ_func(W_plus_b)

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

with tf.name_scope(name="inputs"):
    xs = tf.placeholder(tf.float32, [None, 1], name="x_input")  # None, not -1
    ys = tf.placeholder(tf.float32, [None, 1], name="y_input")
l1 = addLayer(xs, 1, 10, activ_func=tf.nn.relu)
y_pre = addLayer(l1, 10, 1, activ_func=None)

with tf.name_scope("loss"):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - y_pre),
                                        reduction_indices=[1]))
with tf.name_scope("train"):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    writer = tf.summary.FileWriter("logs/", sess.graph)
    sess.run(init)
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
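With the FileWriter writing the graph to logs/, you can inspect it by running tensorboard --logdir logs/ in a terminal and opening the printed address in a browser; the inputs, layer, loss, and train scopes defined above show up as collapsed nodes.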