Building a BP Neural Network with pybrain

Date: 2022-01-05 19:43:37

pybrain is a Python package for building neural networks.
Official documentation: http://pybrain.org/docs/index.html

Installation:

$ git clone git://github.com/pybrain/pybrain.git
$ python setup.py install
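
After installation, a quick smoke test (a minimal sketch, not part of the original setup) can confirm that pybrain imports correctly, for example with the buildNetwork shortcut:

# Sanity check after installation -- sketch only
from pybrain.tools.shortcuts import buildNetwork

net = buildNetwork(2, 3, 1)     # 2 inputs, 3 hidden units, 1 output
print net.activate([1, 2])      # should print a single-element array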

Below, sklearn is used as a helper to build a BP neural network, save it, and reload it later for reuse.

Building a BP regression model

#coding:utf-8

'''
env:
python 2.7
pybrain 0.3.3
sklearn 0.18.1
'''

from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, TanhLayer, FullConnection
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

from sklearn.datasets import load_boston
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib

# Load the demo data from the sklearn built-in datasets
boston = load_boston()
x = boston.data
y = boston.target.reshape(-1,1)

# Split into training and test sets at a 7:3 ratio, without shuffling
per = int(len(x) * 0.7)

# Normalize the data (normalization is generally a must when using sigmoid-like activations)
sx = MinMaxScaler()
sy = MinMaxScaler()
xTrain = x[:per]
xTrain = sx.fit_transform(xTrain)
yTrain = y[:per]
yTrain = sy.fit_transform(yTrain)

xTest = x[per:]
xTest = sx.transform(xTest)
yTest = y[per:]
yTest = sy.transform(yTest)

# Initialize the feed-forward network
fnn = FeedForwardNetwork()

# Build the input, hidden and output layers; keep the hidden structure small (3-5 is usually plenty), not too large
inLayer = LinearLayer(x.shape[1], 'inLayer')
hiddenLayer = TanhLayer(3, 'hiddenLayer')
outLayer = LinearLayer(1, 'outLayer')

# Add the input layer, hidden layer and output layer to fnn
fnn.addInputModule(inLayer)
fnn.addModule(hiddenLayer)
fnn.addOutputModule(outLayer)

# Create full connections between the layers
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)

# Register the connections with fnn
fnn.addConnection(in_to_hidden)
fnn.addConnection(hidden_to_out)
fnn.sortModules()

# Initialize the supervised dataset
DS = SupervisedDataSet(x.shape[1],1)

# Add the training samples and their labels to DS
for i in range(len(xTrain)):
    DS.addSample(xTrain[i], yTrain[i])

# Train with backpropagation until convergence, with at most 1000 epochs
trainer = BackpropTrainer(fnn, DS, learningrate=0.01, verbose=True)
trainer.trainUntilConvergence(maxEpochs=1000)


# Validate the model on the test set
values = []
for sample in xTest:
    values.append(sy.inverse_transform(fnn.activate(sample).reshape(1, -1))[0, 0])

# Compute the RMSE (root mean squared error) on the test set
rmse = (sum(map(lambda a, b: (a - b) ** 2, boston.target[per:], values)) / float(len(xTest))) ** 0.5
print rmse

# Save the trained network and the scalers
NetworkWriter.writeToFile(fnn, 'pathName.xml')
joblib.dump(sx, 'sx.pkl', compress=3)
joblib.dump(sy, 'sy.pkl', compress=3)

# Reload the saved network and scalers
fnn = NetworkReader.readFrom('pathName.xml')
sx = joblib.load('sx.pkl')
sy = joblib.load('sy.pkl')
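
As a quick sketch of the "save once, reload and reuse" idea (using the xTest array and the reloaded fnn, sx and sy objects from above), the reloaded network can be applied to a sample like this:

# Sketch: predict with the reloaded network and undo the scaling
sample = xTest[0]                                      # already scaled by sx
pred = fnn.activate(sample)                            # prediction in scaled space
print sy.inverse_transform(pred.reshape(1, -1))[0, 0]  # back to original units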

Building a BP classification model

# coding: utf-8

from collections import Counter

from pybrain.structure import FeedForwardNetwork
from pybrain.structure import (LinearLayer,
                               TanhLayer,
                               FullConnection,
                               )
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import ClassificationDataSet

from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler


def generateDS(input_, out, num, xtrain, ytrain):

    alldata = ClassificationDataSet(input_, out, nb_classes=num)

    for x, y in zip(xtrain, ytrain):
        alldata.addSample(x, y)

    # Pitfall: splitWithProportion() returns plain SupervisedDataSet objects, so the samples
    # have to be copied back into ClassificationDataSets before calling _convertToOneOfMany().
    # Reference: http://*.com/questions/27887936/attributeerror-using-pybrain-splitwithportion-object-type-changed/30869317#30869317
    tstdata_temp, trndata_temp = alldata.splitWithProportion(.3)

    tstdata = ClassificationDataSet(input_, out, nb_classes=num)
    for n in xrange(tstdata_temp.getLength()):
        tstdata.addSample(*[tstdata_temp.getSample(n)[i] for i in range(2)])

    trndata = ClassificationDataSet(input_, out, nb_classes=num)
    for n in xrange(trndata_temp.getLength()):
        trndata.addSample(*[trndata_temp.getSample(n)[i] for i in range(2)])

    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()

    return trndata, tstdata


def buildBP(input_, hidden, output, trndata):

    fnn = FeedForwardNetwork()

    inLayer = LinearLayer(input_, 'inLayer')
    hidden0 = TanhLayer(hidden, 'hiddenLayer')
    outLayer = LinearLayer(output, 'outLayer')

    fnn.addInputModule(inLayer)
    fnn.addModule(hidden0)
    fnn.addOutputModule(outLayer)

    in_to_hidden = FullConnection(inLayer, hidden0)
    hidden_to_out = FullConnection(hidden0, outLayer)

    fnn.addConnection(in_to_hidden)
    fnn.addConnection(hidden_to_out)
    fnn.sortModules()

    trainer = BackpropTrainer(fnn, trndata, verbose=True, learningrate=.01)
    trainer.trainUntilConvergence(maxEpochs=1000)

    return fnn


def result(data, fnn, char='test'):

    value = 0
    length = data.getLength()
    for i in xrange(length):
        predict = fnn.activate(data.getSample(i)[0]).argmax()
        real = data.getSample(i)[1].argmax()
        #print predict, real

        if predict == real:
            value += 1
    print ('%s' % char).center(60, '*')
    print 'accuracy: %s' % (value / float(length))


if __name__ == '__main__':

    iris = load_iris()
    x, y = iris.data, iris.target
    scaler = MinMaxScaler()
    x_train = scaler.fit_transform(x)
    #print x_train[:10]

    input_ = x_train.shape[1]
    out = 3
    num = len(Counter(y).keys())
    trndata, tstdata = generateDS(input_, 1, num, x_train, y)
    fnn = buildBP(input_, 3, out, trndata)
    result(tstdata, fnn)
    result(trndata, fnn, 'train')

I won't paste the full total error output here; it is mainly used to check whether training is converging. If the error drops too quickly you can lower the learningrate a little, and if it drops too slowly you can raise it a little. Here one hidden layer with 3 neurons is used; it is worth trying a few other sizes, as sketched below.
In the end the classification accuracy reaches about 97% on the training set and about 93% on the test set. The parameters could be tuned further, but since this is just an example I'll stop here.
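
As a rough sketch (reusing the buildBP and result helpers defined above; the candidate sizes are arbitrary choices for illustration), one way to compare a few hidden-layer sizes is:

# Sketch only: compare a few hidden-layer sizes with the helpers above
for hidden in (3, 5, 8):
    print ('hidden=%s' % hidden).center(60, '-')
    net = buildBP(input_, hidden, out, trndata)
    result(tstdata, net)

# To try a different learning rate, build the trainer directly, e.g.:
# trainer = BackpropTrainer(net, trndata, learningrate=0.05, verbose=True)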

A few more words: pybrain is genuinely convenient to use, but the official documentation is quite careless, and you run into all kinds of pitfalls; you can hit errors even while following the docs. Still, since I haven't found a better third-party library with this level of support for BP networks, for now I just have to work through the pitfalls one by one.

The workaround for the pitfall encountered with _convertToOneOfMany() is recorded here* (see the comment and link inside generateDS above).
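
For reference, a minimal toy sketch (standalone illustrative data, not from the example above) of what _convertToOneOfMany() does: it turns the integer class labels stored in the target field into one-of-many (one-hot) vectors, so the network can have one output unit per class.

# Toy illustration of _convertToOneOfMany() -- sketch only
from pybrain.datasets import ClassificationDataSet

ds = ClassificationDataSet(2, 1, nb_classes=3)
ds.addSample([0.1, 0.2], [0])
ds.addSample([0.3, 0.4], [2])
print ds['target']            # integer class labels, one column
ds._convertToOneOfMany()
print ds['target']            # one-hot rows, one column per class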