BP Networks
Q: How do I implement a three-layer BP network with MATLAB's Neural Network Toolbox?
% Read in the training and testing data
Input=[];
Output=[];
str={'Test','Check'};
Data=textread([str{1},'.txt']);   % read the training data
Input=Data(:,1:end-1);    % first five columns of the table (input components)
Output=Data(:,end);       % last column of the table (output values)
Data=textread([str{2},'.txt']);   % read the testing data
CheckIn=Data(:,1:end-1);  % first five columns of the table (input components)
CheckOut=Data(:,end);     % last column of the table (output values)
Input=Input';             % matrix transpose
Output=Output';
CheckIn=CheckIn';
CheckOut=CheckOut';
[Input,minp,maxp,Output,mint,maxt]=premnmx(Input,Output);   % normalize the data to [-1,1]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Neural network parameter settings
%==== tunable parameters
Para.Goal=0.0001;      % training target error
Para.Epochs=800;       % number of training epochs
Para.LearnRate=0.1;    % learning rate
%====
Para.Show=5;           % display interval during training
Para.InRange=repmat([-1 1],size(Input,1),1);   % range of each input variable
Para.Neurons=[size(Input,1)*2+1 1];      % neurons in the last two layers
Para.TransferFcn={'logsig','purelin'};   % transfer functions of the layers
Para.TrainFcn='trainlm';                 % training function
% traingd:  gradient-descent backpropagation
% traingda: gradient descent with an adaptive learning rate
% traingdm: gradient descent with momentum
% traingdx: gradient descent with momentum and an adaptive learning rate
Para.LearnFcn='learngdm';    % learning function
Para.PerformFcn='sse';       % performance (error) function
Para.InNum=size(Input,1);    % input dimension
Para.IWNum=Para.InNum*Para.Neurons(1);   % number of input weights
Para.LWNum=prod(Para.Neurons);    % number of layer weights
Para.BiasNum=sum(Para.Neurons);   % number of biases
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Net=newff(Para.InRange,Para.Neurons,Para.TransferFcn,...
    Para.TrainFcn,Para.LearnFcn,Para.PerformFcn);   % build the network
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Net.trainParam.show=Para.Show;       % display interval
Net.trainParam.goal=Para.Goal;       % target error
Net.trainParam.lr=Para.LearnRate;    % learning rate
Net.trainParam.epochs=Para.Epochs;   % number of epochs
Net.performFcn=Para.PerformFcn;      % performance function
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Debugging
Out1=sim(Net,Input);     % simulate the freshly built, untrained network
Sse1=sse(Output-Out1);   % error of the untrained network
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
[Net,TR]=train(Net,Input,Output);   % train the network
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Out3=sim(Net,Input);     % simulate the trained network
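The test data read at the top (CheckIn, CheckOut) is never used in the listing above. A minimal sketch of how it could be evaluated, assuming the same premnmx scaling as the training set:

CheckInN=tramnmx(CheckIn,minp,maxp);     % scale the test inputs with the training ranges
OutCheck=sim(Net,CheckInN);              % simulate the trained network on the test set
OutCheck=postmnmx(OutCheck,mint,maxt);   % map the outputs back to the original units
CheckSse=sse(CheckOut-OutCheck)          % test-set sum of squared errors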
On using a genetic algorithm to improve a BP neural network in MATLAB (reposted)
A program that optimizes the network's weights and biases with a GA. The basic principle and procedure of the optimization can be found in many papers, so I won't repeat them here; below is the small program based on gaot5, again fitting y=1/x (kept simple for readability; more elaborate versions also exist).
file: gabp.m
%**************************************
clear all
% Train the BP network's weights and biases with a GA
tic   % start timing
[P,T,R,S1,S2,S]=bpinit;   % initialize the BP network
bounds=ones(S,1)*[-10 10];
num=60;   % initial population size
pop=initializega(num,bounds,'fitness');
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
gen=500;   % number of generations
[x,endPop,bPop,trace]=ga(bounds,'fitness',[],pop,[1e-6 1 1],'maxGenTerm',gen,...
    'normGeomSelect',[0.09],'arithXover',[2],'nonUnifMutation',[2 gen 3]);
for i=1:S
    x(i)=endPop(1,i);
end
sum=0;
% the first R*S1 genes encode W1
for i=1:S1
    for k=1:R
        W1(i,k)=x(R*(i-1)+k);
    end
end
% the next S1*S2 genes (after the first R*S1) encode W2
for i=1:S2
    for k=1:S1
        W2(i,k)=x(S1*(i-1)+k+R*S1);
    end
end
% the next S1 genes (after the first R*S1+S1*S2) encode B1
for i=1:S1
    B1(i,1)=x((R*S1+S1*S2)+i);
end
% the next S2 genes (after the first R*S1+S1*S2+S1) encode B2
for i=1:S2
    B2(i,1)=x((R*S1+S1*S2+S1)+i);
end
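% Worked example of this encoding (with the values from bpinit.m below:
% R=1, S1=3, S2=1, so S=R*S1+S1*S2+S1+S2=10): genes 1-3 form W1 (3x1),
% genes 4-6 form W2 (1x3), genes 7-9 form B1 (3x1), and gene 10 is B2.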
% compute the outputs of layers S1 and S2
for i=1:100
    x1=W1*P(i)+B1;
    A1=1./(1+exp(-x1));
    x2=W2*A1+B2;
    A2=1./(1+exp(-x2));
    %A1=logsig(W1*P(1:17,i),B1);
    %A2=logsig(W2*A1,B2);
    YY(i)=A2;
    % compute the sum of squared errors
    %SE=;
    %sum=sum+sumsqr(T(i)-A2);
end
i=1:1:100;
plot(i,YY(i),'r+',i,T(i),'b-');
%[W1,B1,W2,B2]=gadecod(x);
% simulation result
%TT=simuff(P,W1,B1,'logsig',W2,B2,'logsig')
toc   % stop timing
file: fitness.m
function [sol,eval]=fitness(sol,options)
[P,T,R,S1,S2,S]=bpinit;
numv=size(sol,2)-1;
x=sol(1:numv);
eval=f(x);
file: f.m
function [eval]=f(sol)
numv=size(sol,2);
x=sol(1:numv);
[P,T,R,S1,S2,S]=bpinit;
% get the value of fitness
% the first R*S1 genes encode W1
for i=1:S1
    for k=1:R
        W1(i,k)=x(R*(i-1)+k);
    end
end
% the next S1*S2 genes (after the first R*S1) encode W2
for i=1:S2
    for k=1:S1
        W2(i,k)=x(S1*(i-1)+k+R*S1);
    end
end
% the next S1 genes (after the first R*S1+S1*S2) encode B1
for i=1:S1
    B1(i,1)=x((R*S1+S1*S2)+i);
end
% the next S2 genes (after the first R*S1+S1*S2+S1) encode B2
for i=1:S2
    B2(i,1)=x((R*S1+S1*S2+S1)+i);
end
sum=0;
SE=0;
for i=1:100
    x1=W1*P(i)+B1;
    A1=1./(1+exp(-x1));
    x2=W2*A1+B2;
    A2=1./(1+exp(-x2));
    %A1=logsig(W1*P(1:17,i),B1);
    %A2=logsig(W2*A1,B2);
    SE=sumsqr(T(i)-A2);   % squared error of this sample
    sum=sum+SE;           % accumulate the sum of squared errors
end
eval=10/sum;   % GA fitness value (GAOT maximizes, so take the reciprocal of the total error)
file: bpinit.m
function [P,T,R,S1,S2,S]=bpinit
% BP network initialization: provides the training samples P and T,
% the input/output dimensions R and S2, and the hidden-neuron count S1
for i=1:100
    P(i)=i;
    T(i)=1/P(i);
end
[R,Q]=size(P);    % R=1
[S2,Q]=size(T);   % S2=1
S1=3;             % 3 hidden neurons
S=R*S1+S1*S2+S1+S2;   % GA chromosome length
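A common follow-up step (not in the original post) is to load the GA result into a toolbox network and refine it by backpropagation; a minimal sketch, assuming the decoded W1, W2, B1, B2 from gabp.m are in the workspace:

net=newff(minmax(P),[S1,S2],{'logsig','logsig'},'trainlm');   % same 1-3-1 logsig architecture
net.IW{1,1}=W1;   % seed with the GA-decoded input weights
net.LW{2,1}=W2;   % GA-decoded layer weights
net.b{1}=B1;      % GA-decoded hidden biases
net.b{2}=B2;      % GA-decoded output bias
net=train(net,P,T);   % fine-tune with gradient-based training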
Below is my code for training a BP network with PSO; the training samples have been stripped out. After downloading, adapt it slightly and it will run. If you run into problems, try to work them out yourself first; if that fails, post or email me.
function psobp
% BP neural network trained by the PSO algorithm
% Copyright by Deng Da-Peng @ 2005
% Email: rexdeng@
% You can change and distribute this code freely for academic usage
% Business usage is strictly prohibited
clc
clear all
AllSamIn=...;    % add all your input data
AllSamOut=...;   % add all your output data
% Pre-process the data with premnmx; you can use other functions
global minAllSamOut;
global maxAllSamOut;
[AllSamInn,minAllSamIn,maxAllSamIn,AllSamOutn,minAllSamOut,maxAllSamOut]=premnmx(AllSamIn,AllSamOut);
% Draw 10 percent of all samples as testing samples, the rest as training samples
i=[10:10:1000];
TestSamIn=[];
TestSamOut=[];
for j=1:100
    TestSamIn=[TestSamIn,AllSamInn(:,i(j))];
    TestSamOut=[TestSamOut,AllSamOutn(:,i(j))];
end
TargetOfTestSam=...;   % add the real output of the testing samples
TrainSamIn=AllSamInn;
TrainSamOut=AllSamOutn;
TrainSamIn(:,i)=[];
TrainSamOut(:,i)=[];
% Evaluating samples
EvaSamIn=...;   % add your evaluation inputs
EvaSamInn=tramnmx(EvaSamIn,minAllSamIn,maxAllSamIn);   % preprocessing
global Ptrain;
Ptrain=TrainSamIn;
global Ttrain;
Ttrain=TrainSamOut;
Ptest=TestSamIn;
Ttest=TestSamOut;
% Initialize BP network parameters
global indim;
indim=5;
global hiddennum;
hiddennum=3;
global outdim;
outdim=1;
% Initialize PSO parameters
vmax=0.5;       % maximum velocity
minerr=0.001;   % minimum error
wmax=0.90;
wmin=0.30;
global itmax;   % maximum iteration number
itmax=300;
c1=2;
c2=2;
for iter=1:itmax
    W(iter)=wmax-((wmax-wmin)/itmax)*iter;   % inertia weight declining linearly
end
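% e.g. with wmax=0.90, wmin=0.30, itmax=300: W(1)=0.898 and W(300)=0.30,
% so the inertia weight falls linearly from about 0.9 to 0.3.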
% Particles are initialized randomly between (a,b)
a=-1;
b=1;
% Velocities between (m,n) (can also be started from zero)
m=-1;
n=1;
global N;   % number of particles
N=40;
global D;   % length of a particle
D=(indim+1)*hiddennum+(hiddennum+1)*outdim;
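% Worked example with the values above: D=(5+1)*3+(3+1)*1=22, i.e. 15 input
% weights + 3 hidden biases + 3 layer weights + 1 output bias.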
% Initialize the positions of the particles
rand('state',sum(100*clock));
X=a+(b-a)*rand(N,D,1);
% Initialize the velocities of the particles
V=m+(n-m)*rand(N,D,1);
global fvrec;
MinFit=[];
BestFit=[];
% Function to be minimized: the performance function, i.e. the MSE of the network
global net;
net=newff(minmax(Ptrain),[hiddennum,outdim],{'tansig','purelin'});
fitness=fitcal(X,net,indim,hiddennum,outdim,D,Ptrain,Ttrain,minAllSamOut,maxAllSamOut);
fvrec(:,1,1)=fitness(:,1,1);
[C,I]=min(fitness(:,1,1));
MinFit=[MinFit C];
BestFit=[BestFit C];
L(:,1,1)=fitness(:,1,1);   % record the fitness of every particle at each iteration
B(1,1,1)=C;                % record the minimum fitness among the particles
gbest(1,:,1)=X(I,:,1);     % the global best x in the population
% Matrix composed of the gbest vector
for p=1:N
    G(p,:,1)=gbest(1,:,1);
end
for i=1:N
    pbest(i,:,1)=X(i,:,1);
end
V(:,:,2)=W(1)*V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1));
%V(:,:,2)=cf*(W(1)*V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1)));
%V(:,:,2)=cf*(V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1)));
% Limit the velocity of the particles by vmax
for ni=1:N
    for di=1:D
        if V(ni,di,2)>vmax
            V(ni,di,2)=vmax;
        elseif V(ni,di,2)<-vmax
            V(ni,di,2)=-vmax;
        end
    end
end
X(:,:,2)=X(:,:,1)+V(:,:,2);
%******************************************************
for j=2:itmax
    disp('Iteration and Current Best Fitness')
    disp(j-1)
    disp(B(1,1,j-1))
    % Calculation of new positions
    fitness=fitcal(X,net,indim,hiddennum,outdim,D,Ptrain,Ttrain,minAllSamOut,maxAllSamOut);
    fvrec(:,1,j)=fitness(:,1,j);
    %[maxC,maxI]=max(fitness(:,1,j));
    %MaxFit=[MaxFit maxC];
    %MeanFit=[MeanFit mean(fitness(:,1,j))];
    [C,I]=min(fitness(:,1,j));
    MinFit=[MinFit C];
    BestFit=[BestFit min(MinFit)];
    L(:,1,j)=fitness(:,1,j);
    B(1,1,j)=C;
    gbest(1,:,j)=X(I,:,j);
    [C,I]=min(B(1,1,:));
    % Keep gbest as the best particle that has occurred so far
    if B(1,1,j)<=C
        gbest(1,:,j)=gbest(1,:,j);
    else
        gbest(1,:,j)=gbest(1,:,I);
    end
    if C<=minerr, break, end
    % Matrix composed of the gbest vector
    if j>=itmax, break, end
    for p=1:N
        G(p,:,j)=gbest(1,:,j);
    end
    for i=1:N
        [C,I]=min(L(i,1,:));
        if L(i,1,j)<=C
            pbest(i,:,j)=X(i,:,j);
        else
            pbest(i,:,j)=X(i,:,I);
        end
    end
    V(:,:,j+1)=W(j)*V(:,:,j)+c1*rand*(pbest(:,:,j)-X(:,:,j))+c2*rand*(G(:,:,j)-X(:,:,j));
    %V(:,:,j+1)=cf*(W(j)*V(:,:,j)+c1*rand*(pbest(:,:,j)-X(:,:,j))+c2*rand*(G(:,:,j)-X(:,:,j)));
    %V(:,:,j+1)=cf*(V(:,:,j)+c1*rand*(pbest(:,:,j)-X(:,:,j))+c2*rand*(G(:,:,j)-X(:,:,j)));
    % Limit the velocity of the particles by vmax
    for ni=1:N
        for di=1:D
            if V(ni,di,j+1)>vmax
                V(ni,di,j+1)=vmax;
            elseif V(ni,di,j+1)<-vmax
                V(ni,di,j+1)=-vmax;
            end
        end
    end
    X(:,:,j+1)=X(:,:,j)+V(:,:,j+1);
end
disp('Iteration and Current Best Fitness')
disp(j)
disp(B(1,1,j))
disp('Global Best Fitness and Occurred Iteration')
[C,I]=min(B(1,1,:))
% Simulate the network: decode gbest into weights and biases
for t=1:hiddennum
    x2iw(t,:)=gbest(1,((t-1)*indim+1):t*indim,j);
end
for r=1:outdim
    x2lw(r,:)=gbest(1,(indim*hiddennum+1):(indim*hiddennum+hiddennum),j);
end
x2b=gbest(1,((indim+1)*hiddennum+1):D,j);
x2b1=x2b(1:hiddennum).';
x2b2=x2b(hiddennum+1:hiddennum+outdim).';
net.IW{1,1}=x2iw;   % load the decoded input weights
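% The post breaks off here; presumably the remaining decoded parameters are
% loaded the same way (an assumption, not part of the original listing):
net.LW{2,1}=x2lw;   % layer weights
net.b{1}=x2b1;      % hidden-layer biases
net.b{2}=x2b2;      % output-layer bias

The helper fitcal is called above but never included in the post. A minimal sketch of what it might look like, assuming the parameter layout implied by the decoding loops and the MSE performance measure named in the comments (a hypothetical reconstruction, not the author's code):

file: fitcal.m
function fitness=fitcal(X,net,indim,hiddennum,outdim,D,Ptrain,Ttrain,minAllSamOut,maxAllSamOut)
% For each particle (and each stored iteration page), load its entries into
% the network's weights and biases and return the training-set MSE.
[N,dummy,pages]=size(X);
fitness=zeros(N,1,pages);
for p=1:pages
    for n=1:N
        x=X(n,:,p);
        % entries 1..indim*hiddennum: input weights, one row per hidden neuron
        net.IW{1,1}=reshape(x(1:indim*hiddennum),indim,hiddennum).';
        % next hiddennum*outdim entries: layer weights
        net.LW{2,1}=reshape(x(indim*hiddennum+1:indim*hiddennum+hiddennum*outdim),hiddennum,outdim).';
        % remaining entries: hidden-layer and output-layer biases
        b=x(indim*hiddennum+hiddennum*outdim+1:D);
        net.b{1}=b(1:hiddennum).';
        net.b{2}=b(hiddennum+1:hiddennum+outdim).';
        out=postmnmx(sim(net,Ptrain),minAllSamOut,maxAllSamOut);   % back to original units
        tgt=postmnmx(Ttrain,minAllSamOut,maxAllSamOut);
        fitness(n,1,p)=mse(tgt-out);   % MSE of this particle's network
    end
end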