% --- Hyperparameters for CD-1 training of a binary RBM ---
epsilonw   = 0.01;      % learning rate for the weights
epsilonvb  = 0.01;      % learning rate for the visible-unit biases
epsilonhb  = 0.01;      % learning rate for the hidden-unit biases
weightcost = 0.001;     % L2 weight-decay coefficient (applied to weights only)
initialmomentum = 0.5;  % momentum for the early epochs
finalmomentum   = 0.9;  % momentum after epoch 5

% batchdata is numcases x numdims x numbatches (prepared by the caller)
[numcases numdims numbatches]=size(batchdata);

% --- Model parameters: small random weights, zero biases ---
vishid    = 0.1*randn(numdims, numhid);
visbiases = zeros(1,numdims);
hidbiases = zeros(1,numhid);

% --- Buffers for positive/negative phase statistics ---
poshidprobs = zeros(numcases,numhid);
neghidprobs = zeros(numcases,numhid);
posprods    = zeros(numdims,numhid);
negprods    = zeros(numdims,numhid);

% --- Momentum accumulators for the parameter updates ---
vishidinc  = zeros(numdims,numhid);
visbiasinc = zeros(1,numdims);
hidbiasinc = zeros(1,numhid);

% Hidden-unit probabilities for every batch, filled during training and
% left in the workspace for the caller (e.g. to train the next layer).
batchposhidprobs=zeros(numcases,numhid,numbatches);
% CD-1 (Contrastive Divergence, one Gibbs step) training loop for an RBM
% with logistic visible and hidden units.  Relies on workspace variables
% set up above (batchdata, vishid, biases, increments) plus `maxepoch`
% and `epoch` from the caller.  NOTE(review): the loop starts at the
% CURRENT value of `epoch`, not 1 — presumably to allow resuming an
% interrupted run; confirm the caller initializes `epoch` before entry.
for epoch = epoch:maxepoch, % sweep over the full training set per epoch
errsum=0; % accumulated squared reconstruction error for this epoch
for batch = 1:numbatches, % process the data one mini-batch at a time
% POSITIVE PHASE: propagate data from the visible to the hidden layer
data = batchdata(:,:,batch);
poshidprobs = 1./(1 + exp(-data*vishid - repmat(hidbiases,numcases,1))); % p(h=1|v) for each training case
batchposhidprobs(:,:,batch)=poshidprobs;
posprods = data' * poshidprobs; % data-driven correlation <v_i h_j>
poshidact = sum(poshidprobs); % per-unit hidden activity totals
posvisact = sum(data); % per-unit visible activity totals
poshidstates = poshidprobs > rand(numcases,numhid); % sample binary hidden states from the probabilities
% NEGATIVE PHASE: reconstruct the visible layer from the sampled hidden
% states, then re-infer the hidden probabilities from the reconstruction
negdata = 1./(1 + exp(-poshidstates*vishid' - repmat(visbiases,numcases,1))); % reconstruction p(v=1|h)
neghidprobs = 1./(1 + exp(-negdata*vishid - repmat(hidbiases,numcases,1))); % p(h=1|v) from the reconstruction (hidden, not visible, probabilities)
negprods = negdata'*neghidprobs; % model-driven correlation <v_i h_j>
neghidact = sum(neghidprobs);
negvisact = sum(negdata);
err= sum(sum( (data-negdata).^2 )); % squared reconstruction error for this batch
errsum = err + errsum;
% Switch to the larger momentum once training has settled (after epoch 5)
if epoch>5,
momentum=finalmomentum;
else
momentum=initialmomentum;
end;
% UPDATE weights and biases: momentum plus the CD-1 gradient estimate;
% weight decay is applied to the weights only, not to the biases
vishidinc = momentum*vishidinc + ...
epsilonw*( (posprods-negprods)/numcases - weightcost*vishid);
visbiasinc = momentum*visbiasinc + (epsilonvb/numcases)*(posvisact-negvisact);
hidbiasinc = momentum*hidbiasinc + (epsilonhb/numcases)*(poshidact-neghidact);
vishid = vishid + vishidinc;
visbiases = visbiases + visbiasinc;
hidbiases = hidbiases + hidbiasinc;
end;
end;
% Source: http://blog.csdn.net/zhurui_idea/article/details/44568567