From 40e49a20cb0caf498ffd2d37865861b7551a74f1 Mon Sep 17 00:00:00 2001 From: clement etienam <39471613+clementetienam@users.noreply.github.com> Date: Sun, 6 Sep 2020 09:09:40 +0100 Subject: [PATCH] Add files via upload --- Crucial/Bayesian_Clement.m | 12 + Crucial/Bayesian_Clement_2.m | 16 + Crucial/ESMDA.m | 55 +++ Crucial/Forwarding.m | 105 +++++ Crucial/Forwarding_prime.m | 108 +++++ Crucial/Get_cov.m | 4 + Crucial/Get_ensemble.m | 17 + Crucial/Get_ensemble_2.m | 13 + Crucial/Optimize_clement.m | 110 +++++ Crucial/Optimize_clement_DNN.m | 60 +++ Crucial/Optimize_clement_Gp.m | 78 ++++ Crucial/Optimize_clement_RF.m | 56 +++ Crucial/Unseen_soft_1.m | 44 ++ Crucial/Unseen_soft_2.m | 20 + Crucial/Unseen_soft_3.m | 31 ++ Crucial/colordg.m | 46 ++ Crucial/fminlbfgs.m | 754 +++++++++++++++++++++++++++++++++ Crucial/pinv2.m | 29 ++ Crucial/prediction_1.m | 32 ++ Crucial/prediction_2.m | 15 + Crucial/prediction_3.m | 49 +++ 21 files changed, 1654 insertions(+) create mode 100644 Crucial/Bayesian_Clement.m create mode 100644 Crucial/Bayesian_Clement_2.m create mode 100644 Crucial/ESMDA.m create mode 100644 Crucial/Forwarding.m create mode 100644 Crucial/Forwarding_prime.m create mode 100644 Crucial/Get_cov.m create mode 100644 Crucial/Get_ensemble.m create mode 100644 Crucial/Get_ensemble_2.m create mode 100644 Crucial/Optimize_clement.m create mode 100644 Crucial/Optimize_clement_DNN.m create mode 100644 Crucial/Optimize_clement_Gp.m create mode 100644 Crucial/Optimize_clement_RF.m create mode 100644 Crucial/Unseen_soft_1.m create mode 100644 Crucial/Unseen_soft_2.m create mode 100644 Crucial/Unseen_soft_3.m create mode 100644 Crucial/colordg.m create mode 100644 Crucial/fminlbfgs.m create mode 100644 Crucial/pinv2.m create mode 100644 Crucial/prediction_1.m create mode 100644 Crucial/prediction_2.m create mode 100644 Crucial/prediction_3.m diff --git a/Crucial/Bayesian_Clement.m b/Crucial/Bayesian_Clement.m new file mode 100644 index 0000000..6be893d --- /dev/null +++ b/Crucial/Bayesian_Clement.m @@ -0,0 +1,12 @@ +function DupdateK=Bayesian_Clement(hypps,N,f,clfx,ytrain,oldfolder,ytrue,alpha,combo1) +% ytrain=y_train; +Sim=zeros(2,N); +for i=1:N + aa=(hypps(:,i))'; +Sim(:,i)=abs((Forwarding(aa,f,clfx,ytrain,oldfolder,combo1))'); +end + +% ytrue=y_train(5,:); + +[DupdateK] = ESMDA (hypps,ytrue', N, Sim,alpha); +end \ No newline at end of file diff --git a/Crucial/Bayesian_Clement_2.m b/Crucial/Bayesian_Clement_2.m new file mode 100644 index 0000000..278d8e6 --- /dev/null +++ b/Crucial/Bayesian_Clement_2.m @@ -0,0 +1,16 @@ +function DupdateK=Bayesian_Clement_2(hypps,N,f,clfx,ytrain,... 
+ oldfolder,ytrue,alpha,combo1,suni) +% ytrain=y_train; +%Sim=zeros(2,N); +parfor i=1:N + aa=(hypps(:,i)); + aa=reshape(aa,[],suni) + spit=abs((Forwarding(aa,f,clfx,ytrain,oldfolder,combo1))); + spit=reshape(spit,[],1); +Sim(:,i)=spit; +end + +% ytrue=y_train(5,:); + +[DupdateK] = ESMDA (hypps,reshape(ytrue,[],1), N, Sim,alpha); +end \ No newline at end of file diff --git a/Crucial/ESMDA.m b/Crucial/ESMDA.m new file mode 100644 index 0000000..43ce640 --- /dev/null +++ b/Crucial/ESMDA.m @@ -0,0 +1,55 @@ +function [DupdateK] = ESMDA (sgsim,f, N, Sim1,alpha) +sd=1; +rng(sd) + +%----------------------------------------------------------------------------- + +parfor i=1:size(f,1) + aa=f(i,:); + aa1=num2str(aa); + usee=str2num(aa1(1)); + stddWOPR1 =1 %0.01*f(i,:); + stdall(i,:)=stddWOPR1; + end + +nobs = length(f); +noise = randn(max(10000,nobs),1); + +Error1=stdall; +sig=Error1; +parfor i = 1 : length(f) + f(i) = f(i) + sig(i)*noise(end-nobs+i); +end +R = sig.^2; + Dj = repmat(f, 1, N); + parfor i = 1:size(Dj,1) + rndm(i,:) = randn(1,N); + rndm(i,:) = rndm(i,:) - mean(rndm(i,:)); + rndm(i,:) = rndm(i,:) / std(rndm(i,:)); + Dj(i,:) = Dj(i,:) + sqrt(alpha)*sqrt(R(i)) * rndm(i,:); + end + + +Cd2 =diag(R); +overall=sgsim; + +Y=overall; %State variable,it is important we include simulated measurements in the ensemble state variable +M = mean(Sim1,2); +% Mean of the ensemble state +M2=mean(overall,2); +%M=M' +% Get the ensemble states pertubations +parfor j=1:N + S(:,j)=Sim1(:,j)-M; +end +parfor j=1:N + yprime(:,j)=overall(:,j)-M2; +end +Cyd=(yprime*S')./((N-1)); +Cdd=(S*S')./((N-1)); + +Ynew=Y+(Cyd*pinv2((Cdd+(alpha.*Cd2))))*(Dj-Sim1); + +DupdateK=Ynew; + +end \ No newline at end of file diff --git a/Crucial/Forwarding.m b/Crucial/Forwarding.m new file mode 100644 index 0000000..7c04e00 --- /dev/null +++ b/Crucial/Forwarding.m @@ -0,0 +1,105 @@ +function [Hardmean,Softmean]=Forwarding(parameters,fv,... + clfx,y_train,oldfolder,combo1) + +X_test=(clfx.transform(parameters)); + +cd(oldfolder); + +%% +for ii=1:size(y_train,2) +% fprintf('Predicting measurement %d | %d .\n', ii,2); +folderk = fv; +switch combo1 + case 1 + +cd(folderk) + +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +% clfy = MinMaxScalery(); +% (clfy.fit(y_train(:,ii))); + + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + + +Xtrainbig=load('Xtrainbig'); +Xtrainbig=Xtrainbig.Xtrainbig; + +ytrainbig=load('ytrainbig'); +ytrainbig=ytrainbig.ytrainbig; + +[Hardmean(:,ii)]=prediction_1(Regressors{ii,1},... + pred_class(X_test, Classifiers{ii,1})... +,X_test,Xtrainbig{ii,1},ytrainbig{ii,1},Experts(ii,1),clfysses{ii,1}); + +[Softmean(:,ii),~]=Unseen_soft_1(Regressors{ii,1},... + Classifiers{ii,1},X_test,Xtrainbig{ii,1},ytrainbig{ii,1}... +,Experts(ii,1),clfysses{ii,1}); + + case 2 + +cd(folderk) + +% disp('Expert=DNN, Gate=DNN') +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + +Classallsbig=load('Classallsbig'); +Classallsbig=Classallsbig.Classallsbig; + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + +% cd(oldfolder) +[Hardmean(:,ii)]=prediction_2(Regressors{ii,1},pred_class(X_test... + , Classifiers{ii,1})... +,X_test,Classallsbig{ii,1},Experts(ii,1),clfysses{ii,1}); + +[Softmean(:,ii)]=Unseen_soft_2(Regressors{ii,1},... 
+ Classifiers{ii,1},X_test,Classallsbig{ii,1},Experts(ii,1),... +clfysses{ii,1}); + case 3 + +cd(folderk) +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + +Classallsbig=load('Classallsbig'); +Classallsbig=Classallsbig.Classallsbig; + + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + +% cd(oldfolder) +[Hardmean(:,ii)]=prediction_3(Regressors{ii,1}... + ,str2double(predict(Classifiers{ii,1},X_test))... +,X_test,Classallsbig{ii,1},Experts(ii,1),clfysses{ii,1}); + +[Softmean(:,ii),~]=Unseen_soft_3(Regressors{ii,1}... + ,Classifiers{ii,1},X_test,Experts(ii,1),clfysses{ii,1}); +% +end + cd(oldfolder) +end + +end \ No newline at end of file diff --git a/Crucial/Forwarding_prime.m b/Crucial/Forwarding_prime.m new file mode 100644 index 0000000..f764298 --- /dev/null +++ b/Crucial/Forwarding_prime.m @@ -0,0 +1,108 @@ +function [Hardmean,Softmean]=Forwarding_prime(parameters,... + fv,clfx,y_train,oldfolder,combo1) +X_test=(parameters); +cd(oldfolder); + +%% +% Answerhard=cell(2,1); +% Answersoft=cell(2,1); +for ii=1:2 +% fprintf('Predicting measurement %d | %d .\n', ii,2); +folderk = fv; + +switch combo1 + case 1 + +cd(folderk) + + +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +% clfy = MinMaxScalery(); +% (clfy.fit(y_train(:,ii))); + + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + + +Xtrainbig=load('Xtrainbig'); +Xtrainbig=Xtrainbig.Xtrainbig; + +ytrainbig=load('ytrainbig'); +ytrainbig=ytrainbig.ytrainbig; + +[Hardmean(:,ii),Hardvariance(:,ii)]=prediction_1(Regressors{ii,1},... + pred_class(X_test, Classifiers{ii,1})... +,X_test,Xtrainbig{ii,1},ytrainbig{ii,1},Experts,clfysses{ii,1}); +[Softmean(:,ii),Softvariance(:,ii)]=Unseen_soft_1(Regressors{ii,1},... + Classifiers{ii,1},X_test,Xtrainbig{ii,1},ytrainbig{ii,1}... +,Experts,clfysses{ii,1}); + + case 2 + +cd(folderk) + +% disp('Expert=DNN, Gate=DNN') +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + +Classallsbig=load('Classallsbig'); +Classallsbig=Classallsbig.Classallsbig; + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + +% cd(oldfolder) +[Hardmean(:,ii)]=prediction_2(Regressors{ii,1},pred_class(X_test... + , Classifiers{ii,1})... +,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +[Softmean(:,ii)]=Unseen_soft_2(Regressors{ii,1},... + Classifiers{ii,1},X_test,Classallsbig{ii,1},... +Experts,clfysses{ii,1}); + case 3 + +cd(folderk) +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + +Classallsbig=load('Classallsbig'); +Classallsbig=Classallsbig.Classallsbig; + + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + +% cd(oldfolder) +[Hardmean(:,ii),Hardvariance(:,ii)]=prediction_3(Regressors{ii,1}... + ,str2double(predict(Classifiers{ii,1},X_test))... +,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +[Softmean(:,ii),Softvariance(:,ii)]=Unseen_soft_3(Regressors{ii,1}... 
+ ,Classifiers{ii,1},X_test,Experts,clfysses{ii,1}); +% +end + cd(oldfolder) +end + + + +end \ No newline at end of file diff --git a/Crucial/Get_cov.m b/Crucial/Get_cov.m new file mode 100644 index 0000000..541b454 --- /dev/null +++ b/Crucial/Get_cov.m @@ -0,0 +1,4 @@ +function cov_big= Get_cov(X_test2,hyp_updated) +for ik=1:size(X_test2,2) +cov_big(:,ik)=cov(hyp_updated(:,ik)); +end \ No newline at end of file diff --git a/Crucial/Get_ensemble.m b/Crucial/Get_ensemble.m new file mode 100644 index 0000000..d5af6f4 --- /dev/null +++ b/Crucial/Get_ensemble.m @@ -0,0 +1,17 @@ +function ensemble_ini=Get_ensemble(N,X_test2,meanss2,meanss) +% for j=1:N +% for jj=1:size(X_test2,2) +% hyp_inipure(:,jj) = normrnd( meanss2(:,jj),0.1*meanss2(:,jj),1,1) ; +% end +% hyp_inipure=abs(hyp_inipure); +% hyp_inipure(:,6)=round(hyp_inipure(:,6)); +% hyp_inipure(:,8)=round(hyp_inipure(:,8)); +% hyp_ini=(clfx.transform(hyp_inipure)); +% ensemble_ini(:,j)=hyp_ini'; +% end + + for jj=1:size(X_test2,2) + hyp_inipuree(1:N,jj) = unifrnd(meanss2(:,jj),meanss(:,jj),10,1); + end +ensemble_ini=hyp_inipuree'; +end \ No newline at end of file diff --git a/Crucial/Get_ensemble_2.m b/Crucial/Get_ensemble_2.m new file mode 100644 index 0000000..598748a --- /dev/null +++ b/Crucial/Get_ensemble_2.m @@ -0,0 +1,13 @@ +function ensemble_ini=Get_ensemble_2(N,X_test2,meanss2,meanss,Nop) + +p=2; +szss=Nop; +for k=1:N + for jj=1:size(X_test2,2) + aj=meanss2(:,jj)+ (meanss(:,jj)- meanss2(:,jj))*sum(rand(szss,p),2)/p; + % hyp_inipuree(:,jj) = unifrnd(meanss2(:,jj),meanss(:,jj),szss,1); + hyp_inipuree(:,jj) = reshape(aj,[],1); +end + +ensemble_ini(:,k)=reshape(hyp_inipuree,[],1); +end \ No newline at end of file diff --git a/Crucial/Optimize_clement.m b/Crucial/Optimize_clement.m new file mode 100644 index 0000000..23e7d84 --- /dev/null +++ b/Crucial/Optimize_clement.m @@ -0,0 +1,110 @@ +function out=Optimize_clement(parameters,fv,clfx,y_train,... + oldfolder,ytrue,combo1,suni) +X_test=reshape(parameters,[],suni); +%parfor ii=1:2 + +cd(oldfolder); + +%% +for ii=1:2 +% fprintf('Predicting measurement %d | %d .\n', ii,2); +folderk = fv; + +switch combo1 + case 1 + +cd(folderk) +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +% clfy = MinMaxScalery(); +% (clfy.fit(y_train(:,ii))); + + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + + +Xtrainbig=load('Xtrainbig'); +Xtrainbig=Xtrainbig.Xtrainbig; + +ytrainbig=load('ytrainbig'); +ytrainbig=ytrainbig.ytrainbig; + +[Hardmean(:,ii)]=prediction_1(Regressors{ii,1},... + pred_class(X_test, Classifiers{ii,1})... +,X_test,Xtrainbig{ii,1},ytrainbig{ii,1},Experts,clfysses{ii,1}); +% [Softmean(:,ii),~]=Unseen_soft_1(Regressors{ii,1},... +% Classifiers{ii,1},X_test,Xtrainbig{ii,1},ytrainbig{ii,1}... +%,Experts,clfy); + + case 2 + +cd(folderk) + +% disp('Expert=DNN, Gate=DNN') +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + +Classallsbig=load('Classallsbig'); +Classallsbig=Classallsbig.Classallsbig; + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + +% cd(oldfolder) +[Hardmean(:,ii)]=prediction_2(Regressors{ii,1},pred_class(X_test... + , Classifiers{ii,1})... +,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +% [Softmean(:,ii)]=Unseen_soft_2(Regressors{ii,1},... 
+% Classifiers{ii,1},X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + case 3 + +cd(folderk) +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + +Classallsbig=load('Classallsbig'); +Classallsbig=Classallsbig.Classallsbig; + + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + +% cd(oldfolder) +[Hardmean(:,ii)]=prediction_3(Regressors{ii,1}... + ,str2double(predict(Classifiers{ii,1},X_test))... +,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +% [Softmean(:,ii),~]=Unseen_soft_3(Regressors{ii,1}... +% ,Classifiers{ii,1},X_test,Experts,clfysses{ii,1}); +% +end + cd(oldfolder) +end + +ytrue=reshape(ytrue,[],1); +Hardmean=reshape(Hardmean,[],1); + +a1=((ytrue-Hardmean).^2); +cc = sum(a1); +out=cc; + +end \ No newline at end of file diff --git a/Crucial/Optimize_clement_DNN.m b/Crucial/Optimize_clement_DNN.m new file mode 100644 index 0000000..50ed935 --- /dev/null +++ b/Crucial/Optimize_clement_DNN.m @@ -0,0 +1,60 @@ +function out=Optimize_clement_DNN(parameters,fv,clfx,y_train,... + oldfolder,ytrue,combo1,Regressors,Classifiers,... + Classallsbig,Experts,clfysses) + + +parameters=reshape(parameters,[],8); +X_test=(clfx.transform(parameters)); +cd(oldfolder); + +%% +for ii=1:2 +switch combo1 + case 1 +[Hardmean(:,ii)]=prediction_1(Regressors{ii,1},... + pred_class(X_test, Classifiers{ii,1})... +,X_test,Xtrainbig{ii,1},ytrainbig{ii,1},Experts,clfysses{ii,1}); +% [Softmean(:,ii),~]=Unseen_soft_1(Regressors{ii,1},... +% Classifiers{ii,1},X_test,Xtrainbig{ii,1},ytrainbig{ii,1}... +%,Experts,clfy); + + case 2 + +[Hardmean(:,ii)]=prediction_2(Regressors{ii,1},pred_class(X_test... + , Classifiers{ii,1})... +,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +% [Softmean(:,ii)]=Unseen_soft_2(Regressors{ii,1},... +% Classifiers{ii,1},X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + case 3 + +[Hardmean(:,ii)]=prediction_3(Regressors{ii,1}... + ,str2double(predict(Classifiers{ii,1},X_test))... +,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +% [Softmean(:,ii),~]=Unseen_soft_3(Regressors{ii,1}... +% ,Classifiers{ii,1},X_test,Experts,clfysses{ii,1}); +% +end + cd(oldfolder) +end +% fprintf('Done measurement %d | %d .\n', ii,2); + +%end + +%% + +%% + +ytrue=reshape(ytrue,[],1); +Hardmean=double(reshape(Hardmean,[],1)); +gg=size(ytrue,1); +% a1=((ytrue'-Hardmean').^2).^0.5; +a1=(1/(2*gg)) * sum((ytrue-Hardmean).^2); + +%a1=abs((ytrue-Hardmean)); + +cc = sum(a1); +out=cc; + +end \ No newline at end of file diff --git a/Crucial/Optimize_clement_Gp.m b/Crucial/Optimize_clement_Gp.m new file mode 100644 index 0000000..1a9c01a --- /dev/null +++ b/Crucial/Optimize_clement_Gp.m @@ -0,0 +1,78 @@ +function out=Optimize_clement_Gp(parameters,fv,clfx,y_train,... + oldfolder,ytrue,combo1,Regressors,Classifiers,... + Xtrainbig,ytrainbig,Experts,clfysses) + + +parameters=reshape(parameters,[],8); +X_test=(clfx.transform(parameters)); +cd(oldfolder); + +%% +for ii=1:2 + +switch combo1 + case 1 +[Hardmean(:,ii)]=prediction_1(Regressors{ii,1},... + pred_class(X_test, Classifiers{ii,1})... +,X_test,Xtrainbig{ii,1},ytrainbig{ii,1},Experts,clfysses{ii,1}); +% [Softmean(:,ii),~]=Unseen_soft_1(Regressors{ii,1},... +% Classifiers{ii,1},X_test,Xtrainbig{ii,1},ytrainbig{ii,1}... +%,Experts,clfy); + + case 2 + +[Hardmean(:,ii)]=prediction_2(Regressors{ii,1},pred_class(X_test... + , Classifiers{ii,1})... 
+,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +% [Softmean(:,ii)]=Unseen_soft_2(Regressors{ii,1},... +% Classifiers{ii,1},X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + case 3 + +cd(folderk) +Classifiers=load('Classifiers'); +Classifiers=Classifiers.Classifiers; + +Experts=load('Experts.out'); + +Classallsbig=load('Classallsbig'); +Classallsbig=Classallsbig.Classallsbig; + + +clfysses=load('clfysses'); +clfysses=clfysses.clfysses; + +Regressors=load('Regressors'); +Regressors=Regressors.Regressors; + +% cd(oldfolder) +[Hardmean(:,ii)]=prediction_3(Regressors{ii,1}... + ,str2double(predict(Classifiers{ii,1},X_test))... +,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +% [Softmean(:,ii),~]=Unseen_soft_3(Regressors{ii,1}... +% ,Classifiers{ii,1},X_test,Experts,clfysses{ii,1}); +% +end + cd(oldfolder) +end +% fprintf('Done measurement %d | %d .\n', ii,2); + +%end + +%% + +%% + +ytrue=reshape(ytrue,[],1); +Hardmean=double(reshape(Hardmean,[],1)); +gg=size(ytrue,1); +% a1=((ytrue'-Hardmean').^2).^0.5; +a1=(1/(2*gg)) * sum((ytrue-Hardmean).^2); + +%a1=abs((ytrue-Hardmean)); + +cc = sum(a1); +out=cc; + +end \ No newline at end of file diff --git a/Crucial/Optimize_clement_RF.m b/Crucial/Optimize_clement_RF.m new file mode 100644 index 0000000..aa483c4 --- /dev/null +++ b/Crucial/Optimize_clement_RF.m @@ -0,0 +1,56 @@ +function out=Optimize_clement_RF(parameters,fv,clfx,y_train,... + oldfolder,ytrue,combo1,Regressors,Classifiers,... + Classallsbig,Experts,clfysses) + + +parameters=reshape(parameters,[],8); +X_test=(clfx.transform(parameters)); +cd(oldfolder); + +%% +for ii=1:2 +switch combo1 + case 1 +[Hardmean(:,ii)]=prediction_1(Regressors{ii,1},... + pred_class(X_test, Classifiers{ii,1})... +,X_test,Xtrainbig{ii,1},ytrainbig{ii,1},Experts,clfysses{ii,1}); +% [Softmean(:,ii),~]=Unseen_soft_1(Regressors{ii,1},... +% Classifiers{ii,1},X_test,Xtrainbig{ii,1},ytrainbig{ii,1}... +%,Experts,clfy); + + case 2 + +[Hardmean(:,ii)]=prediction_2(Regressors{ii,1},pred_class(X_test... + , Classifiers{ii,1})... +,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +% [Softmean(:,ii)]=Unseen_soft_2(Regressors{ii,1},... +% Classifiers{ii,1},X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + case 3 + +[Hardmean(:,ii)]=prediction_3(Regressors{ii,1}... + ,str2double(predict(Classifiers{ii,1},X_test))... +,X_test,Classallsbig{ii,1},Experts,clfysses{ii,1}); + +% [Softmean(:,ii),~]=Unseen_soft_3(Regressors{ii,1}... +% ,Classifiers{ii,1},X_test,Experts,clfysses{ii,1}); +% +end + cd(oldfolder) +end +% fprintf('Done measurement %d | %d .\n', ii,2); + +%% + +ytrue=reshape(ytrue,[],1); +Hardmean=double(reshape(Hardmean,[],1)); +gg=size(ytrue,1); +% a1=((ytrue'-Hardmean').^2).^0.5; +a1=(1/(2*gg)) * sum((ytrue-Hardmean).^2); + +%a1=abs((ytrue-Hardmean)); + +cc = sum(a1); +out=cc; + +end \ No newline at end of file diff --git a/Crucial/Unseen_soft_1.m b/Crucial/Unseen_soft_1.m new file mode 100644 index 0000000..0b460d3 --- /dev/null +++ b/Crucial/Unseen_soft_1.m @@ -0,0 +1,44 @@ +function [Valuee,variance2]=Unseen_soft_1(weights,... 
+ modelNN,X,Xtrains,ytrains,Experts,clfy) + + [~,D] = pred_class(X, modelNN); + +numcols=size(D,2); +Valuee=zeros(size(X,1),1); +variance2=zeros(size(X,1),numcols); +meanfunc=@meanConst; +likfunc = {@likGauss}; + +inf = @infGaussLik; + cov = {@covSEiso}; + infv = @(varargin) inf(varargin{:},struct('s',1.0)); + +%% + +parfor jj=1:size(X,1) + + a00=X(jj,:) ; + Valuee1=zeros(1,numcols); + Valuees1=zeros(1,numcols); + for xx= 1:Experts + + hyp_use=weights{xx,:}; + Xuse=Xtrains{xx,:}; + yuse=ytrains{xx,:}; + cov1 = {'apxSparse',cov,hyp_use.xu}; + + [zz ,s2] = gp(hyp_use, infv, meanfunc, cov1, likfunc, Xuse, yuse, a00); + + zz=reshape(zz,[],1); + s2=reshape(s2,[],1); + Valuee1(:,xx)= zz; + Valuees1(:,xx)= s2; + end + +getit=D(jj,:).*Valuee1; +Valuee(jj,:)=clfy.inverse_transform(sum(getit,2)); +variance2(jj,:)=sqrt(clfy.inverse_transform(sum((D(jj,:).*Valuees1),2)+... + (sum((D(jj,:).*(Valuee1.^2)),2)-... + (sum((D(jj,:).*Valuee1),2)).^2))); +end +end \ No newline at end of file diff --git a/Crucial/Unseen_soft_2.m b/Crucial/Unseen_soft_2.m new file mode 100644 index 0000000..682962c --- /dev/null +++ b/Crucial/Unseen_soft_2.m @@ -0,0 +1,20 @@ +function [Valuee]=Unseen_soft_2(weights,modelNN,X,Class_all,Experts,clfy) + [~,D] = pred_class(X, modelNN); + + +Valuee=zeros(size(X,1),1); +%% +parfor jj=1:size(X,1) + + a00=X(jj,:) ; + Valuee1=zeros(1,numcols); + for xx= 1:Experts + net=weights{xx,:}; + zz = (predict(net,a00'))'; + zz=reshape(zz,[],1); + Valuee1(:,xx)= zz; + end +Valuee(jj,:)=clfy.inverse_transform(sum((D(jj,:).*Valuee1),2)); +end + +end \ No newline at end of file diff --git a/Crucial/Unseen_soft_3.m b/Crucial/Unseen_soft_3.m new file mode 100644 index 0000000..1c3a629 --- /dev/null +++ b/Crucial/Unseen_soft_3.m @@ -0,0 +1,31 @@ +function [Valuee,variance2]=Unseen_soft_3(weights,modelNN,X,Experts,clfy) + + [~,D] = pred_class(X,modelNN); + + +numcols=size(D,2); +Valuee=zeros(size(X,1),1); +variance2=zeros(size(X,1),numcols); +%% +parfor jj=1:size(X,1) + + a00=X(jj,:) ; + Valuee1=zeros(1,numcols); + Valuees1=zeros(1,numcols); + for xx= 1:Experts + + net=weights{xx,:}; + [zz,s2] = predict(net,a00); + zz=reshape(zz,[],1); + s2=reshape(s2,[],1); + Valuee1(:,xx)= zz; + Valuees1(:,xx)= s2; + end + +getit=D(jj,:).*Valuee1; +Valuee(jj,:)=clfy.inverse_transform(sum(getit,2)); +variance2(jj,:)=sqrt(clfy.inverse_transform(sum((D(jj,:).*Valuees1),2)+... + (sum((D(jj,:).*(Valuee1.^2)),2)-... + (sum((D(jj,:).*Valuee1),2)).^2))); +end +end \ No newline at end of file diff --git a/Crucial/colordg.m b/Crucial/colordg.m new file mode 100644 index 0000000..10858b6 --- /dev/null +++ b/Crucial/colordg.m @@ -0,0 +1,46 @@ +function linecolor = colordg(n); +%COLORDG - Choose 1 out of 10 different colors for a line plot +%The first seven colors are exactly the same as Matlab's default +%AXES Colororder property. +% +%Syntax: linecolor = colordg(n); +% +%Input: N , value between 1 and 10, giving the following colors +% +% 1 BLUE +% 2 GREEN (medium dark) +% 3 RED +% 4 TURQUOISE +% 5 MAGENTA +% 6 YELLOW (dark) +% 7 GREY (very dark) +% 8 ORANGE +% 9 BROWN +% 10 YELLOW (pale) +% +%Output: LINECOLOR (1 x 3 row vector) +% +%Example: linecolor = colordg(8); +% +%See also: COLOR_ORDERDG.MAT + +%Author: clement Etienam +%PhD Supervisor: Dr Rossmary Villegas + + + +color_order = ... 
+[ 0 0 1 % 1 BLUE +0 0.5 0 % 2 GREEN (medium dark) +1 0 0 % 3 RED +0 0.75 0.75 % 4 TURQUOISE +0.75 0 0.75 % 5 MAGENTA +0.75 0.75 0 % 6 YELLOW (dark) +0.25 0.25 0.25 % 7 GREY (very dark) +1 0.50 0.25 % 8 ORANGE +0.6 0.5 0.4 % 9 BROWN +1 1 0 ]; % 10 YELLOW (pale) + +linecolor = color_order(n,:); + +%END of code \ No newline at end of file diff --git a/Crucial/fminlbfgs.m b/Crucial/fminlbfgs.m new file mode 100644 index 0000000..f570daa --- /dev/null +++ b/Crucial/fminlbfgs.m @@ -0,0 +1,754 @@ +function [x,fval,exitflag,output,grad]=fminlbfgs(funfcn,x_init,optim) +%FMINLBFGS finds a local minimum of a function of several variables. +% This optimizer is developed for image registration methods with large +% amounts of unknown variables. +% +% Optimization methods supported: +% - Quasi Newton Broyden?letcher?oldfarb?hanno (BFGS) +% - Limited memory BFGS (L-BFGS) +% - Steepest Gradient Descent optimization. +% +% [X,FVAL,EXITFLAG,OUTPUT,GRAD] = FMINLBFGS(FUN,X0,OPTIONS) +% +% Inputs, +% FUN: Function handle or string which is minimized, returning an +% error value and optional the error gradient. +% X0: Initial values of unknowns can be a scalar, vector or matrix +% (optional) +% OPTIONS: Structure with optimizer options, made by a struct or +% optimset. (optimset doesnot support all input options) +% +% Outputs, +% X : The found location (values) which minimize the function. +% FVAL : The minimum found +% EXITFLAG : Gives value, which explain why the minimizer stopt +% OUTPUT : Structure with all important ouput values and parameters +% GRAD : The gradient at this location +% +% Extended description of input/ouput variables +% OPTIONS, +% OPTIONS.GoalsExactAchieve : If set to 0, a line search method is +% used which uses a few function calls to do a good line +% search. When set to 1 a normal line search method with Wolfe +% conditions is used (default). +% OPTIONS.GradConstr, Set this variable to true if gradient calls are +% cpu-expensive (default). If false more gradient calls are +% used and less function calls. +% OPTIONS.HessUpdate : If set to 'bfgs', Broyden?letcher?oldfarb?hanno +% optimization is used (default), when the number of unknowns is +% larger then 3000 the function will switch to Limited memory BFGS, +% or if you set it to 'lbfgs'. When set to 'steepdesc', steepest +% decent optimization is used. +% OPTIONS.StoreN : Number of itterations used to approximate the Hessian, +% in L-BFGS, 20 is default. A lower value may work better with +% non smooth functions, because than the Hessian is only valid for +% a specific position. A higher value is recommend with quadratic equations. +% OPTIONS.GradObj : Set to 'on' if gradient available otherwise finited difference +% is used. +% OPTIONS.Display : Level of display. 'off' displays no output; 'plot' displays +% all linesearch results in figures. 'iter' displays output at each +% iteration; 'final' displays just the final output; 'notify' +% displays output only if the function does not converge; +% OPTIONS.TolX : Termination tolerance on x, default 1e-6. +% OPTIONS.TolFun : Termination tolerance on the function value, default 1e-6. +% OPTIONS.MaxIter : Maximum number of iterations allowed, default 400. +% OPTIONS.MaxFunEvals : Maximum number of function evaluations allowed, +% default 100 times the amount of unknowns. +% OPTIONS.DiffMaxChange : Maximum stepsize used for finite difference gradients. +% OPTIONS.DiffMinChange : Minimum stepsize used for finite difference gradients. 
+% OPTIONS.OutputFcn : User-defined function that an optimization function calls +% at each iteration. +% OPTIONS.rho : Wolfe condition on gradient (c1 on wikipedia), default 0.01. +% OPTIONS.sigma : Wolfe condition on gradient (c2 on wikipedia), default 0.9. +% OPTIONS.tau1 : Bracket expansion if stepsize becomes larger, default 3. +% OPTIONS.tau2 : Left bracket reduction used in section phase, +% default 0.1. +% OPTIONS.tau3 : Right bracket reduction used in section phase, default 0.5. +% FUN, +% The speed of this optimizer can be improved by also providing +% the gradient at X. Write the FUN function as follows +% function [f,g]=FUN(X) +% f , value calculation at X; +% if ( nargout > 1 ) +% g , gradient calculation at X; +% end +% EXITFLAG, +% Possible values of EXITFLAG, and the corresponding exit conditions +% are +% 1, 'Change in the objective function value was less than the specified tolerance TolFun.'; +% 2, 'Change in x was smaller than the specified tolerance TolX.'; +% 3, 'Magnitude of gradient smaller than the specified tolerance'; +% 4, 'Boundary fminimum reached.'; +% 0, 'Number of iterations exceeded options.MaxIter or number of function evaluations exceeded options.FunEvals.'; +% -1, 'Algorithm was terminated by the output function.'; +% -2, 'Line search cannot find an acceptable point along the current search'; +% +% Examples +% options = optimset('GradObj','on'); +% X = fminlbfgs(@myfun,2,options) +% +% % where myfun is a MATLAB function such as: +% function [f,g] = myfun(x) +% f = sin(x) + 3; +% if ( nargout > 1 ), g = cos(x); end +% +% See also OPTIMSET, FMINSEARCH, FMINBND, FMINCON, FMINUNC, @, INLINE. +% +% Function is written by D.Kroon University of Twente (Updated Nov. 2010) +% Read Optimalisation Parameters +defaultopt = struct('Display','final','HessUpdate','bfgs','GoalsExactAchieve',1,'GradConstr',true, ... + 'TolX',1e-6,'TolFun',1e-6,'GradObj','off','MaxIter',400,'MaxFunEvals',100*numel(x_init)-1, ... + 'DiffMaxChange',1e-1,'DiffMinChange',1e-8,'OutputFcn',[], ... 
+ 'rho',0.0100,'sigma',0.900,'tau1',3,'tau2', 0.1, 'tau3', 0.5,'StoreN',20); +if (~exist('optim','var')) + optim=defaultopt; +else + f = fieldnames(defaultopt); + for i=1:length(f), + if (~isfield(optim,f{i})||(isempty(optim.(f{i})))), optim.(f{i})=defaultopt.(f{i}); end + end +end + +% Initialize the data structure +data.fval=0; +data.gradient=0; +data.fOld=[]; +data.xsizes=size(x_init); +data.numberOfVariables = numel(x_init); +data.xInitial = x_init(:); +data.alpha=1; +data.xOld=data.xInitial; +data.iteration=0; +data.funcCount=0; +data.gradCount=0; +data.exitflag=[]; +data.nStored=0; +data.timeTotal=tic; +data.timeExtern=0; +% Switch to L-BFGS in case of more than 3000 unknown variables +if(optim.HessUpdate(1)=='b') + if(data.numberOfVariables<3000), + optim.HessUpdate='bfgs'; + else + optim.HessUpdate='lbfgs'; + end +end +if(optim.HessUpdate(1)=='l') + succes=false; + while(~succes) + try + data.deltaX=zeros(data.numberOfVariables,optim.StoreN); + data.deltaG=zeros(data.numberOfVariables,optim.StoreN); + data.saveD=zeros(data.numberOfVariables,optim.StoreN); + succes=true; + catch ME + warning('fminlbfgs:memory','Decreasing StoreN value because out of memory'); + succes=false; + data.deltaX=[]; data.deltaG=[]; data.saveD=[]; + optim.StoreN=optim.StoreN-1; + if(optim.StoreN<1) + rethrow(ME); + end + end + end +end +exitflag=[]; +% Display column headers +if(strcmp(optim.Display,'iter')) + disp(' Iteration Func-count Grad-count f(x) Step-size'); +end +% Calculate the initial error and gradient +data.initialStepLength=1; +[data,fval,grad]=gradient_function(data.xInitial,funfcn, data, optim); +data.gradient=grad; +data.dir = -data.gradient; +data.fInitial = fval; +data.fPrimeInitial= data.gradient'*data.dir(:); +data.fOld=data.fInitial; +data.xOld=data.xInitial; +data.gOld=data.gradient; + +gNorm = norm(data.gradient,Inf); % Norm of gradient +data.initialStepLength = min(1/gNorm,5); +% Show the current iteration +if(strcmp(optim.Display,'iter')) + s=sprintf(' %5.0f %5.0f %5.0f %13.6g ',data.iteration,data.funcCount,data.gradCount,data.fInitial); disp(s); +end + +% Hessian intialization +if(optim.HessUpdate(1)=='b') + data.Hessian=eye(data.numberOfVariables); +end +% Call output function +if(call_output_function(data,optim,'init')), exitflag=-1; end + +% Start Minimizing +while(true) + % Update number of itterations + data.iteration=data.iteration+1; + % Set current lineSearch parameters + data.TolFunLnS = eps(max(1,abs(data.fInitial ))); + data.fminimum = data.fInitial - 1e16*(1+abs(data.fInitial)); + + % Make arrays to store linesearch results + data.storefx=[]; data.storepx=[]; data.storex=[]; data.storegx=[]; + % If option display plot, than start new figure + if(optim.Display(1)=='p'), figure, hold on; end + + % Find a good step size in the direction of the gradient: Linesearch + if(optim.GoalsExactAchieve==1) + data=linesearch(funfcn, data,optim); + else + data=linesearch_simple(funfcn, data, optim); + end + + % Make linesearch plot + if(optim.Display(1)=='p'); + plot(data.storex,data.storefx,'r*'); + plot(data.storex,data.storefx,'b'); + + alpha_test= linspace(min(data.storex(:))/3, max(data.storex(:))*1.3, 10); + falpha_test=zeros(1,length(alpha_test)); + for i=1:length(alpha_test) + [data,falpha_test(i)]=gradient_function(data.xInitial(:)+alpha_test(i)*data.dir(:),funfcn, data, optim); + end + plot(alpha_test,falpha_test,'g'); + plot(data.alpha,data.f_alpha,'go','MarkerSize',8); + end + + % Check if exitflag is set + if(~isempty(data.exitflag)), + exitflag=data.exitflag; + 
data.xInitial=data.xOld; + data.fInitial=data.fOld; + data.gradient=data.gOld; + break, + end; + + % Update x with the alpha step + data.xInitial = data.xInitial + data.alpha*data.dir; + + % Set the current error and gradient + data.fInitial = data.f_alpha; + data.gradient = data.grad; + + % Set initial steplength to 1 + data.initialStepLength = 1; + + + gNorm = norm(data.gradient,Inf); % Norm of gradient + + % Set exit flags + if(gNorm =optim.MaxIter), exitflag=0; end + + % Check if exitflag is set + if(~isempty(exitflag)), break, end; + % Update the inverse Hessian matrix + if(optim.HessUpdate(1)~='s') + % Do the Quasi-Neton Hessian update. + data = updateQuasiNewtonMatrix_LBFGS(data,optim); + else + data.dir = -data.gradient; + end + + % Derivative of direction + data.fPrimeInitial= data.gradient'*data.dir(:); + % Call output function + if(call_output_function(data,optim,'iter')), exitflag=-1; end + + % Show the current iteration + if(strcmp(optim.Display(1),'i')||strcmp(optim.Display(1),'p')) + s=sprintf(' %5.0f %5.0f %5.0f %13.6g %13.6g',data.iteration,data.funcCount,data.gradCount,data.fInitial,data.alpha); disp(s); + end + + % Keep the variables for next iteration + data.fOld=data.fInitial; + data.xOld=data.xInitial; + data.gOld=data.gradient; +end +% Set output parameters +fval=data.fInitial; +grad=data.gradient; +x = data.xInitial; +% Reshape x to original shape +x=reshape(x,data.xsizes); +% Call output function +if(call_output_function(data,optim,'done')), exitflag=-1; end +% Make exist output structure +if(optim.HessUpdate(1)=='b'), output.algorithm='Broyden?letcher?oldfarb?hanno (BFGS)'; +elseif(optim.HessUpdate(1)=='l'), output.algorithm='limited memory BFGS (L-BFGS)'; +else output.algorithm='Steepest Gradient Descent'; +end +output.message=getexitmessage(exitflag); +output.iteration = data.iteration; +output.funccount = data.funcCount; +output.fval = data.fInitial; +output.stepsize = data.alpha; +output.directionalderivative = data.fPrimeInitial; +output.gradient = reshape(data.gradient, data.xsizes); +output.searchdirection = data.dir; +output.timeTotal=toc(data.timeTotal); +output.timeExtern=data.timeExtern; +oupput.timeIntern=output.timeTotal-output.timeExtern; +% Display final results +if(~strcmp(optim.Display,'off')) + disp(' Optimizer Results') + disp([' Algorithm Used: ' output.algorithm]); + disp([' Exit message : ' output.message]); + disp([' iterations : ' int2str(data.iteration)]); + disp([' Function Count : ' int2str(data.funcCount)]); + disp([' Minimum found : ' num2str(fval)]); + disp([' Intern Time : ' num2str(oupput.timeIntern) ' seconds']); + disp([' Total Time : ' num2str(output.timeTotal) ' seconds']); +end +function message=getexitmessage(exitflag) + switch(exitflag) + case 1, message='Change in the objective function value was less than the specified tolerance TolFun.'; + case 2, message='Change in x was smaller than the specified tolerance TolX.'; + case 3, message='Magnitude of gradient smaller than the specified tolerance'; + case 4, message='Boundary fminimum reached.'; + case 0, message='Number of iterations exceeded options.MaxIter or number of function evaluations exceeded options.FunEvals.'; + case -1, message='Algorithm was terminated by the output function.'; + case -2, message='Line search cannot find an acceptable point along the current search'; + otherwise, message='Undefined exit code'; + end + +function stopt=call_output_function(data,optim,where) +stopt=false; +if(~isempty(optim.OutputFcn)) + output.iteration = data.iteration; + 
output.funccount = data.funcCount; + output.fval = data.fInitial; + output.stepsize = data.alpha; + output.directionalderivative = data.fPrimeInitial; + output.gradient = reshape(data.gradient, data.xsizes); + output.searchdirection = data.dir; + stopt=feval(optim.OutputFcn,reshape(data.xInitial,data.xsizes),output,where); +end + + +function data=linesearch_simple(funfcn, data, optim) +% Find a bracket of acceptable points +data = bracketingPhase_simple(funfcn, data, optim); +if (data.bracket_exitflag == 2) + % BracketingPhase found a bracket containing acceptable points; + % now find acceptable point within bracket + data = sectioningPhase_simple(funfcn, data, optim); + data.exitflag = data.section_exitflag; +else + % Already acceptable point found or MaxFunEvals reached + data.exitflag = data.bracket_exitflag; +end +function data = bracketingPhase_simple(funfcn, data,optim) +% Number of itterations +itw=0; +% Point with smaller value, initial +data.beta=0; +data.f_beta=data.fInitial; +data.fPrime_beta=data.fPrimeInitial; +% Initial step is equal to alpha of previous step. +alpha = data.initialStepLength; +% Going up hill +hill=false; +% Search for brackets +while(true) + % Calculate the error registration gradient + if(optim.GradConstr) + [data,f_alpha]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim); + fPrime_alpha=nan; + grad=nan; + else + [data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data,optim); + fPrime_alpha = grad'*data.dir(:); + end + + % Store values linesearch + data.storefx=[data.storefx f_alpha]; + data.storepx=[data.storepx fPrime_alpha]; + data.storex=[data.storex alpha]; + data.storegx=[data.storegx grad(:)]; + + % Update step value + if(data.f_beta(log(optim.TolFun)/log(optim.tau3))), + % No new optium found, linesearch failed. 
+ data.bracket_exitflag=-2; break; + end + + if(data.beta>0&&hill) + % Get the brackets around minimum point + % Pick bracket A from stored trials + [t,i]=sort(data.storex,'ascend'); + storefx=data.storefx(i);storepx=data.storepx(i); storex=data.storex(i); + [t,i]=find(storex>data.beta,1); + if(isempty(i)), [t,i]=find(storex==data.beta,1); end + alpha=storex(i); f_alpha=storefx(i); fPrime_alpha=storepx(i); + + % Pick bracket B from stored trials + [t,i]=sort(data.storex,'descend'); + storefx=data.storefx(i);storepx=data.storepx(i); storex=data.storex(i); + [t,i]=find(storexoptim.DiffMaxChange), gstep=optim.DiffMaxChange; end + if(gstep=optim.MaxFunEvals), data.bracket_exitflag=0; return; end +end + +function data = sectioningPhase_simple(funfcn, data, optim) +% Get the brackets +brcktEndpntA=data.a; brcktEndpntB=data.b; +% Calculate minimum between brackets +[alpha,f_alpha_estimated] = pickAlphaWithinInterval(brcktEndpntA,brcktEndpntB,data.a,data.b,data.f_a,data.fPrime_a,data.f_b,data.fPrime_b,optim); +if(isfield(data,'beta')&&(data.f_betaoptim.DiffMaxChange), gstep=optim.DiffMaxChange; end + if(gstep data.fInitial + alpha*optim.rho*data.fPrimeInitial) || (f_alpha >= data.f_a)) + % Update bracket B to current alpha + data.b = alpha; data.f_b = f_alpha; data.fPrime_b = fPrime_alpha; + else + % Wolfe conditions, if true then acceptable point found + if (abs(fPrime_alpha) <= -optim.sigma*data.fPrimeInitial), + if(optim.GradConstr) + % Gradient was not yet calculated because of time costs + [data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim); + fPrime_alpha = grad'*data.dir(:); + end + % Store the found alpha values + data.alpha=alpha; data.fPrime_alpha= fPrime_alpha; data.f_alpha= f_alpha; + data.grad=grad; + data.section_exitflag = []; return, + end + + % Update bracket A + data.a = alpha; data.f_a = f_alpha; data.fPrime_a = fPrime_alpha; + + if (data.b - data.a)*fPrime_alpha >= 0 + % B becomes old bracket A; + data.b = aPrev; data.f_b = f_aPrev; data.fPrime_b = fPrime_aPrev; + end + end + + % No acceptable point could be found + if (abs(data.b-data.a) < eps), data.section_exitflag = -2; return, end + % maxFunEvals reached + if(data.funcCount >optim.MaxFunEvals), data.section_exitflag = -1; return, end +end +function data = bracketingPhase(funfcn, data, optim) +% bracketingPhase finds a bracket [a,b] that contains acceptable points; a bracket +% is the same as a closed interval, except that a > b is allowed. +% +% The outputs f_a and fPrime_a are the values of the function and the derivative +% evaluated at the bracket endpoint 'a'. Similar notation applies to the endpoint +% 'b'. 
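+%
+% Here an 'acceptable point' is a step length alpha satisfying the strong
+% Wolfe conditions checked below (rho = OPTIONS.rho, sigma = OPTIONS.sigma):
+%   sufficient decrease:  f(x + alpha*d) <= f(x) + rho*alpha*f'(x)'*d
+%   curvature:            |f'(x + alpha*d)'*d| <= -sigma*f'(x)'*d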
+% Parameters of bracket A +data.a = []; +data.f_a = []; +data.fPrime_a = []; +% Parameters of bracket B +data.b = []; +data.f_b = []; +data.fPrime_b = []; +% First trial alpha is user-supplied +% f_alpha will contain f(alpha) for all trial points alpha +% fPrime_alpha will contain f'(alpha) for all trial points alpha +alpha = data.initialStepLength; +f_alpha = data.fInitial; +fPrime_alpha = data.fPrimeInitial; +% Set maximum value of alpha (determined by fminimum) +alphaMax = (data.fminimum - data.fInitial)/(optim.rho*data.fPrimeInitial); +alphaPrev = 0; +while(true) + % Evaluate f(alpha) and f'(alpha) + fPrev = f_alpha; + fPrimePrev = fPrime_alpha; + + % Calculate value (and gradient if no extra time cost) of current alpha + if(~optim.GradConstr) + [data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim); + fPrime_alpha = grad'*data.dir(:); + else + gstep=data.initialStepLength/1e6; + if(gstep>optim.DiffMaxChange), gstep=optim.DiffMaxChange; end + if(gstep (data.fInitial + alpha*optim.rho*data.fPrimeInitial)) || (f_alpha >= fPrev) + % Set the bracket values + data.a = alphaPrev; data.f_a = fPrev; data.fPrime_a = fPrimePrev; + data.b = alpha; data.f_b = f_alpha; data.fPrime_b = fPrime_alpha; + % Finished bracketing phase + data.bracket_exitflag = 2; return + end + % Acceptable steplength found + if (abs(fPrime_alpha) <= -optim.sigma*data.fPrimeInitial), + if(optim.GradConstr) + % Gradient was not yet calculated because of time costs + [data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim); + fPrime_alpha = grad'*data.dir(:); + end + % Store the found alpha values + data.alpha=alpha; + data.fPrime_alpha= fPrime_alpha; data.f_alpha= f_alpha; data.grad=grad; + % Finished bracketing phase, and no need to call sectioning phase + data.bracket_exitflag = []; return + end + + % Bracket located - case 2 + if (fPrime_alpha >= 0) + % Set the bracket values + data.a = alpha; data.f_a = f_alpha; data.fPrime_a = fPrime_alpha; + data.b = alphaPrev; data.f_b = fPrev; data.fPrime_b = fPrimePrev; + % Finished bracketing phase + data.bracket_exitflag = 2; return + end + + % Update alpha + if (2*alpha - alphaPrev < alphaMax ) + brcktEndpntA = 2*alpha-alphaPrev; + brcktEndpntB = min(alphaMax,alpha+optim.tau1*(alpha-alphaPrev)); + % Find global minimizer in bracket [brcktEndpntA,brcktEndpntB] of 3rd-degree polynomial + % that interpolates f() and f'() at alphaPrev and at alpha + alphaNew = pickAlphaWithinInterval(brcktEndpntA,brcktEndpntB,alphaPrev,alpha,fPrev, ... + fPrimePrev,f_alpha,fPrime_alpha,optim); + alphaPrev = alpha; + alpha = alphaNew; + else + alpha = alphaMax; + end + % maxFunEvals reached + if(data.funcCount >optim.MaxFunEvals), data.bracket_exitflag = -1; return, end +end +function [alpha,f_alpha]= pickAlphaWithinInterval(brcktEndpntA,brcktEndpntB,alpha1,alpha2,f1,fPrime1,f2,fPrime2,optim) +% finds a global minimizer alpha within the bracket [brcktEndpntA,brcktEndpntB] of the cubic polynomial +% that interpolates f() and f'() at alpha1 and alpha2. Here f(alpha1) = f1, f'(alpha1) = fPrime1, +% f(alpha2) = f2, f'(alpha2) = fPrime2. +% determines the coefficients of the cubic polynomial with c(alpha1) = f1, +% c'(alpha1) = fPrime1, c(alpha2) = f2, c'(alpha2) = fPrime2. +coeff = [(fPrime1+fPrime2)*(alpha2-alpha1)-2*(f2-f1) ... 
+ 3*(f2-f1)-(2*fPrime1+fPrime2)*(alpha2-alpha1) (alpha2-alpha1)*fPrime1 f1]; +% Convert bounds to the z-space +lowerBound = (brcktEndpntA - alpha1)/(alpha2 - alpha1); +upperBound = (brcktEndpntB - alpha1)/(alpha2 - alpha1); +% Swap if lowerbound is higher than the upperbound +if (lowerBound > upperBound), t=upperBound; upperBound=lowerBound; lowerBound=t; end +% Find minima and maxima from the roots of the derivative of the polynomial. +sPoints = roots([3*coeff(1) 2*coeff(2) coeff(3)]); +% Remove imaginaire and points outside range +sPoints(imag(sPoints)~=0)=[]; +sPoints(sPointsupperBound)=[]; +% Make vector with all possible solutions +sPoints=[lowerBound sPoints(:)' upperBound]; +% Select the global minimum point +[f_alpha,index]=min(polyval(coeff,sPoints)); z=sPoints(index); +% Add the offset and scale back from [0..1] to the alpha domain +alpha = alpha1 + z*(alpha2 - alpha1); +% Show polynomial search +if(optim.Display(1)=='p'); + vPoints=polyval(coeff,sPoints); + plot(sPoints*(alpha2 - alpha1)+alpha1,vPoints,'co'); + plot([sPoints(1) sPoints(end)]*(alpha2 - alpha1)+alpha1,[vPoints(1) vPoints(end)],'c*'); + xPoints=linspace(lowerBound/3, upperBound*1.3, 50); + vPoints=polyval(coeff,xPoints); + plot(xPoints*(alpha2 - alpha1)+alpha1,vPoints,'c'); +end + +function [data,fval,grad]=gradient_function(x,funfcn, data, optim) + % Call the error function for error (and gradient) + if ( nargout <3 ) + timem=tic; + fval=funfcn(reshape(x,data.xsizes)); + data.timeExtern=data.timeExtern+toc(timem); + data.funcCount=data.funcCount+1; + else + if(strcmp(optim.GradObj,'on')) + timem=tic; + [fval, grad]=feval(funfcn,reshape(x,data.xsizes)); + data.timeExtern=data.timeExtern+toc(timem); + data.funcCount=data.funcCount+1; + data.gradCount=data.gradCount+1; + else + % Calculate gradient with forward difference if not provided by the function + grad=zeros(length(x),1); + fval=funfcn(reshape(x,data.xsizes)); + gstep=data.initialStepLength/1e6; + if(gstep>optim.DiffMaxChange), gstep=optim.DiffMaxChange; end + if(gstep= sqrt(eps)*max( eps,norm(deltaX)*norm(deltaG) )) + if(optim.HessUpdate(1)=='b') + % Default BFGS as described by Nocedal + p_k = 1 / (deltaG'*deltaX); + Vk = eye(data.numberOfVariables) - p_k*deltaG*deltaX'; + % Set Hessian + data.Hessian = Vk'*data.Hessian *Vk + p_k * deltaX*deltaX'; + % Set new Direction + data.dir = -data.Hessian*data.gradient; + else + % L-BFGS with scaling as described by Nocedal + + % Update a list with the history of deltaX and deltaG + data.deltaX(:,2:optim.StoreN)=data.deltaX(:,1:optim.StoreN-1); data.deltaX(:,1)=deltaX; + data.deltaG(:,2:optim.StoreN)=data.deltaG(:,1:optim.StoreN-1); data.deltaG(:,1)=deltaG; + + data.nStored=data.nStored+1; if(data.nStored>optim.StoreN), data.nStored=optim.StoreN; end + % Initialize variables + a=zeros(1,data.nStored); + p=zeros(1,data.nStored); + q = data.gradient; + for i=1:data.nStored + p(i)= 1 / (data.deltaG(:,i)'*data.deltaX(:,i)); + a(i) = p(i)* data.deltaX(:,i)' * q; + q = q - a(i) * data.deltaG(:,i); + end + % Scaling of initial Hessian (identity matrix) + p_k = data.deltaG(:,1)'*data.deltaX(:,1) / sum(data.deltaG(:,1).^2); + + % Make r = - Hessian * gradient + r = p_k * q; + for i=data.nStored:-1:1, + b = p(i) * data.deltaG(:,i)' * r; + r = r + data.deltaX(:,i)*(a(i)-b); + end + + % Set new direction + data.dir = -r; + end +end + + \ No newline at end of file diff --git a/Crucial/pinv2.m b/Crucial/pinv2.m new file mode 100644 index 0000000..80fbbe4 --- /dev/null +++ b/Crucial/pinv2.m @@ -0,0 +1,29 @@ +function [B] = pinv2(A) 
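+%PINV2  Moore-Penrose pseudoinverse computed via singular value decomposition.
+%   B = pinv2(A) inverts the singular values of A that exceed 1e-10 times the
+%   largest singular value and zeroes the rest, giving a regularized
+%   pseudoinverse (used by ESMDA.m for the Kalman-gain-like term).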
+
+    DEBUG = 0;
+
+    [U, L, V] = svd(A);
+
+    if size(A, 2) == 1
+        l = L;
+    else
+        l = diag(L);
+    end
+    valid   = find(l / l(1) > 1e-10);
+    invalid = find(l / l(1) <= 1e-10);
+
+    m = size(A, 2);
+    for i = valid'
+        L(i, i) = 1 / l(i);
+    end
+    for i = invalid'
+        L(i, i) = 0;
+    end
+
+    if DEBUG
+        fprintf(' U = %d x %d, L = %d x %d, V = %d x %d\n', size(U, 1), size(U, 2), size(L, 1), size(L, 2), size(V, 1), size(V, 2));
+    end
+
+    B = V * L' * U';
+
+    return
\ No newline at end of file
diff --git a/Crucial/prediction_1.m b/Crucial/prediction_1.m
new file mode 100644
index 0000000..a11fac9
--- /dev/null
+++ b/Crucial/prediction_1.m
@@ -0,0 +1,32 @@
+function [Valuee,Valuees]=prediction_1(weights,...
+    dd_updated,X,Xtrains,ytrains,Experts,clfys)
+labelDA = dd_updated; % hard (most probable) expert label for each test point
+
+meanfunc = @meanConst;
+likfunc  = {@likGauss};
+
+inf  = @infGaussLik;
+cov  = {@covSEiso};
+infv = @(varargin) inf(varargin{:},struct('s',1.0));
+
+
+parfor jj=1:size(labelDA,1)
+    label   = labelDA(jj,:);
+    hyp_use = weights{label,:};
+    Xuse    = Xtrains{label,:};
+    yuse    = ytrains{label,:};
+    cov1    = {'apxSparse',cov,hyp_use.xu};
+    a00     = X(jj,:);
+    [zz, s2] = gp(hyp_use, infv, meanfunc, cov1, likfunc, Xuse, yuse, a00);
+
+    zz = reshape(zz,[],1);
+    s2 = reshape(s2,[],1);
+
+    Valuee(jj,:)  = zz;
+    Valuees(jj,:) = s2;
+end
+Valuee  = clfys.inverse_transform(Valuee);
+Valuees = clfys.inverse_transform(Valuees);
+Valuees = sqrt(Valuees); % standard-deviation estimate, returned as second output
+
+end
\ No newline at end of file
diff --git a/Crucial/prediction_2.m b/Crucial/prediction_2.m
new file mode 100644
index 0000000..e4a7a6a
--- /dev/null
+++ b/Crucial/prediction_2.m
@@ -0,0 +1,15 @@
+function [Valuee]=prediction_2(weights,dd_updated,X,Class_all,Experts,clfy)
+labelDA = dd_updated; % hard (most probable) expert label for each test point
+%%
+parfor jj=1:size(labelDA,1)
+    label = labelDA(jj,:);
+    net   = weights{label,:};
+    a00   = X(jj,:);
+    zz = (predict(net,a00'))';
+    zz = reshape(zz,[],1);
+
+    Valuee(jj,:) = zz;
+end
+Valuee = clfy.inverse_transform(Valuee);
+
+end
\ No newline at end of file
diff --git a/Crucial/prediction_3.m b/Crucial/prediction_3.m
new file mode 100644
index 0000000..a3232bd
--- /dev/null
+++ b/Crucial/prediction_3.m
@@ -0,0 +1,49 @@
+function [Valuee,Valuees]=prediction_3(weights,...
+    dd_updated,X,Class_all,Experts,clfy)
+labelDA = dd_updated; % hard (most probable) expert label for each test point
+
+% for i=1:Experts
+%     indee=find(labelDA==i);
+%     if size(indee,1)~= 0
+%         net=weights{i,:};
+%         a00=X(indee,:);
+%         [zz,stdclm] = predict(net,a00);
+%         zz=reshape(zz,[],1);
+%         Valuee(indee,:)= zz;
+%         Valuees(indee,:)=stdclm;
+%     else
+%         Valuee(indee,:)= 0;
+%         Valuees(indee,:)= 0;
+%     end
+% end
+% Valuee=double(Valuee);
+% Valuee=clfy.inverse_transform(Valuee);
+
+%%
+parfor jj=1:size(labelDA,1)
+    label = labelDA(jj,:);
+    net   = weights{label,:};
+    a00   = X(jj,:);
+
+    [zz,s2] = predict(net,a00);
+    zz = reshape(zz,[],1);
+    s2 = reshape(s2,[],1);
+
+    Valuee(jj,:)  = double(zz);
+    Valuees(jj,:) = double(s2);
+end
+Valuee  = clfy.inverse_transform(Valuee);
+Valuees = clfy.inverse_transform(Valuees);
+Valuees = sqrt(Valuees);
+
+end
\ No newline at end of file
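For reference, ESMDA.m above implements the ensemble-smoother update
Ynew = Y + C_YD * pinv2(C_DD + alpha*C_D) * (Dperturbed - Sim). A minimal
sketch of one assimilation step follows; the ensemble size, parameter count,
observation count, inflation factor and all data below are made up for
illustration and do not come from this patch, which instead obtains Sim from
Forwarding.m via Bayesian_Clement.m:

    % Hypothetical driver for ESMDA.m / pinv2.m above; all sizes and data are synthetic.
    N     = 50;                 % ensemble size (assumed)
    Np    = 8;                  % number of uncertain parameters (assumed)
    nobs  = 2;                  % number of observed responses (assumed)
    alpha = 4;                  % ES-MDA inflation factor, e.g. one of 4 passes
    ens   = randn(Np, N);       % prior parameter ensemble, one member per column
    Sim   = randn(nobs, N);     % forward-model response of each ensemble member
    dobs  = randn(nobs, 1);     % observed data (column vector)
    ens_a = ESMDA(ens, dobs, N, Sim, alpha);   % updated (posterior) ensemble

In ES-MDA this step is usually repeated several times, re-running the forward
model on the updated ensemble and keeping the inflated observation noise at
each pass, as Bayesian_Clement.m and Bayesian_Clement_2.m do here.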