v4: added M. Ratto's changes to the diffuse filter/smoother and optimizer

git-svn-id: https://www.dynare.org/svn/dynare/dynare_v4@641 ac1d8469-bf42-47a9-8791-bf33cf982152
time-shift
Author: michel, 2006-03-03 13:13:00 +00:00
parent 35c1067f46
commit 742df69762
9 changed files with 661 additions and 184 deletions


@@ -128,6 +128,13 @@ function [fval,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend,data
Pstar(ivs,ivs) = lyapunov_symm(T(ivs,ivs),R(ivs,:)*Q* ...
transpose(R(ivs,:)));
Pinf = bayestopt_.Pinf;
% by M. Ratto
RR=T(:,find(~ismember([1:np],ivs)));
i=find(abs(RR)>1.e-10);
R0=zeros(size(RR));
R0(i)=sign(RR(i));
Pinf=R0*R0';
% by M. Ratto
end
%------------------------------------------------------------------------------
% 4. Likelihood evaluation

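The lines marked '% by M. Ratto' above override bayestopt_.Pinf with a diffuse covariance built from the sign pattern of the transition-matrix columns that load on the nonstationary states. A minimal standalone sketch of that construction, using the same variable names (T, np, ivs, Pinf) and a made-up 2-by-2 transition matrix:

% Illustrative sketch of the Pinf construction above (toy values only).
T   = [1.0 0.5; 0.0 0.9];               % toy transition matrix with one unit root
np  = size(T,1);
ivs = 2;                                % indices of the stable (stationary) states
RR  = T(:,find(~ismember(1:np,ivs)));   % columns loading on the nonstationary states
R0  = zeros(size(RR));
i   = find(abs(RR)>1.e-10);
R0(i) = sign(RR(i));                    % keep only the sign pattern
Pinf  = R0*R0'                          % diffuse block of the initial state covariance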
matlab/DsgeLikelihood_hh.m (new file, 186 lines)

@@ -0,0 +1,186 @@
function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood_hh(xparam1,gend,data)
% stephane.adjemian@cepremap.cnrs.fr [09-07-2004]
%
% Adapted from mj_optmumlik.m
global bayestopt_ estim_params_ options_ trend_coeff_ M_ oo_ xparam1_test
fval = [];
ys = [];
trend_coeff = [];
xparam1_test = xparam1;
cost_flag = 1;
nobs = size(options_.varobs,1);
%------------------------------------------------------------------------------
% 1. Get the structural parameters & define penalties
%------------------------------------------------------------------------------
if options_.mode_compute ~= 1 & any(xparam1 < bayestopt_.lb)
k = find(xparam1 < bayestopt_.lb);
fval = bayestopt_.penalty+sum((bayestopt_.lb(k)-xparam1(k)).^2);
llik=fval;
cost_flag = 0;
return;
end
if options_.mode_compute ~= 1 & any(xparam1 > bayestopt_.ub)
k = find(xparam1 > bayestopt_.ub);
fval = bayestopt_.penalty+sum((xparam1(k)-bayestopt_.ub(k)).^2);
llik=fval;
cost_flag = 0;
return;
end
Q = M_.Sigma_e;
for i=1:estim_params_.nvx
k =estim_params_.var_exo(i,1);
Q(k,k) = xparam1(i)*xparam1(i);
end
offset = estim_params_.nvx;
if estim_params_.nvn
H = zeros(nobs,nobs);
for i=1:estim_params_.nvn
k = estim_params_.var_endo(i,1);
H(k,k) = xparam1(i+offset)*xparam1(i+offset);
end
offset = offset+estim_params_.nvn;
end
if estim_params_.ncx
for i=1:estim_params_.ncx
k1 =estim_params_.corrx(i,1);
k2 =estim_params_.corrx(i,2);
Q(k1,k2) = xparam1(i+offset)*sqrt(Q(k1,k1)*Q(k2,k2));
Q(k2,k1) = Q(k1,k2);
end
[CholQ,testQ] = chol(Q);
if testQ %% The variance-covariance matrix of the structural innovations is not positive definite.
%% We have to compute the eigenvalues of this matrix in order to build the penalty.
a = diag(eig(Q));
k = find(a < 0);
if k > 0
fval = bayestopt_.penalty+sum(-a(k));
llik=fval;
cost_flag = 0;
return
end
end
offset = offset+estim_params_.ncx;
end
if estim_params_.ncn
for i=1:estim_params_.ncn
k1 = options_.lgyidx2varobs(estim_params_.corrn(i,1));
k2 = options_.lgyidx2varobs(estim_params_.corrn(i,2));
H(k1,k2) = xparam1(i+offset)*sqrt(H(k1,k1)*H(k2,k2));
H(k2,k1) = H(k1,k2);
end
[CholH,testH] = chol(H);
if testH
a = diag(eig(H));
k = find(a < 0);
if k > 0
fval = bayestopt_.penalty+sum(-a(k));
llik=fval;
cost_flag = 0;
return
end
end
offset = offset+estim_params_.ncn;
end
M_.params(estim_params_.param_vals(:,1)) = xparam1(offset+1:end);
% for i=1:estim_params_.np
% M_.params(estim_params_.param_vals(i,1)) = xparam1(i+offset);
%end
M_.Sigma_e = Q;
%------------------------------------------------------------------------------
% 2. call model setup & reduction program
%------------------------------------------------------------------------------
[T,R,SteadyState,info] = dynare_resolve;
if info(1) == 1 | info(1) == 2 | info(1) == 5
fval = bayestopt_.penalty+1;
llik=fval;
cost_flag = 0;
return
elseif info(1) == 3 | info(1) == 4 | info(1) == 20
fval = bayestopt_.penalty+info(2)^2;
llik=fval;
cost_flag = 0;
return
end
if options_.loglinear == 1
constant = log(SteadyState(bayestopt_.mfys));
else
constant = SteadyState(bayestopt_.mfys);
end
if bayestopt_.with_trend == 1
trend_coeff = zeros(nobs,1);
for i=1:nobs
trend_coeff(i) = evalin('base',bayestopt_.trend_coeff{i});
end
trend = repmat(constant,1,gend)+trend_coeff*[1:gend];
else
trend = repmat(constant,1,gend);
end
start = options_.presample+1;
np = size(T,1);
mf = bayestopt_.mf;
%------------------------------------------------------------------------------
% 3. Initial condition of the Kalman filter
%------------------------------------------------------------------------------
if options_.lik_init == 1 % Kalman filter
Pstar = lyapunov_symm(T,R*Q*transpose(R));
Pinf = [];
elseif options_.lik_init == 2 % Old Diffuse Kalman filter
Pstar = 10*eye(np);
Pinf = [];
elseif options_.lik_init == 3 % Diffuse Kalman filter
Pstar = zeros(np,np);
ivs = bayestopt_.i_T_var_stable;
Pstar(ivs,ivs) = lyapunov_symm(T(ivs,ivs),R(ivs,:)*Q* ...
transpose(R(ivs,:)));
Pinf = bayestopt_.Pinf;
% by M. Ratto
RR=T(:,find(~ismember([1:np],ivs)));
i=find(abs(RR)>1.e-10);
R0=zeros(size(RR));
R0(i)=sign(RR(i));
Pinf=R0*R0';
% by M. Ratto
end
%------------------------------------------------------------------------------
% 4. Likelihood evaluation
%------------------------------------------------------------------------------
if estim_params_.nvn
if options_.kalman_algo == 1
[LIK, lik] = DiffuseLikelihoodH1(T,R,Q,H,Pinf,Pstar,data,trend,start);
if isinf(LIK) & ~estim_params_.ncn %% The univariate approach considered here doesn't
%% apply when H has some off-diagonal elements.
[LIK, lik] = DiffuseLikelihoodH3(T,R,Q,H,Pinf,Pstar,data,trend,start);
elseif isinf(LIK) & estim_params_.ncn
[LIK, lik] = DiffuseLikelihoodH3corr(T,R,Q,H,Pinf,Pstar,data,trend,start);
end
elseif options_.kalman_algo == 3
if ~estim_params_.ncn %% The univariate approach considered here doesn't
%% apply when H has some off-diagonal elements.
[LIK, lik] = DiffuseLikelihoodH3(T,R,Q,H,Pinf,Pstar,data,trend,start);
else
[LIK, lik] = DiffuseLikelihoodH3corr(T,R,Q,H,Pinf,Pstar,data,trend,start);
end
end
else
if options_.kalman_algo == 1
[LIK, lik] = DiffuseLikelihood1(T,R,Q,Pinf,Pstar,data,trend,start);
if isinf(LIK)
[LIK, lik] = DiffuseLikelihood3(T,R,Q,Pinf,Pstar,data,trend,start);
end
elseif options_.kalman_algo == 3
[LIK, lik] = DiffuseLikelihood3(T,R,Q,Pinf,Pstar,data,trend,start);
end
end
if imag(LIK) ~= 0
likelihood = bayestopt_.penalty;
lik=ones(size(lik)).*bayestopt_.penalty;
else
likelihood = LIK;
end
% ------------------------------------------------------------------------------
% Adds prior if necessary
% ------------------------------------------------------------------------------
lnprior = priordens(xparam1,bayestopt_.pshape,bayestopt_.p1,bayestopt_.p2,bayestopt_.p3,bayestopt_.p4);
fval = (likelihood-lnprior);
llik=[-lnprior; .5*lik(start:end)];
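DsgeLikelihood_hh differs from DsgeLikelihood mainly through its second output llik, which stacks minus the log prior and the scaled per-period likelihood contributions; mr_hessian uses this vector to form the outer product gradient. A hedged consistency check, assuming (as the construction of llik above suggests) that the DiffuseLikelihood* routines return lik with 0.5*sum(lik(start:end)) equal to LIK:

% Hypothetical sanity check of the per-period decomposition (assumption noted above).
[fval, llik] = DsgeLikelihood_hh(xparam1, gend, data);
disp(abs(fval - sum(llik)))   % should be close to zero under that assumption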


@@ -88,6 +88,13 @@ function [alphahat,etahat,epsilonhat,ahat,SteadyState,trend_coeff] = DsgeSmoothe
Pstar(ivs,ivs) = lyapunov_symm(T(ivs,ivs),R(ivs,:)*Q* ...
transpose(R(ivs,:)));
Pinf = bayestopt_.Pinf;
% by M. Ratto
RR=T(:,find(~ismember([1:np],ivs)));
i=find(abs(RR)>1.e-10);
R0=zeros(size(RR));
R0(i)=sign(RR(i));
Pinf=R0*R0';
% by M. Ratto
end
% -----------------------------------------------------------------------------
% 4. Kalman smoother


@@ -248,9 +248,29 @@ if options_.mode_compute > 0 & options_.posterior_mode_estimation
disp(sprintf('Objective function at mode: %f',fval))
disp(sprintf('Objective function at mode: %f',DsgeLikelihood(xparam1,gend,data)))
elseif options_.mode_compute == 5
flag = 0;
[xparam1, hh, gg, fval] = newrat('DsgeLikelihood',xparam1,[],[],flag,gend,data);
eval(['save ' M_.fname '_mode xparam1 hh gg fval;']);
if isfield(options_,'hess')
flag = options_.hess;
else
flag = 1;
end
if ~exist('igg'), % by M. Ratto
hh=[];
gg=[];
igg=[];
end % by M. Ratto
if isfield(options_,'ftol')
crit = options_.ftol;
else
crit = 1.e-7;
end
if isfield(options_,'nit')
nit = options_.nit;
else
nit=1000;
end
%[xparam1, hh, gg, fval] = newrat('DsgeLikelihood',xparam1,[],[],flag,gend,data);
[xparam1, hh, gg, fval, invhess] = newrat('DsgeLikelihood',xparam1,hh,gg,igg,crit,nit,flag,gend,data);
eval(['save ' M_.fname '_mode xparam1 hh gg fval invhess;']);
end
if options_.mode_compute ~= 5
hh = reshape(hessian('DsgeLikelihood',xparam1,gend,data),nx,nx);

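For reference, the new newrat call in the hunk above can be reproduced as follows when no previous Hessian information is stored; the values mirror the defaults that dynare_estimation falls back to when options_.hess, options_.ftol and options_.nit are absent:

% Sketch of the new newrat interface as wired into dynare_estimation above.
hh = []; gg = []; igg = [];     % no stored Hessian, gradient or inverse Hessian
crit = 1.e-7;                   % ftol: stopping criterion on the objective change
nit  = 1000;                    % maximum number of iterations
flag = 1;                       % final Hessian mode (see the newrat help below)
[xparam1, hh, gg, fval, invhess] = newrat('DsgeLikelihood', xparam1, ...
    hh, gg, igg, crit, nit, flag, gend, data);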
matlab/mr_gstep.m (new file, 131 lines)

@@ -0,0 +1,131 @@
function [f0, x] = mr_gstep(func0,x,htol0,varargin)
% Copyright (C) 2005 Marco Ratto
%
% function [f0, x] = mr_gstep(func0,x,htol0,varargin)
%
% Gibbs type step in optimisation
global bayestopt_ options_
persistent h1
gstep_ = options_.gstep;
if nargin<3,
htol = 1.e-6;
else
htol = htol0;
end
func = str2func(func0);
f0=feval(func,x,varargin{:});
n=size(x,1);
h2=bayestopt_.ub-bayestopt_.lb;
if isempty(h1),
h1=max(abs(x),sqrt(gstep_)*ones(n,1))*eps^(1/4);
end
xh1=x;
f1=zeros(size(f0,1),n);
f_1=f1;
%for i=1:n,
i=0;
while i<n,
i=i+1;
h10=h1(i);
hcheck=0;
dx=[];
xh1(i)=x(i)+h1(i);
fx = feval(func,xh1,varargin{:});
it=1;
dx=(fx-f0);
ic=0;
% if abs(dx)>(2*htol),
% c=mr_nlincon(xh1,varargin{:});
% while c
% h1(i)=h1(i)*0.9;
% xh1(i)=x(i)+h1(i);
% c=mr_nlincon(xh1,varargin{:});
% ic=1;
% end
% if ic,
% fx = feval(func,xh1,varargin{:});
% dx=(fx-f0);
% end
% end
icount = 0;
h0=h1(i);
while (abs(dx(it))<0.5*htol | abs(dx(it))>(2*htol)) & icount<10 & ic==0,
%while abs(dx(it))<0.5*htol & icount< 10 & ic==0,
icount=icount+1;
if abs(dx(it)) ~= 0,
if abs(dx(it))<0.5*htol
h1(i)=min(0.3*abs(x(i)), 0.9*htol/abs(dx(it))*h1(i));
xh1(i)=x(i)+h1(i);
% c=mr_nlincon(xh1,varargin{:});
% while c
% h1(i)=h1(i)*0.9;
% xh1(i)=x(i)+h1(i);
% c=mr_nlincon(xh1,varargin{:});
% ic=1;
% end
end
if abs(dx(it))>(2*htol),
h1(i)= htol/abs(dx(it))*h1(i);
xh1(i)=x(i)+h1(i);
end
fx = feval(func,xh1,varargin{:});
it=it+1;
dx(it)=(fx-f0);
h0(it)=h1(i);
if h1(i)<1.e-12*min(1,h2(i)),
ic=1;
hcheck=1;
end
else
h1(i)=1;
ic=1;
end
end
f1(:,i)=fx;
xh1(i)=x(i)-h1(i);
% c=mr_nlincon(xh1,varargin{:});
% ic=0;
% while c
% h1(i)=h1(i)*0.9;
% xh1(i)=x(i)-h1(i);
% c=mr_nlincon(xh1,varargin{:});
% ic = 1;
% end
fx = feval(func,xh1,varargin{:});
f_1(:,i)=fx;
% if ic,
% xh1(i)=x(i)+h1(i);
% f1(:,i)=feval(func,xh1,varargin{:});
% end
if hcheck & htol<1,
htol=min(1,max(min(abs(dx))*2,htol*10));
h1(i)=h10;
xh1(i)=x(i);
i=i-1;
else
gg=zeros(size(x));
hh=gg;
gg(i)=(f1(i)'-f_1(i)')./(2.*h1(i));
if abs(f1(i)+f_1(i)-2*f0)>1.e-12,
hh(i) = abs(1/( (f1(i)+f_1(i)-2*f0)./(h1(i)*h1(i)) ));
else
hh(i) = 1;
end
if gg(i)*(hh(i)*gg(i))/2 > htol,
[f0 x fc retcode] = csminit(func0,x,f0,gg,0,diag(hh),varargin{:});
end
xh1=x;
end
save gstep
end
save gstep
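mr_gstep performs one-parameter-at-a-time ('Gibbs type') improvements of the objective and is invoked from newrat further below. A minimal sketch of that call in the estimation context, with func0, htol, gend and data as they appear in newrat and dynare_estimation and x0 the current parameter vector:

% Gibbs step on the current iterate, as invoked from newrat below.
% htol is the current numerical-noise tolerance; gend and data are the extra
% arguments forwarded to the objective DsgeLikelihood.
[fvala, x0] = mr_gstep('DsgeLikelihood', x0, htol, gend, data);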


@@ -26,9 +26,10 @@
%
function [hessian_mat, gg, htol1, ihh, hh_mat0] = mr_hessian(func,x,hflag,htol0,varargin)
global gstep_ bayestopt_
global options_ bayestopt_
persistent h1 htol
gstep_=options_.gstep;
if isempty(htol), htol = 1.e-4; end
func = str2func(func);
[f0, ff0]=feval(func,x,varargin{:});
@@ -61,19 +62,19 @@ while i<n,
it=1;
dx=(fx-f0);
ic=0;
if abs(dx)>(2*htol),
c=mr_nlincon(xh1,varargin{:});
while c
h1(i)=h1(i)*0.9;
xh1(i)=x(i)+h1(i);
c=mr_nlincon(xh1,varargin{:});
ic=1;
end
if ic,
[fx, ffx]=feval(func,xh1,varargin{:});
dx=(fx-f0);
end
end
% if abs(dx)>(2*htol),
% c=mr_nlincon(xh1,varargin{:});
% while c
% h1(i)=h1(i)*0.9;
% xh1(i)=x(i)+h1(i);
% c=mr_nlincon(xh1,varargin{:});
% ic=1;
% end
% if ic,
% [fx, ffx]=feval(func,xh1,varargin{:});
% dx=(fx-f0);
% end
% end
icount = 0;
h0=h1(i);
@@ -88,13 +89,13 @@ while i<n,
h1(i)=2.1*h1(i);
end
xh1(i)=x(i)+h1(i);
c=mr_nlincon(xh1,varargin{:});
while c
h1(i)=h1(i)*0.9;
xh1(i)=x(i)+h1(i);
c=mr_nlincon(xh1,varargin{:});
ic=1;
end
% c=mr_nlincon(xh1,varargin{:});
% while c
% h1(i)=h1(i)*0.9;
% xh1(i)=x(i)+h1(i);
% c=mr_nlincon(xh1,varargin{:});
% ic=1;
% end
[fx, ffx]=feval(func,xh1,varargin{:});
end
if abs(dx(it))>(2*htol),
@@ -135,21 +136,21 @@ while i<n,
ff1=ffx;
if hflag, % two point based derivatives
xh1(i)=x(i)-h1(i);
c=mr_nlincon(xh1,varargin{:});
ic=0;
while c
h1(i)=h1(i)*0.9;
xh1(i)=x(i)-h1(i);
c=mr_nlincon(xh1,varargin{:});
ic = 1;
end
% c=mr_nlincon(xh1,varargin{:});
% ic=0;
% while c
% h1(i)=h1(i)*0.9;
% xh1(i)=x(i)-h1(i);
% c=mr_nlincon(xh1,varargin{:});
% ic = 1;
% end
[fx, ffx]=feval(func,xh1,varargin{:});
f_1(:,i)=fx;
ff_1=ffx;
if ic,
xh1(i)=x(i)+h1(i);
[f1(:,i), ff1]=feval(func,xh1,varargin{:});
end
% if ic,
% xh1(i)=x(i)+h1(i);
% [f1(:,i), ff1]=feval(func,xh1,varargin{:});
% end
ggh(:,i)=(ff1-ff_1)./(2.*h1(i));
else
ggh(:,i)=(ff1-ff0)./h1(i);
@@ -190,27 +191,28 @@ if hflag==2,
xh_1(j)=x(j)-h_1(j);
%hessian_mat(:,(i-1)*n+j)=-(-feval(func,xh1,varargin{:})-feval(func,xh_1,varargin{:})+temp(:,i)+temp(:,j))./(2*h1(i)*h_1(j));
%temp1 = feval(func,xh1,varargin{:});
c=mr_nlincon(xh1,varargin{:});
lam=1;
while c,
lam=lam*0.9;
xh1(i)=x(i)+h1(i)*lam;
xh1(j)=x(j)+h_1(j)*lam;
%disp( ['hessian warning cross ', num2str(c) ]),
c=mr_nlincon(xh1,varargin{:});
end
temp1 = f0+(feval(func,xh1,varargin{:})-f0)/lam;
% c=mr_nlincon(xh1,varargin{:});
% lam=1;
% while c,
% lam=lam*0.9;
% xh1(i)=x(i)+h1(i)*lam;
% xh1(j)=x(j)+h_1(j)*lam;
% %disp( ['hessian warning cross ', num2str(c) ]),
% c=mr_nlincon(xh1,varargin{:});
% end
% temp1 = f0+(feval(func,xh1,varargin{:})-f0)/lam;
temp1 = feval(func,xh1,varargin{:});
%temp2 = feval(func,xh_1,varargin{:});
c=mr_nlincon(xh_1,varargin{:});
while c,
lam=lam*0.9;
xh_1(i)=x(i)-h1(i)*lam;
xh_1(j)=x(j)-h_1(j)*lam;
%disp( ['hessian warning cross ', num2str(c) ]),
c=mr_nlincon(xh_1,varargin{:});
end
temp2 = f0+(feval(func,xh_1,varargin{:})-f0)/lam;
% c=mr_nlincon(xh_1,varargin{:});
% while c,
% lam=lam*0.9;
% xh_1(i)=x(i)-h1(i)*lam;
% xh_1(j)=x(j)-h_1(j)*lam;
% %disp( ['hessian warning cross ', num2str(c) ]),
% c=mr_nlincon(xh_1,varargin{:});
% end
% temp2 = f0+(feval(func,xh_1,varargin{:})-f0)/lam;
temp2 = feval(func,xh_1,varargin{:});
hessian_mat(:,(i-1)*n+j)=-(-temp1 -temp2+temp(:,i)+temp(:,j))./(2*h1(i)*h_1(j));
xh1(i)=x(i);
@@ -240,19 +242,36 @@ gga=ggh.*kron(ones(size(ff1)),2.*h1'); % re-scaled gradient
hh_mat=gga'*gga; % rescaled outer product hessian
hh_mat0=ggh'*ggh; % outer product hessian
A=diag(2.*h1); % rescaling matrix
if hflag>0 & min(eig(reshape(hessian_mat,n,n)))>0,
hh0 = A*reshape(hessian_mat,n,n)*A'; %rescaled second order derivatives
sd0=sqrt(diag(hh0)); %rescaled 'standard errors' using second order derivatives
sd=sqrt(diag(hh_mat)); %rescaled 'standard errors' using outer product
hh_mat=hh_mat./(sd*sd').*(sd0*sd0'); %rescaled outer product with 'true' std's
hh0 = reshape(hessian_mat,n,n); % second order derivatives
sd0=sqrt(diag(hh0)); % 'standard errors' using second order derivatives
sd=sqrt(diag(hh_mat0)); % 'standard errors' using outer product
hh_mat0=hh_mat0./(sd*sd').*(sd0*sd0'); % rescaled outer product with 'true' std's
end
igg=inv(hh_mat); % inverted rescaled outer product hessian
ihh=A'*igg*A; % inverted outer product hessian
if hflag==0,
if hflag>0 & min(eig(reshape(hessian_mat,n,n)))>0,
hh0 = A*reshape(hessian_mat,n,n)*A'; %rescaled second order derivatives
hh = reshape(hessian_mat,n,n); %rescaled second order derivatives
sd0=sqrt(diag(hh0)); %rescaled 'standard errors' using second order derivatives
sd=sqrt(diag(hh_mat)); %rescaled 'standard errors' using outer product
hh_mat=hh_mat./(sd*sd').*(sd0*sd0'); %rescaled inverse outer product with 'true' std's
igg=inv(hh_mat); % rescaled outer product hessian with 'true' std's
ihh=A'*igg*A; % inverted outer product hessian
hh_mat0=inv(A)'*hh_mat*inv(A); % outer product hessian with 'true' std's
sd=sqrt(diag(ihh)); %standard errors
sdh=sqrt(1./diag(hh)); %diagonal standard errors
for j=1:length(sd),
sd0(j,1)=min(bayestopt_.pstdev(j), sd(j)); %prior std
sd0(j,1)=10^(0.5*(log10(sd0(j,1))+log10(sdh(j,1))));
%sd0(j,1)=0.5*(sd0(j,1)+sdh(j,1));
end
ihh=ihh./(sd*sd').*(sd0*sd0'); %inverse outer product with modified std's
igg=inv(A)'*ihh*inv(A); % inverted rescaled outer product hessian with modified std's
hh_mat=inv(igg); % outer product rescaled hessian with modified std's
hh_mat0=inv(A)'*hh_mat*inv(A); % outer product hessian with modified std's
% sd0=sqrt(1./diag(hh0)); %rescaled 'standard errors' using second order derivatives
% sd=sqrt(diag(igg)); %rescaled 'standard errors' using outer product
% igg=igg./(sd*sd').*(sd0*sd0'); %rescaled inverse outer product with 'true' std's
% hh_mat=inv(igg); % rescaled outer product hessian with 'true' std's
% ihh=A'*igg*A; % inverted outer product hessian
% hh_mat0=inv(A)'*hh_mat*inv(A); % outer product hessian with 'true' std's
end
if hflag<2,
hessian_mat=hh_mat0(:);
end
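The rescalings above all use the same correlation-preserving operation: divide the outer-product Hessian by its own implied 'standard errors' and multiply by the preferred ones, so the correlation structure is kept while the diagonal is rescaled. A minimal sketch with toy numbers (not Dynare output):

% Correlation-preserving rescaling as used in mr_hessian above (toy numbers).
hh_mat = [4.0 1.0; 1.0 2.0];             % outer-product Hessian (illustrative)
sd     = sqrt(diag(hh_mat));             % its own 'standard errors'
sd0    = [1.5; 1.0];                     % preferred 'standard errors', e.g. from second order derivatives
hh_new = hh_mat./(sd*sd').*(sd0*sd0');   % same correlations, diagonal becomes sd0.^2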


@@ -1,146 +1,239 @@
function [xparam1, hh, gg, fval] = newrat(func0,x,hh,gg,flag,varargin)
function [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit, flagg, varargin)
%
% [xparam1, hh, gg, fval] = newrat(func0,x,hh,gg,flag,varargin)
% Copyright (C) 2004 Marco Ratto
%
% Standard Newton search
% [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit, flagg, varargin)
%
% flag = 0, to begin with a pure gradient search (the program shifts to
% pure Newton when improvement of pure gradient is below ftol)
% Optimiser with outer product gradient and 'Gibbs type' steps
% uses Chris Sims' csminit subroutine for the line search
%
% func0 = name of the function
% there must be a version of the function called [func0,'_hh.m'] that also
% returns, as its second output, the per-period contributions at times t=1,...,T
% of the log-likelihood, used to compute the outer product gradient
%
% x = starting guess
% hh = initial Hessian [OPTIONAL]
% gg = initial gradient [OPTIONAL]
% igg = initial inverse Hessian [OPTIONAL]
% ftol0 = ending criterion for function change
% nit = maximum number of iterations
%
% In each iteration, Hessian is computed with outer product gradient.
% for final Hessian (to start Metropolis):
% flagg = 0, final Hessian computed with outer product gradient
% flagg = 1, final 'mixed' Hessian: diagonal elements computed with numerical second order derivatives
% with correlation structure as from outer product gradient,
% flagg = 2, full numerical Hessian
%
% varargin = list of parameters for func0
%
% flag = 1, to start with the complete Newton search
global bayestopt_
icount=0;
nx=length(x);
xparam1=x;
lamtol=1.e-7;
ftol=1.e-5;
options=optimset('fminunc');
options.MaxFunEvals=200;
options.TolFun= 1.0000e-005;
options.MaxIter=1;
options.LargeScale='off';
optim_options=optimset('fminsearch');
optim_options.display='iter';
optim_options.MaxFunEvals=1000;
optim_options.TolFun= 1.0000e-003;
optim_options.TolX= 1.0000e-006;
%ftol0=1.e-6;
flagit=0; % mode of computation of hessian in each iteration
ftol=ftol0;
gtol=1.e-3;
htol=ftol0;
htol0=ftol0;
func_hh = [func0,'_hh'];
func = str2func(func0);
fval0=feval(func,x,varargin{:});
fval=fval0;
if isempty(hh)
[dum, gg]=mr_hessian(func0,x,flag,varargin{:});
hh = reshape(dum,nx,nx);
[dum, gg, htol0, igg, hhg]=mr_hessian(func_hh,x,flagit,htol,varargin{:});
hh0 = reshape(dum,nx,nx);
hh=hhg;
if min(eig(hh0))<0,
hh0=hhg; %generalized_cholesky(hh0);
elseif flagit==2,
hh=hh0;
igg=inv(hh);
end
if htol0>htol,
htol=htol0;
ftol=htol0;
end
else
hh0=hh;
hhg=hh;
igg=inv(hh);
end
disp(['Gradient norm ',num2str(norm(gg))])
disp(['Minimum Hessian eigenvalue ',num2str(min(eig(hh)))])
disp(['Maximum Hessian eigenvalue ',num2str(max(eig(hh)))])
ee=eig(hh);
disp(['Minimum Hessian eigenvalue ',num2str(min(ee))])
disp(['Maximum Hessian eigenvalue ',num2str(max(ee))])
g=gg;
h{1}=hh;
check=0;
if max(eig(hh))<0, disp('Negative definite Hessian! Local maximum!'), pause, end,
while norm(gg)>1.e-3 & check==0,
save m1 x hh g hhg igg fval0
igrad=1;
igibbs=1;
inx=eye(nx);
jit=0;
while norm(gg)>gtol & check==0 & jit<nit,
jit=jit+1;
tic
icount=icount+1;
bayestopt_.penalty = fval0(icount);
disp([' '])
disp(['Iteration ',num2str(icount)])
x0=xparam1-inv(hh)*gg;
c=mr_nlincon(x0,varargin{:},1);
lam=1;
while c
lam=lam*0.9;
x0=xparam1-inv(hh)*gg.*lam;
c=mr_nlincon(x0,varargin{:},1);
end
fval=feval(func,x0,varargin{:});
% if (fval0(icount)-fval)<ftol & flag==0,
% fvala=fval;
% x0a=x0;
% disp('Try to modify Hessian')
% x0=xparam1-inv(gg*gg')*gg;
% c=mr_nlincon(x0,varargin{:},1);
% lam=1;
% while c
% lam=lam*0.9;
% x0=xparam1-inv(gg*gg')*gg.*lam;
% c=mr_nlincon(x0,varargin{:},1);
% end
% fval=feval(func,x0,varargin{:});
% if fvala<=fval,
% x0=x0a;
% fval=fvala;
% end
% end
if (fval0(icount)-fval)<ftol,
disp('Try line search')
[lam,fval,EXITFLAG,OUTPUT,GRAD,HESSIAN]=fminunc(@lsearch, 0, options, func, xparam1, inv(hh)*gg , varargin{:});
x0=xparam1-inv(hh)*gg.*lam;
end
if (fval0(icount)-fval)<ftol & flag==1,
fvala=fval;
x0a=x0;
disp('Try gradient direction')
[lam,fval,EXITFLAG,OUTPUT,GRAD,HESSIAN]=fminunc(@lsearch, 0, options, func, xparam1, gg , varargin{:});
if fvala<=fval,
x0=x0a;
fval=fvala;
[fval x0 fc retcode] = csminit(func0,xparam1,fval0(icount),gg,0,igg,varargin{:});
if igrad,
[fval1 x01 fc retcode1] = csminit(func0,xparam1,fval0(icount),gg,0,inx,varargin{:});
if fval1<fval,
fval=fval1;
x0=x01;
disp('Gradient step!!')
else
x0=xparam1-gg*lam;
if (fval0(icount)-fval)>ftol,
flag=0;
igrad=0;
end
end
if (fval0(icount)-fval)<1.e-2*(gg'*(igg*gg))/2 & igibbs,
[fvala, x0] = mr_gstep(func0,x0,htol,varargin{:});
if (fval-fvala)<5*(fval0(icount)-fval),
igibbs=0;
disp('Last Gibbs step, gain too small!!')
else
disp('Gibbs step!!')
end
fval=fvala;
end
if (fval0(icount)-fval)<ftol & flagit==0,
disp('Try diagonal Hessian')
ihh=diag(1./(diag(hhg)));
[fval2 x02 fc retcode2] = csminit(func2str(func),xparam1,fval0(icount),gg,0,ihh,varargin{:});
if fval2<fval,
x0=x02;
fval=fval2;
if (fval0(icount)-fval2)>=ftol ,
%hh=diag(diag(hh));
disp('Diagonal Hessian successful')
end
end
end
if (fval0(icount)-fval)<ftol,
fvala=fval;
x0a=x0;
disp('Try some simplex iterations')
[x0,fval,EXITFLAG,OUTPUT] = fminsearch(func, xparam1, optim_options, varargin{:});
if fvala<fval,
x0=x0a;
fval=fvala;
else
lam = NaN;
end
if (fval0(icount)-fval)<ftol & flagit==0,
disp('Try gradient direction')
ihh0=inx.*1.e-4;
[fval3 x03 fc retcode3] = csminit(func2str(func),xparam1,fval0(icount),gg,0,ihh0,varargin{:});
if fval3<fval,
x0=x03;
fval=fval3;
if (fval0(icount)-fval3)>=ftol ,
%hh=hh0;
%ihh=ihh0;
disp('Gradient direction successful')
end
end
end
if (fval0(icount)-fval)<ftol*ftol & flag==1;,
% if fvala<fval,
% fval=fvala;
% x0=x0a;
% end
end
xparam1=x0;
x(:,icount+1)=xparam1;
fval0(icount+1)=fval;
%if (fval0(icount)-fval)<ftol*ftol & flagg==1;,
if (fval0(icount)-fval)<ftol,
disp('No further improvement is possible!')
check=1;
else
if (fval0(icount)-fval)<ftol & flag==0,
flag=1;
if flagit==2,
hh=hh0;
elseif flagg>0,
[dum, gg, htol0, igg, hhg]=mr_hessian(func_hh,xparam1,flagg,ftol0,varargin{:});
if flagg==2,
hh = reshape(dum,nx,nx);
ee=eig(hh);
if min(ee)<0
hh=hhg;
end
else
hh=hhg;
end
end
xparam1=x0;
x(:,icount+1)=xparam1;
fval0(icount+1)=fval;
disp(['LAMBDA ',num2str(lam)])
%disp(['DX norm ',num2str(norm(inv(hh)*gg.*lam))])
disp(['DX norm ',num2str(norm(x(:,end)-x(:,end-1)))])
disp(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))])
disp(['FVAL ',num2str(fval)])
disp(['Improvement ',num2str(fval0(icount)-fval)])
disp(['Ftol ',num2str(ftol)])
disp(['Htol ',num2str(htol0)])
disp(['Gradient norm ',num2str(norm(gg))])
ee=eig(hh);
disp(['Minimum Hessian eigenvalue ',num2str(min(ee))])
disp(['Maximum Hessian eigenvalue ',num2str(max(ee))])
g(:,icount+1)=gg;
else
df = fval0(icount)-fval;
disp(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))])
disp(['FVAL ',num2str(fval)])
disp(['Improvement ',num2str(df)])
disp(['Ftol ',num2str(ftol)])
disp(['Htol ',num2str(htol0)])
if df<htol0,
htol=max(ftol0,df/10);
end
if norm(x(:,icount)-xparam1)>1.e-12,
%[dum, gg]=hessian('mj_optmumlik',xparam1,gend,data,1);
[dum, gg]=mr_hessian(func0,xparam1,flag,varargin{:});
hh = reshape(dum,nx,nx);
save m1 x fval0 -append
[dum, gg, htol0, igg, hhg]=mr_hessian(func_hh,xparam1,flagit,htol,varargin{:});
if htol0>ftol,
ftol=htol0;
htol=htol0;
disp(' ')
disp('Numerical noise in the likelihood')
disp('Tolerance has to be relaxed')
disp(' ')
elseif htol0<ftol,
ftol=max(htol0, ftol0);
end
hh0 = reshape(dum,nx,nx);
hh=hhg;
if flagit==2,
if min(eig(hh0))<=0,
hh0=hhg; %generalized_cholesky(hh0);
else
hh=hh0;
igg=inv(hh);
end
end
end
disp(['Gradient norm ',num2str(norm(gg))])
disp(['Minimum Hessian eigenvalue ',num2str(min(eig(hh)))])
disp(['Maximum Hessian eigenvalue ',num2str(max(eig(hh)))])
ee=eig(hh);
disp(['Minimum Hessian eigenvalue ',num2str(min(ee))])
disp(['Maximum Hessian eigenvalue ',num2str(max(ee))])
if max(eig(hh))<0, disp('Negative definite Hessian! Local maximum!'), pause, end,
t=toc;
disp(['Elapsed time for iteration ',num2str(t),' s.'])
h{icount+1}=hh;
g(:,icount+1)=gg;
save m1 x h g fval0
g(:,icount+1)=gg;
save m1 x hh g hhg igg fval0
end
end
save m1 x hh g hhg igg fval0
if ftol>ftol0,
disp(' ')
disp('Numerical noise in the likelihood')
disp('Tolerance had to be relaxed')
disp(' ')
end
if jit==nit,
disp(' ')
disp('Maximum number of iterations reached')
disp(' ')
end
if norm(gg)<=gtol,
disp(['Estimation ended:'])
disp(['Gradient norm < ', num2str(gtol)])
end
if check==1,
disp(['Estimation successful.'])
end
return
%


@@ -33,7 +33,8 @@ if options_.linear == 0
if exist([M_.fname '_steadystate'])
[dr.ys,check1] = feval([M_.fname '_steadystate'],dr.ys,oo_.exo_steady_state);
else
[dr.ys,check1] = dynare_solve(fh,dr.ys,oo_.exo_steady_state);
%[dr.ys,check1] = dynare_solve(fh,dr.ys,oo_.exo_steady_state);
[dr.ys,check1] = dynare_solve(fh,dr.ys,1,oo_.exo_steady_state);
end
if check1
info(1) = 20;


@@ -20,7 +20,8 @@ function x0 = stab_map_(Nsam, fload, alpha2, alpha)
% x0: one parameter vector for which the model is stable.
%
% GRAPHS
% 1) Histograms of marginal distributions under the stability regions
% 1) Pdf's of marginal distributions under the stability (dotted
% lines) and instability (solid lines) regions
% 2) Cumulative distributions of:
% - stable subset (dotted lines)
% - unstable subset (solid lines)
@@ -47,8 +48,13 @@ function x0 = stab_map_(Nsam, fload, alpha2, alpha)
%global bayestopt_ estim_params_ dr_ options_ ys_ fname_
global bayestopt_ estim_params_ options_ oo_ M_
ys_ = oo_.dr.ys;
dr_ = oo_.dr;
if isfield(dr_,'ghx'),
ys_ = oo_.dr.ys;
nspred = size(dr_.ghx,2);
nboth = dr_.nboth;
nfwrd = dr_.nfwrd;
end
fname_ = M_.fname;
nshock = estim_params_.nvx;
@@ -110,6 +116,11 @@ if fload==0 | nargin<2 | isempty(fload),
%egg(:,j) = sort(dr_.eigval);
if isfield(dr_,'eigval'),
egg(:,j) = sort(dr_.eigval);
if ~exist('nspred')
nspred = size(dr_.ghx,2);
nboth = dr_.nboth;
nfwrd = dr_.nfwrd;
end
else
egg(:,j)=ones(size(egg,1),1).*1.1;
end
@@ -123,9 +134,10 @@ if fload==0 | nargin<2 | isempty(fload),
% map stable samples
ix=[1:Nsam];
for j=1:Nsam,
if abs(egg(dr_.npred,j))>=options_.qz_criterium; %(1-(options_.qz_criterium-1)); %1-1.e-5;
if abs(egg(nspred,j))>=options_.qz_criterium; %(1-(options_.qz_criterium-1)); %1-1.e-5;
ix(j)=0;
elseif (dr_.nboth | dr_.nfwrd) & abs(egg(dr_.npred+1,j))<=options_.qz_criterium; %1+1.e-5;
%elseif (dr_.nboth | dr_.nfwrd) & abs(egg(nspred+1,j))<=options_.qz_criterium; %1+1.e-5;
elseif (nboth | nfwrd) & abs(egg(nspred+1,j))<=options_.qz_criterium; %1+1.e-5;
ix(j)=0;
end
end
@@ -135,12 +147,13 @@ if fload==0 | nargin<2 | isempty(fload),
ixx=[1:Nsam];
for j=1:Nsam,
%if abs(egg(dr_.npred+1,j))>1+1.e-5 & abs(egg(dr_.npred,j))<1-1.e-5;
if (dr_.nboth | dr_.nfwrd),
if abs(egg(dr_.npred+1,j))>options_.qz_criterium & abs(egg(dr_.npred,j))<options_.qz_criterium; %(1-(options_.qz_criterium-1));
%if (dr_.nboth | dr_.nfwrd),
if (nboth | nfwrd),
if abs(egg(nspred+1,j))>options_.qz_criterium & abs(egg(nspred,j))<options_.qz_criterium; %(1-(options_.qz_criterium-1));
ixx(j)=0;
end
else
if abs(egg(dr_.npred,j))<options_.qz_criterium; %(1-(options_.qz_criterium-1));
if abs(egg(nspred,j))<options_.qz_criterium; %(1-(options_.qz_criterium-1));
ixx(j)=0;
end
end
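The two loops above classify each Monte Carlo draw by comparing the sorted eigenvalue moduli in egg with options_.qz_criterium: a draw is stable when the nspred-th eigenvalue is below the threshold and, whenever forward-looking variables are present (nboth | nfwrd), the (nspred+1)-th one is above it. A compact sketch of that check for a single draw j, assuming egg, nspred, nboth, nfwrd and options_ are already in the workspace:

% Blanchard-Kahn style stability check for draw j, as in the loops above.
stable = abs(egg(nspred,j)) < options_.qz_criterium;
if (nboth | nfwrd),
    stable = stable & abs(egg(nspred+1,j)) > options_.qz_criterium;
end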
@@ -211,9 +224,9 @@ disp('Starting bivariate analysis:')
c0=corrcoef(lpmat(ix,:));
c00=tril(c0,-1);
stab_map_2(lpmat(ix,:),alpha2, 1);
stab_map_2(lpmat(ixx,:),alpha2, 0);
stab_map_2(lpmat(ix,:),alpha2, 1);
stab_map_2(lpmat(ixx,:),alpha2, 0);
else
if length(ixx)==0,
disp('All parameter values in the prior ranges are stable!')
@@ -228,7 +241,7 @@ end
% thex=[];
% for j=1:Nsam,
% %cyc(j)=max(abs(imag(egg(1:34,j))));
% ic = find(imag(egg(1:dr_.npred,j)));
% ic = find(imag(egg(1:nspred,j)));
% i=find( abs(egg( ic ,j) )>0.9); %only consider complex dominant eigenvalues
% if ~isempty(i),
% i=i(1:2:end);