diff --git a/matlab/DsgeLikelihood_hh.m b/matlab/DsgeLikelihood_hh.m
index f12fe6ca6..896b474f6 100644
--- a/matlab/DsgeLikelihood_hh.m
+++ b/matlab/DsgeLikelihood_hh.m
@@ -1,15 +1,15 @@
-function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood_hh(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations)
-% function [fval,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations)
-% Evaluates the posterior kernel of a dsge model.
-%
-% INPUTS
-% xparam1 [double] vector of model parameters.
-% gend [integer] scalar specifying the number of observations.
-% data [double] matrix of data
-% data_index [cell] cell of column vectors
-% number_of_observations [integer]
-% no_more_missing_observations [integer]
-% OUTPUTS
+function [fval,llik,exit_flag,ys,trend_coeff,info] = DsgeLikelihood_hh(xparam1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults)
+% Evaluates the posterior kernel of a dsge model.
+%
+% INPUTS
+% xparam1             [double]    vector of model parameters.
+% DynareDataset       [structure] dataset and information about the missing observations.
+% DynareOptions       [structure] Dynare options (replaces the global options_).
+% Model               [structure] model description (replaces the global M_).
+% EstimatedParameters [structure] estimated parameters (replaces the global estim_params_).
+% BayesInfo           [structure] prior definitions (replaces the global bayestopt_).
+% DynareResults       [structure] results (replaces the global oo_).
+% OUTPUTS
% fval : value of the posterior kernel at xparam1.
-% cost_flag : zero if the function returns a penalty, one otherwise.
+% llik : vector of the single period contributions to the likelihood (the prior density comes first).
+% exit_flag : zero if the function returns a penalty, one otherwise.
% ys : steady state of original endogenous variables
@@ -17,7 +17,7 @@ function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood_hh(xparam1,g
-% info : vector of informations about the penalty:
-% 41: one (many) parameter(s) do(es) not satisfied the lower bound
-% 42: one (many) parameter(s) do(es) not satisfied the upper bound
+% info : vector of information about the penalty:
+% 41: one (or more) parameter(s) do(es) not satisfy the lower bound
+% 42: one (or more) parameter(s) do(es) not satisfy the upper bound
-%
+%
% SPECIAL REQUIREMENTS
%
@@ -38,234 +38,360 @@ function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood_hh(xparam1,g
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
-global bayestopt_ estim_params_ options_ trend_coeff_ M_ oo_
+
+% Declaration of the penalty as a persistent variable.
+persistent penalty
+
+% Initialization of the persistent variable.
+if ~nargin || isempty(penalty)
+ penalty = 1e8;
+ if ~nargin, return, end
+end
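+% A call with a single input argument is a special case: it updates the
+% persistent penalty to the value passed by the caller and returns immediately.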
+if nargin==1
+ penalty = xparam1;
+ return
+end
+
fval = [];
ys = [];
trend_coeff = [];
-cost_flag = 1;
+exit_flag = 1;
-nobs = size(options_.varobs,1);
llik=NaN;
+
+if DynareOptions.block == 1
+ error('DsgeLikelihood_hh:: This routine (called if mode_compute==5) is not compatible with the block option!')
+end
+
%------------------------------------------------------------------------------
% 1. Get the structural parameters & define penalties
%------------------------------------------------------------------------------
-if ~isequal(options_.mode_compute,1) && any(xparam1 < bayestopt_.lb)
- k = find(xparam1 < bayestopt_.lb);
- fval = bayestopt_.penalty+sum((bayestopt_.lb(k)-xparam1(k)).^2);
- cost_flag = 0;
+
+% Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
+if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BayesInfo.lb)
+    k = find(xparam1<BayesInfo.lb);
+    fval = penalty+sum((BayesInfo.lb(k)-xparam1(k)).^2);
+    exit_flag = 0;
    info = 41;
-    return;
+    return
end
-if ~isequal(options_.mode_compute,1) && any(xparam1 > bayestopt_.ub)
- k = find(xparam1 > bayestopt_.ub);
- fval = bayestopt_.penalty+sum((xparam1(k)-bayestopt_.ub(k)).^2);
- cost_flag = 0;
+
+% Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
+if ~isequal(DynareOptions.mode_compute,1) && any(xparam1>BayesInfo.ub)
+ k = find(xparam1>BayesInfo.ub);
+ fval = penalty+sum((xparam1(k)-BayesInfo.ub(k)).^2);
+ exit_flag = 0;
info = 42;
- return;
+ return
end
-Q = M_.Sigma_e;
-H = M_.H;
-for i=1:estim_params_.nvx
- k =estim_params_.var_exo(i,1);
+
+% Get the diagonal elements of the covariance matrices for the structural innovations (Q) and the measurement error (H).
+Q = Model.Sigma_e;
+H = Model.H;
+for i=1:EstimatedParameters.nvx
+ k =EstimatedParameters.var_exo(i,1);
Q(k,k) = xparam1(i)*xparam1(i);
end
-offset = estim_params_.nvx;
-if estim_params_.nvn
- for i=1:estim_params_.nvn
- k = estim_params_.var_endo(i,1);
+offset = EstimatedParameters.nvx;
+if EstimatedParameters.nvn
+ for i=1:EstimatedParameters.nvn
+ k = EstimatedParameters.var_endo(i,1);
H(k,k) = xparam1(i+offset)*xparam1(i+offset);
end
- offset = offset+estim_params_.nvn;
+ offset = offset+EstimatedParameters.nvn;
+else
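+    % No measurement errors are estimated: the covariance matrix of the
+    % measurement errors is identically zero.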
+ H = zeros(DynareDataset.info.nvobs);
end
-if estim_params_.ncx
- for i=1:estim_params_.ncx
- k1 =estim_params_.corrx(i,1);
- k2 =estim_params_.corrx(i,2);
+
+% Get the off-diagonal elements of the covariance matrix for the structural innovations. Test if Q is positive definite.
+if EstimatedParameters.ncx
+ for i=1:EstimatedParameters.ncx
+ k1 =EstimatedParameters.corrx(i,1);
+ k2 =EstimatedParameters.corrx(i,2);
Q(k1,k2) = xparam1(i+offset)*sqrt(Q(k1,k1)*Q(k2,k2));
Q(k2,k1) = Q(k1,k2);
end
+ % Try to compute the cholesky decomposition of Q (possible iff Q is positive definite)
[CholQ,testQ] = chol(Q);
- if testQ %% The variance-covariance matrix of the structural innovations is not definite positive.
- %% We have to compute the eigenvalues of this matrix in order to build the penalty.
+ if testQ
+        % The variance-covariance matrix of the structural innovations is not positive definite. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty.
a = diag(eig(Q));
k = find(a < 0);
if k > 0
- fval = bayestopt_.penalty+sum(-a(k));
- cost_flag = 0;
+            fval = penalty+sum(-a(k));
+ exit_flag = 0;
info = 43;
return
end
end
- offset = offset+estim_params_.ncx;
+ offset = offset+EstimatedParameters.ncx;
end
-if estim_params_.ncn
- for i=1:estim_params_.ncn
- k1 = options_.lgyidx2varobs(estim_params_.corrn(i,1));
- k2 = options_.lgyidx2varobs(estim_params_.corrn(i,2));
+
+% Get the off-diagonal elements of the covariance matrix for the measurement errors. Test if H is positive definite.
+if EstimatedParameters.ncn
+ for i=1:EstimatedParameters.ncn
+ k1 = DynareOptions.lgyidx2varobs(EstimatedParameters.corrn(i,1));
+ k2 = DynareOptions.lgyidx2varobs(EstimatedParameters.corrn(i,2));
H(k1,k2) = xparam1(i+offset)*sqrt(H(k1,k1)*H(k2,k2));
H(k2,k1) = H(k1,k2);
end
+ % Try to compute the cholesky decomposition of H (possible iff H is positive definite)
[CholH,testH] = chol(H);
if testH
+        % The variance-covariance matrix of the measurement errors is not positive definite. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty.
a = diag(eig(H));
k = find(a < 0);
if k > 0
- fval = bayestopt_.penalty+sum(-a(k));
- cost_flag = 0;
+            fval = penalty+sum(-a(k));
+ exit_flag = 0;
info = 44;
return
end
end
- offset = offset+estim_params_.ncn;
+ offset = offset+EstimatedParameters.ncn;
end
-if estim_params_.np > 0
- M_.params(estim_params_.param_vals(:,1)) = xparam1(offset+1:end);
+
+% Update the estimated structural parameters in Model.params.
+if EstimatedParameters.np > 0
+ Model.params(EstimatedParameters.param_vals(:,1)) = xparam1(offset+1:end);
end
-M_.Sigma_e = Q;
-M_.H = H;
+
+% Update Model.Sigma_e and Model.H.
+Model.Sigma_e = Q;
+Model.H = H;
+
%------------------------------------------------------------------------------
% 2. call model setup & reduction program
%------------------------------------------------------------------------------
-[T,R,SteadyState,info,M_,options_,oo_] = dynare_resolve(M_,options_,oo_);
-if info(1) == 1 || info(1) == 2 || info(1) == 5
- fval = bayestopt_.penalty+1;
- cost_flag = 0;
+
+% Linearize the model around the deterministic steady state and extract the matrices of the state equation (T and R).
+[T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
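+% Note: the 'restrict' flag asks dynare_resolve to return the transition matrices
+% for the restricted state space, i.e. only the variables needed by the filter.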
+
+% Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
+if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 22 || info(1) == 24
+ fval = penalty+1;
+ info = info(1);
+ exit_flag = 0;
return
-elseif info(1) == 3 || info(1) == 4 || info(1)==6 ||info(1) == 19 || info(1) == 20 || info(1) == 21
- fval = bayestopt_.penalty+info(2);
- cost_flag = 0;
+elseif info(1) == 3 || info(1) == 4 || info(1)==6 ||info(1) == 19 || info(1) == 20 || info(1) == 21 || info(1) == 23
+ fval = penalty+info(2);
+ info = info(1);
+ exit_flag = 0;
return
end
-bayestopt_.mf = bayestopt_.mf1;
-if options_.noconstant
- constant = zeros(nobs,1);
-else
- if options_.loglinear
- constant = log(SteadyState(bayestopt_.mfys));
+
+% Define a vector of indices for the observed variables. Is this really useful?...
+BayesInfo.mf = BayesInfo.mf1;
+
+% Define the constant vector of the measurement equation.
+if DynareOptions.noconstant
+ constant = zeros(DynareDataset.info.nvobs,1);
+else
+ if DynareOptions.loglinear
+ constant = log(SteadyState(BayesInfo.mfys));
else
- constant = SteadyState(bayestopt_.mfys);
+ constant = SteadyState(BayesInfo.mfys);
end
end
-if bayestopt_.with_trend
- trend_coeff = zeros(nobs,1);
- t = options_.trend_coeffs;
+
+% Define the deterministic linear trend of the measurement equation.
+if BayesInfo.with_trend
+ trend_coeff = zeros(DynareDataset.info.nvobs,1);
+ t = DynareOptions.trend_coeffs;
for i=1:length(t)
if ~isempty(t{i})
trend_coeff(i) = evalin('base',t{i});
end
end
- trend = repmat(constant,1,gend)+trend_coeff*[1:gend];
+ trend = repmat(constant,1,DynareDataset.info.ntobs)+trend_coeff*[1:DynareDataset.info.ntobs];
else
- trend = repmat(constant,1,gend);
+ trend = repmat(constant,1,DynareDataset.info.ntobs);
end
-start = options_.presample+1;
-np = size(T,1);
-mf = bayestopt_.mf;
-no_missing_data_flag = (number_of_observations==gend*nobs);
+
+% Get the information needed by the Kalman filter routines.
+start = DynareOptions.presample+1;
+Z = BayesInfo.mf; % old mf
+no_missing_data_flag = ~DynareDataset.missing.state;
+mm = length(T); % old np
+pp = DynareDataset.info.nvobs;
+rr = length(Q);
+kalman_tol = DynareOptions.kalman_tol;
+riccati_tol = DynareOptions.riccati_tol;
+Y = DynareDataset.data-trend;
+no_correlation_flag = all(all(abs(H-diag(diag(H)))<1e-14)); % 1 if the measurement errors are uncorrelated, 0 otherwise.
+
%------------------------------------------------------------------------------
% 3. Initial condition of the Kalman filter
%------------------------------------------------------------------------------
-kalman_algo = options_.kalman_algo;
-if options_.lik_init == 1 % Kalman filter
- if kalman_algo ~= 2
+
+kalman_algo = DynareOptions.kalman_algo;
+diffuse_periods = 0;
+switch DynareOptions.lik_init
+ case 1% Standard initialization with the steady state of the state equation.
+ if kalman_algo~=2
+        % Use the standard kalman filter unless the univariate filter is explicitly chosen.
kalman_algo = 1;
end
- Pstar = lyapunov_symm(T,R*Q*R',options_.qz_criterium,options_.lyapunov_complex_threshold);
- Pinf = [];
-elseif options_.lik_init == 2 % Old Diffuse Kalman filter
+ Pstar = lyapunov_symm(T,R*Q*R',DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold);
+ Pinf = [];
+ a = zeros(mm,1);
+ Zflag = 0;
+  case 2% Initialization with large numbers on the diagonal of the covariance matrix of the states (for nonstationary models).
if kalman_algo ~= 2
+        % Use the standard kalman filter unless the univariate filter is explicitly chosen.
kalman_algo = 1;
end
- Pstar = options_.Harvey_scale_factor*eye(np);
- Pinf = [];
-elseif options_.lik_init == 3 % Diffuse Kalman filter
+ Pstar = DynareOptions.Harvey_scale_factor*eye(mm);
+ Pinf = [];
+ a = zeros(mm,1);
+ Zflag = 0;
+ case 3% Diffuse Kalman filter (Durbin and Koopman)
if kalman_algo ~= 4
+        % Use the multivariate diffuse kalman filter unless the univariate diffuse filter is explicitly chosen.
kalman_algo = 3;
end
- [Z,ST,R1,QT,Pstar,Pinf] = schur_statespace_transformation(mf,T,R,Q,options_.qz_criterium);
+ [Z,T,R,QT,Pstar,Pinf] = schur_statespace_transformation(Z,T,R,Q,DynareOptions.qz_criterium);
+ Zflag = 1;
+ % Run diffuse kalman filter on first periods.
+ if (kalman_algo==3)
+ % Multivariate Diffuse Kalman Filter
+ if no_missing_data_flag
+ [dLIK,dlik,a,Pstar] = kalman_filter_d(Y, 1, size(Y,2), ...
+ zeros(mm,1), Pinf, Pstar, ...
+ kalman_tol, riccati_tol, DynareOptions.presample, ...
+ T,R,Q,H,Z,mm,pp,rr);
+ else
+ [dLIK,dlik,a,Pstar] = missing_observations_kalman_filter_d(DynareDataset.missing.aindex,DynareDataset.missing.number_of_observations,DynareDataset.missing.no_more_missing_observations, ...
+ Y, 1, size(Y,2), ...
+ zeros(mm,1), Pinf, Pstar, ...
+ kalman_tol, riccati_tol, DynareOptions.presample, ...
+ T,R,Q,H,Z,mm,pp,rr);
+ end
+ diffuse_periods = length(dlik);
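+            % The regular (non diffuse) filter below starts after these first observations.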
+ if isinf(dLIK)
+ % Go to univariate diffuse filter if singularity problem.
+ kalman_algo = 4;
+ end
+ end
+ if (kalman_algo==4)
+ % Univariate Diffuse Kalman Filter
+ if no_correlation_flag
+ mmm = mm;
+ else
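+                % Correlated measurement errors: append them to the state vector, so that
+                % the univariate filter (which requires uncorrelated measurement errors)
+                % can be applied to the augmented state space representation.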
+ Z = [Z, eye(pp)];
+ T = blkdiag(T,zeros(pp));
+ Q = blkdiag(Q,H);
+ R = blkdiag(R,eye(pp));
+ Pstar = blkdiag(Pstar,H);
+                Pinf = blkdiag(Pinf,zeros(pp));
+ mmm = mm+pp;
+ end
+ [dLIK,dlik,a,Pstar] = univariate_kalman_filter_d(DynareDataset.missing.aindex,DynareDataset.missing.number_of_observations,DynareDataset.missing.no_more_missing_observations, ...
+ Y, 1, size(Y,2), ...
+ zeros(mmm,1), Pinf, Pstar, ...
+ kalman_tol, riccati_tol, DynareOptions.presample, ...
+ T,R,Q,H,Z,mmm,pp,rr);
+ diffuse_periods = length(dlik);
+ end
+ case 4% Start from the solution of the Riccati equation.
+ if kalman_algo ~= 2
+ kalman_algo = 1;
+ end
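+        % kalman_steady_state solves the associated discrete algebraic Riccati equation,
+        % so the filter starts from the stationary covariance matrix of the states.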
+        if isequal(H,0)
+            [err,Pstar] = kalman_steady_state(transpose(T),R*Q*transpose(R),transpose(build_selection_matrix(Z,mm,pp)));
+        else
+            [err,Pstar] = kalman_steady_state(transpose(T),R*Q*transpose(R),transpose(build_selection_matrix(Z,mm,pp)),H);
+        end
+ if err
+            disp(['DsgeLikelihood_hh:: I am not able to solve the Riccati equation, so I switch to lik_init=1!']);
+ DynareOptions.lik_init = 1;
+ Pstar = lyapunov_symm(T,R*Q*R',DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold);
+ end
+        Pinf = [];
+        a = zeros(mm,1);
+        Zflag = 0;
+ otherwise
+        error('DsgeLikelihood_hh:: Unknown initialization approach for the Kalman filter!')
end
-kalman_tol = options_.kalman_tol;
-riccati_tol = options_.riccati_tol;
-mf = bayestopt_.mf1;
-Y = data-trend;
+
%------------------------------------------------------------------------------
% 4. Likelihood evaluation
%------------------------------------------------------------------------------
-if (kalman_algo==1)% Multivariate Kalman Filter
+
+singularity_flag = 0;
+
+if ((kalman_algo==1) || (kalman_algo==3))% Multivariate Kalman Filter
if no_missing_data_flag
- [LIK, lik] = kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol);
+ [LIK,lik] = kalman_filter(Y,diffuse_periods+1,size(Y,2), ...
+ a,Pstar, ...
+ kalman_tol, riccati_tol, ...
+ DynareOptions.presample, ...
+ T,Q,R,H,Z,mm,pp,rr,Zflag,diffuse_periods);
else
- [LIK, lik] = ...
- missing_observations_kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol, ...
- data_index,number_of_observations,no_more_missing_observations);
+ [LIK,lik] = missing_observations_kalman_filter(DynareDataset.missing.aindex,DynareDataset.missing.number_of_observations,DynareDataset.missing.no_more_missing_observations,Y,diffuse_periods+1,size(Y,2), ...
+ a, Pstar, ...
+ kalman_tol, DynareOptions.riccati_tol, ...
+ DynareOptions.presample, ...
+ T,Q,R,H,Z,mm,pp,rr,Zflag,diffuse_periods);
end
if isinf(LIK)
- kalman_algo = 2;
- end
-end
-if (kalman_algo==2)% Univariate Kalman Filter
- no_correlation_flag = 1;
- if isequal(H,0)
- H = zeros(nobs,1);
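+        % The multivariate filter failed (singular forecast error covariance matrix):
+        % remember it and fall back on the univariate filter below.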
+ singularity_flag = 1;
else
- if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
- H = diag(H);
- else
- no_correlation_flag = 0;
+ if DynareOptions.lik_init==3
+ LIK = LIK + dLIK;
+ lik = [dlik; lik];
end
end
- if no_correlation_flag
- [LIK, lik] = univariate_kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol,data_index,number_of_observations,no_more_missing_observations);
- else
- [LIK, lik] = univariate_kalman_filter_corr(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol,data_index,number_of_observations,no_more_missing_observations);
- end
end
-if (kalman_algo==3)% Multivariate Diffuse Kalman Filter
- if no_missing_data_flag
- [LIK, lik] = diffuse_kalman_filter(ST,R1,Q,H,Pinf,Pstar,Y,start,Z,kalman_tol, ...
- riccati_tol);
- else
- [LIK, lik] = missing_observations_diffuse_kalman_filter(ST,R1,Q,H,Pinf, ...
- Pstar,Y,start,Z,kalman_tol,riccati_tol,...
- data_index,number_of_observations,...
- no_more_missing_observations);
- end
- if isinf(LIK)
- kalman_algo = 4;
- end
-end
-if (kalman_algo==4)% Univariate Diffuse Kalman Filter
- no_correlation_flag = 1;
- if isequal(H,0)
- H = zeros(nobs,1);
- else
- if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
- H = diag(H);
+
+if ( (singularity_flag) || (kalman_algo==2) || (kalman_algo==4) )% Univariate Kalman Filter
+    if singularity_flag || (kalman_algo==2)
+        if no_correlation_flag
+ mmm = mm;
else
- no_correlation_flag = 0;
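+            % Same state space augmentation as in the diffuse case above: the
+            % correlated measurement errors are appended to the state vector.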
+ Z = [Z, eye(pp)];
+ T = blkdiag(T,zeros(pp));
+ Q = blkdiag(Q,H);
+ R = blkdiag(R,eye(pp));
+ Pstar = blkdiag(Pstar,H);
+            Pinf = blkdiag(Pinf,zeros(pp));
+ mmm = mm+pp;
+ a = [a; zeros(pp,1)];
end
end
- if no_correlation_flag
- [LIK, lik] = univariate_diffuse_kalman_filter(ST,R1,Q,H,Pinf,Pstar,Y, ...
- start,Z,kalman_tol,riccati_tol,data_index,...
- number_of_observations,no_more_missing_observations);
- else
- [LIK, lik] = univariate_diffuse_kalman_filter_corr(ST,R1,Q,H,Pinf,Pstar, ...
- Y,start,Z,kalman_tol,riccati_tol,...
- data_index,number_of_observations,...
- no_more_missing_observations);
+ [LIK,lik] = univariate_kalman_filter(DynareDataset.missing.aindex,DynareDataset.missing.number_of_observations,DynareDataset.missing.no_more_missing_observations,Y,diffuse_periods+1,size(Y,2), ...
+ a,Pstar, ...
+ DynareOptions.kalman_tol, ...
+ DynareOptions.riccati_tol, ...
+ DynareOptions.presample, ...
+ T,Q,R,H,Z,mmm,pp,rr,diffuse_periods);
+ if DynareOptions.lik_init==3
+ LIK = LIK+dLIK;
+ lik = [dlik; lik];
end
end
-if imag(LIK) ~= 0
- likelihood = bayestopt_.penalty;
+
+if isnan(LIK)
+    fval = penalty;
+    info = 45;
+ exit_flag = 0;
+ return
+end
+if imag(LIK)~=0
+ likelihood = penalty;
else
likelihood = LIK;
end
+
% ------------------------------------------------------------------------------
-% Adds prior if necessary
+% 5. Adds prior if necessary
% ------------------------------------------------------------------------------
-lnprior = priordens(xparam1,bayestopt_.pshape,bayestopt_.p6,bayestopt_.p7,bayestopt_.p3,bayestopt_.p4);
+lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
fval = (likelihood-lnprior);
-options_.kalman_algo = kalman_algo;
+
+% Update DynareOptions.kalman_algo.
+DynareOptions.kalman_algo = kalman_algo;
+
+% Update the penalty.
+penalty = fval;
+
+% Add the prior density at the top of the vector for the density of each observation.
lik=lik(start:end,:);
-llik=[-lnprior; lik(:)];
-% llik=[-lnprior; lik(start:end)];
+llik=[-lnprior; lik(:)];
\ No newline at end of file
diff --git a/matlab/mr_gstep.m b/matlab/mr_gstep.m
index 51ec55f40..5c2e3d478 100644
--- a/matlab/mr_gstep.m
+++ b/matlab/mr_gstep.m
@@ -1,6 +1,6 @@
-function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin)
+function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults)
% function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin)
-%
+%
% Gibbs type step in optimisation
% Copyright (C) 2006-2011 Dynare Team
@@ -22,13 +22,12 @@ function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin)
n=size(x,1);
-if nargin<4,
+if isempty(htol0)
htol = 1.e-6;
else
htol = htol0;
end
-func = str2func(func0);
-f0=feval(func,x,varargin{:});
+f0=feval(func0,x,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
xh1=x;
f1=zeros(size(f0,1),n);
@@ -36,37 +35,29 @@ f_1=f1;
i=0;
ig=zeros(n,1);
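+% Loop over the parameters: for each one in turn take a univariate Newton step
+% (through csminit) whenever the expected improvement exceeds the noise tolerance htol.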
-while i<n,
+while i<n
    i=i+1;
    h10=h1(i);
    hcheck=0;
    xh1(i)=x(i)+h1(i);
-    fx=feval(func,xh1,varargin{:});
+    fx=feval(func0,xh1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
    f1(:,i)=fx;
    xh1(i)=x(i)-h1(i);
-    fx=feval(func,xh1,varargin{:});
+    fx=feval(func0,xh1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
    f_1(:,i)=fx;
    gg=zeros(size(x));
    hh=gg;
    gg(i)=(f1(i)-f_1(i))./(2.*h1(i));
    hh(i) = 1/max(1.e-9,abs( (f1(i)+f_1(i)-2*f0)./(h1(i)*h1(i)) ));
-%     if abs(f1(i)+f_1(i)-2*f0)>1.e-12,
-% hh(i) = abs(1/( (f1(i)+f_1(i)-2*f0)./(h1(i)*h1(i)) ));
-% else
-% hh(i) = 1;
-% end
-
- if gg(i)*(hh(i)*gg(i))/2 > htol,
- [f0 x fc retcode] = csminit(func0,x,f0,gg,0,diag(hh),varargin{:});
+ if gg(i)*(hh(i)*gg(i))/2 > htol
+ [f0 x fc retcode] = csminit(func0,x,f0,gg,0,diag(hh),DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
ig(i)=1;
end
xh1=x;
diff --git a/matlab/mr_hessian.m b/matlab/mr_hessian.m
index ec2c04cd2..1afb15802 100644
--- a/matlab/mr_hessian.m
+++ b/matlab/mr_hessian.m
@@ -1,12 +1,12 @@
-function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hflag,htol0,varargin)
+function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hflag,htol0,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults)
% [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hflag,htol0,varargin)
%
% numerical gradient and Hessian, with 'automatic' check of numerical
-% error
+% error
%
-% adapted from Michel Juillard original rutine hessian.m
+% adapted from Michel Juillard's original routine hessian.m
%
-% func = name of the function: func must give two outputs:
+% func = name of the function: func must give two outputs:
% - the log-likelihood AND the single contributions at times t=1,...,T
% of the log-likelihood to compute outer product gradient
% x = parameter values
@@ -41,26 +41,24 @@ function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hf
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
-global options_ bayestopt_
persistent h1 htol
n=size(x,1);
-if init,
- gstep_=options_.gstep;
+if init
+ gstep_=DynareOptions.gstep;
htol = 1.e-4;
- %h1=max(abs(x),sqrt(gstep_)*ones(n,1))*eps^(1/4);
- h1=options_.gradient_epsilon*ones(n,1);
- return,
+ h1=DynareOptions.gradient_epsilon*ones(n,1);
+ return
end
-func = str2func(func);
-[f0, ff0]=feval(func,x,varargin{:});
-h2=bayestopt_.ub-bayestopt_.lb;
-hmax=bayestopt_.ub-x;
-hmax=min(hmax,x-bayestopt_.lb);
+
+[f0, ff0]=feval(func,x,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+h2=BayesInfo.ub-BayesInfo.lb;
+hmax=BayesInfo.ub-x;
+hmax=min(hmax,x-BayesInfo.lb);
h1 = min(h1,0.5.*hmax);
-if htol0<htol,
+if htol0<htol
    htol=htol0;
end
xh1=x;
f1=zeros(size(f0,1),n);
f_1=f1;
ggh=zeros(size(ff0,1),n);
i=0;
-while i<n,
+while i<n
    i=i+1;
    h10=h1(i);
    hcheck=0;
    xh1(i)=x(i)+h1(i);
    try
-        [fx, ffx]=feval(func,xh1,varargin{:});
+        [fx, ffx]=feval(func,xh1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
    catch
        fx=1.e8;
    end
    it=1;
    dx=(fx-f0);
    ic=0;
    icount = 0;
    h0=h1(i);
-    while (abs(dx(it))<0.5*htol || abs(dx(it))>(3*htol)) && icount<10 && ic==0,
- %while abs(dx(it))<0.5*htol && icount< 10 && ic==0,
+ while (abs(dx(it))<0.5*htol || abs(dx(it))>(3*htol)) && icount<10 && ic==0
icount=icount+1;
if abs(dx(it))<0.5*htol
if abs(dx(it)) ~= 0,
@@ -99,51 +95,51 @@ while i<n
-        if abs(dx(it))>(3*htol),
+ if abs(dx(it))>(3*htol)
h1(i)= htol/abs(dx(it))*h1(i);
xh1(i)=x(i)+h1(i);
try
- [fx, ffx]=feval(func,xh1,varargin{:});
+ [fx, ffx]=feval(func,xh1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
catch
fx=1.e8;
end
- while (fx-f0)==0,
+ while (fx-f0)==0
h1(i)= h1(i)*2;
xh1(i)=x(i)+h1(i);
- [fx, ffx]=feval(func,xh1,varargin{:});
+ [fx, ffx]=feval(func,xh1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
ic=1;
end
end
it=it+1;
dx(it)=(fx-f0);
h0(it)=h1(i);
- if (h1(i)<1.e-12*min(1,h2(i)) && h1(i)<0.5*hmax(i)),% || (icount==10 && abs(dx(it))>(3*htol)),
+ if (h1(i)<1.e-12*min(1,h2(i)) && h1(i)<0.5*hmax(i))% || (icount==10 && abs(dx(it))>(3*htol)),
ic=1;
hcheck=1;
end
end
f1(:,i)=fx;
- if any(isnan(ffx)),
+ if any(isnan(ffx))
ff1=ones(size(ff0)).*fx/length(ff0);
else
ff1=ffx;
end
xh1(i)=x(i)-h1(i);
- [fx, ffx]=feval(func,xh1,varargin{:});
+ [fx, ffx]=feval(func,xh1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
f_1(:,i)=fx;
- if any(isnan(ffx)),
+ if any(isnan(ffx))
ff_1=ones(size(ff0)).*fx/length(ff0);
else
ff_1=ffx;
end
ggh(:,i)=(ff1-ff_1)./(2.*h1(i));
xh1(i)=x(i);
- if hcheck && htol<1,
+ if hcheck && htol<1
htol=min(1,max(min(abs(dx))*2,htol*10));
h1(i)=h10;
i=0;
@@ -157,14 +153,14 @@ xh_1=xh1;
gg=(f1'-f_1')./(2.*h1);
-if hflag==2,
+if hflag==2
gg=(f1'-f_1')./(2.*h1);
hessian_mat = zeros(size(f0,1),n*n);
for i=1:n
if i > 1
k=[i:n:n*(i-1)];
hessian_mat(:,(i-1)*n+1:(i-1)*n+i-1)=hessian_mat(:,k);
- end
+ end
hessian_mat(:,(i-1)*n+i)=(f1(:,i)+f_1(:,i)-2*f0)./(h1(i)*h_1(i));
temp=f1+f_1-f0*ones(1,n);
for j=i+1:n
@@ -172,10 +168,8 @@ if hflag==2,
xh1(j)=x(j)+h_1(j);
xh_1(i)=x(i)-h1(i);
xh_1(j)=x(j)-h_1(j);
- temp1 = feval(func,xh1,varargin{:});
-
- temp2 = feval(func,xh_1,varargin{:});
-
+ temp1 = feval(func,xh1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+ temp2 = feval(func,xh_1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
hessian_mat(:,(i-1)*n+j)=-(-temp1 -temp2+temp(:,i)+temp(:,j))./(2*h1(i)*h_1(j));
xh1(i)=x(i);
xh1(j)=x(j);
@@ -186,27 +180,25 @@ if hflag==2,
end
i=i+1;
end
-
-elseif hflag==1,
+elseif hflag==1
hessian_mat = zeros(size(f0,1),n*n);
- for i=1:n,
+ for i=1:n
dum = (f1(:,i)+f_1(:,i)-2*f0)./(h1(i)*h_1(i));
- if dum>eps,
+ if dum>eps
hessian_mat(:,(i-1)*n+i)=dum;
else
hessian_mat(:,(i-1)*n+i)=max(eps, gg(i)^2);
- end
+ end
end
- %hessian_mat2=hh_mat(:)';
end
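+% Outer product gradient (OPG): the Hessian is approximated by the sum over the
+% sample of the outer products of the single period contributions to the score (ggh).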
gga=ggh.*kron(ones(size(ff1)),2.*h1'); % re-scaled gradient
-hh_mat=gga'*gga; % rescaled outer product hessian
+hh_mat=gga'*gga; % rescaled outer product hessian
hh_mat0=ggh'*ggh; % outer product hessian
A=diag(2.*h1); % rescaling matrix
% igg=inv(hh_mat); % inverted rescaled outer product hessian
ihh=A'*(hh_mat\A); % inverted outer product hessian
-if hflag>0 && min(eig(reshape(hessian_mat,n,n)))>0,
+if hflag>0 && min(eig(reshape(hessian_mat,n,n)))>0
hh0 = A*reshape(hessian_mat,n,n)*A'; %rescaled second order derivatives
hh = reshape(hessian_mat,n,n); %rescaled second order derivatives
sd0=sqrt(diag(hh0)); %rescaled 'standard errors' using second order derivatives
@@ -217,10 +209,9 @@ if hflag>0 && min(eig(reshape(hessian_mat,n,n)))>0,
hh_mat0=inv(A)'*hh_mat*inv(A); % outer product hessian with 'true' std's
sd=sqrt(diag(ihh)); %standard errors
sdh=sqrt(1./diag(hh)); %diagonal standard errors
- for j=1:length(sd),
- sd0(j,1)=min(bayestopt_.p2(j), sd(j)); %prior std
+ for j=1:length(sd)
+ sd0(j,1)=min(BayesInfo.p2(j), sd(j)); %prior std
sd0(j,1)=10^(0.5*(log10(sd0(j,1))+log10(sdh(j,1))));
- %sd0(j,1)=0.5*(sd0(j,1)+sdh(j,1));
end
ihh=ihh./(sd*sd').*(sd0*sd0'); %inverse outer product with modified std's
igg=inv(A)'*ihh*inv(A); % inverted rescaled outer product hessian with modified std's
@@ -233,18 +224,15 @@ if hflag>0 && min(eig(reshape(hessian_mat,n,n)))>0,
% ihh=A'*igg*A; % inverted outer product hessian
% hh_mat0=inv(A)'*hh_mat*inv(A); % outer product hessian with 'true' std's
end
-if hflag<2,
+if hflag<2
hessian_mat=hh_mat0(:);
end
-if any(isnan(hessian_mat)),
+if any(isnan(hessian_mat))
hh_mat0=eye(length(hh_mat0));
ihh=hh_mat0;
- hessian_mat=hh_mat0(:);
+ hessian_mat=hh_mat0(:);
end
hh1=h1;
htol1=htol;
-save hess.mat
-% 11/25/03 SA Created from Hessian_sparse (removed sparse)
-
-
+save hess.mat
\ No newline at end of file
diff --git a/matlab/newrat.m b/matlab/newrat.m
index ce975bfcb..bd59de7b9 100644
--- a/matlab/newrat.m
+++ b/matlab/newrat.m
@@ -1,4 +1,4 @@
-function [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit, flagg, varargin)
+function [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit, flagg, DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults)
% [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit, flagg, varargin)
%
% Optimiser with outer product gradient and with sequences of univariate steps
@@ -13,17 +13,17 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit
% hh = initial Hessian [OPTIONAL]
% gg = initial gradient [OPTIONAL]
% igg = initial inverse Hessian [OPTIONAL]
-% ftol0 = ending criterion for function change
+% ftol0 = ending criterion for function change
% nit = maximum number of iterations
%
% In each iteration, Hessian is computed with outer product gradient.
% for final Hessian (to start Metropolis):
% flagg = 0, final Hessian computed with outer product gradient
% flagg = 1, final 'mixed' Hessian: diagonal elements computed with numerical second order derivatives
-% with correlation structure as from outer product gradient,
+% with correlation structure as from outer product gradient,
% flagg = 2, full numerical Hessian
%
-% varargin = list of parameters for func0
+% remaining inputs = Dynare structures (DynareDataset, DynareOptions, Model,
+%                    EstimatedParameters, BayesInfo, DynareResults) passed to func0
% Copyright (C) 2004-2011 Dynare Team
%
@@ -42,7 +42,6 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
-global bayestopt_
icount=0;
nx=length(x);
xparam1=x;
@@ -53,29 +52,27 @@ ftol=ftol0;
gtol=1.e-3;
htol=htol_base;
htol0=htol_base;
-gibbstol=length(bayestopt_.pshape)/50; %25;
+gibbstol=length(BayesInfo.pshape)/50; %25;
-func_hh = [func0,'_hh'];
-func = str2func(func0);
-fval0=feval(func,x,varargin{:});
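+% The '_hh' suffixed version of the objective must also return the vector of single
+% period contributions to the likelihood, which mr_hessian needs to build the outer
+% product gradient.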
+func_hh = str2func([func2str(func0),'_hh']);
+fval0=feval(func0,x,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
fval=fval0;
+
% initialize mr_gstep and mr_hessian
-% mr_gstep(1,x);
-mr_hessian(1,x);
+mr_hessian(1,x,[],[],[],DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
if isempty(hh)
- [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,x,func_hh,flagit,htol,varargin{:});
+ [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,x,func_hh,flagit,htol,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
hh0 = reshape(dum,nx,nx);
hh=hhg;
- if min(eig(hh0))<0,
+ if min(eig(hh0))<0
hh0=hhg; %generalized_cholesky(hh0);
- elseif flagit==2,
+ elseif flagit==2
hh=hh0;
igg=inv(hh);
end
- if htol0>htol,
+ if htol0>htol
htol=htol0;
- %ftol=htol0;
end
else
hh0=hh;
@@ -99,73 +96,67 @@ jit=0;
nig=[];
ig=ones(nx,1);
ggx=zeros(nx,1);
-while norm(gg)>gtol && check==0 && jit<nit,
+while norm(gg)>gtol && check==0 && jit<nit
    jit=jit+1;
    tic
    icount=icount+1;
-    bayestopt_.penalty = fval0(icount);
+    % Update the penalty (a persistent variable of the objective routine) with the latest value of the objective.
+    feval(func0,fval0(icount));
    disp([' '])
    disp(['Iteration ',num2str(icount)])
-    [fval x0 fc retcode] = csminit(func0,xparam1,fval0(icount),gg,0,H,varargin{:});
-    if igrad,
-        [fval1 x01 fc retcode1] = csminit(func0,x0,fval,gg,0,inx,varargin{:});
-        if (fval-fval1)>1, %(fval0(icount)-fval),
+ [fval,x0,fc,retcode] = csminit1(func0,xparam1,fval0(icount),gg,0,H,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+ if igrad
+ [fval1,x01,fc,retcode1] = csminit1(func0,x0,fval,gg,0,inx,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+ if (fval-fval1)>1
disp('Gradient step!!')
else
igrad=0;
end
fval=fval1;
- x0=x01;
+ x0=x01;
end
- if (fval0(icount)-fval)<1.e-2*(gg'*(H*gg))/2 && igibbs,
-        if length(find(ig))<nx,
+    if (fval0(icount)-fval)<1.e-2*(gg'*(H*gg))/2 && igibbs
+        if length(find(ig))<nx
            ggx=ggx*0.;
            ggx(find(ig))=gg(find(ig));
            hhx = reshape(dum,nx,nx);
            iggx=eye(length(gg));
            iggx(find(ig),find(ig)) = inv( hhx(find(ig),find(ig)) );
-            [fvala x0 fc retcode] = csminit(func0,x0,fval,ggx,0,iggx,varargin{:});
+            [fvala,x0,fc,retcode] = csminit1(func0,x0,fval,ggx,0,iggx,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
        end
-        [fvala, x0, ig] = mr_gstep(h1,x0,func0,htol,varargin{:});
+        [fvala, x0, ig] = mr_gstep(h1,x0,func0,htol,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
        nig=[nig ig];
        disp('Gibbs step!!')
        fval=fvala;
    end
-    if (fval0(icount)-fval)<ftol && flagit==0,
+    if (fval0(icount)-fval)<ftol && flagit==0
        disp('Try diagonal Hessian')
-        ihh=diag(1./(diag(hhg)));
-        [fval2 x0 fc retcode2] = csminit(func0,x0,fval,gg,0,ihh,varargin{:});
-        if (fval-fval2)>=ftol ,
- %hh=diag(diag(hh));
- disp('Diagonal Hessian successful')
+ ihh=diag(1./(diag(hhg)));
+ [fval2,x0,fc,retcode2] = csminit1(func0,x0,fval,gg,0,ihh,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+ if (fval-fval2)>=ftol
+ disp('Diagonal Hessian successful')
end
fval=fval2;
- end
-    if (fval0(icount)-fval)<ftol && flagit==0,
+    if (fval0(icount)-fval)<ftol && flagit==0
        disp('Try gradient direction')
-        ihh0=inx.*1.e-4;
-        [fval3 x0 fc retcode3] = csminit(func0,x0,fval,gg,0,ihh0,varargin{:});
-        if (fval-fval3)>=ftol ,
- %hh=hh0;
- %ihh=ihh0;
- disp('Gradient direction successful')
+ ihh0=inx.*1.e-4;
+ [fval3,x0,fc,retcode3] = csminit1(func0,x0,fval,gg,0,ihh0,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+ if (fval-fval3)>=ftol
+ disp('Gradient direction successful')
end
fval=fval3;
- end
+ end
xparam1=x0;
x(:,icount+1)=xparam1;
fval0(icount+1)=fval;
-    if (fval0(icount)-fval)<ftol,
+    if (fval0(icount)-fval)<ftol
        disp('No further improvement is possible!')
        check=1;
-        if flagit==2,
+        if flagit==2
            hh=hh0;
-        elseif flagg>0,
- [dum, gg, htol0, igg, hhg,h1]=mr_hessian(0,xparam1,func_hh,flagg,ftol0,varargin{:});
- if flagg==2,
+ elseif flagg>0
+ [dum, gg, htol0, igg, hhg,h1]=mr_hessian(0,xparam1,func_hh,flagg,ftol0,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+ if flagg==2
hh = reshape(dum,nx,nx);
ee=eig(hh);
if min(ee)<0
@@ -186,48 +177,38 @@ while norm(gg)>gtol && check==0 && jit<nit
-    if norm(x(:,icount)-xparam1)>1.e-12,
- try
+ if norm(x(:,icount)-xparam1)>1.e-12
+ try
save m1.mat x fval0 nig -append
catch
- save m1.mat x fval0 nig
+ save m1.mat x fval0 nig
end
- [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func_hh,flagit,htol,varargin{:});
- if htol0>htol, %ftol,
- %ftol=htol0;
+ [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func_hh,flagit,htol,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+ if htol0>htol
htol=htol0;
disp(' ')
disp('Numerical noise in the likelihood')
disp('Tolerance has to be relaxed')
disp(' ')
-        % elseif htol0<htol,
-        %     htol=htol0;
        end
    end
end %while norm(gg)>gtol && check==0 && jit<nit
-if ftol>ftol0,
+if ftol>ftol0
disp(' ')
disp('Numerical noise in the likelihood')
disp('Tolerance had to be relaxed')
disp(' ')
end
-if jit==nit,
+if jit==nit
disp(' ')
disp('Maximum number of iterations reached')
disp(' ')
end
-if norm(gg)<=gtol,
+if norm(gg)<=gtol
disp(['Estimation ended:'])
disp(['Gradient norm < ', num2str(gtol)])
end
@@ -267,15 +246,7 @@ end
return
-%
-function f00 = lsearch(lam,func,x,dx,varargin)
-
-
-x0=x-dx*lam;
-f00=feval(func,x0,varargin{:});
-
-
-
-
-
+function f00 = lsearch(lam,func,x,dx,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults)
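+% Evaluate the objective at a step of size lam along the search direction dx.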
+ x0=x-dx*lam;
+ f00=feval(func,x0,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
\ No newline at end of file
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 82ce92f9d..1aa801732 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -48,6 +48,7 @@ MODFILES = \
fs2000/fs2000.mod \
fs2000/fs2000a.mod \
fs2000/fs2000c.mod \
+ fs2000/fs2000d.mod \
homotopy/homotopy1_test.mod \
homotopy/homotopy2_test.mod \
homotopy/homotopy3_test.mod \
diff --git a/tests/fs2000/fs2000d.mod b/tests/fs2000/fs2000d.mod
new file mode 100644
index 000000000..04f122805
--- /dev/null
+++ b/tests/fs2000/fs2000d.mod
@@ -0,0 +1,75 @@
+// See fs2000.mod in the examples/ directory for details on the model
+
+var m P c e W R k d n l gy_obs gp_obs y dA;
+varexo e_a e_m;
+
+parameters alp bet gam mst rho psi del;
+
+alp = 0.33;
+bet = 0.99;
+gam = 0.003;
+mst = 1.011;
+rho = 0.7;
+psi = 0.787;
+del = 0.02;
+
+model;
+dA = exp(gam+e_a);
+log(m) = (1-rho)*log(mst) + rho*log(m(-1))+e_m;
+-P/(c(+1)*P(+1)*m)+bet*P(+1)*(alp*exp(-alp*(gam+log(e(+1))))*k^(alp-1)*n(+1)^(1-alp)+(1-del)*exp(-(gam+log(e(+1)))))/(c(+2)*P(+2)*m(+1))=0;
+W = l/n;
+-(psi/(1-psi))*(c*P/(1-n))+l/n = 0;
+R = P*(1-alp)*exp(-alp*(gam+e_a))*k(-1)^alp*n^(-alp)/W;
+1/(c*P)-bet*P*(1-alp)*exp(-alp*(gam+e_a))*k(-1)^alp*n^(1-alp)/(m*l*c(+1)*P(+1)) = 0;
+c+k = exp(-alp*(gam+e_a))*k(-1)^alp*n^(1-alp)+(1-del)*exp(-(gam+e_a))*k(-1);
+P*c = m;
+m-1+d = l;
+e = exp(e_a);
+y = k(-1)^alp*n^(1-alp)*exp(-alp*(gam+e_a));
+gy_obs = dA*y/y(-1);
+gp_obs = (P/P(-1))*m(-1)/dA;
+end;
+
+initval;
+k = 6;
+m = mst;
+P = 2.25;
+c = 0.45;
+e = 1;
+W = 4;
+R = 1.02;
+d = 0.85;
+n = 0.19;
+l = 0.86;
+y = 0.6;
+gy_obs = exp(gam);
+gp_obs = exp(-gam);
+dA = exp(gam);
+end;
+
+shocks;
+var e_a; stderr 0.014;
+var e_m; stderr 0.005;
+end;
+
+steady;
+
+check;
+
+estimated_params;
+alp, beta_pdf, 0.356, 0.02;
+bet, beta_pdf, 0.993, 0.002;
+gam, normal_pdf, 0.0085, 0.003;
+mst, normal_pdf, 1.0002, 0.007;
+rho, beta_pdf, 0.129, 0.223;
+psi, beta_pdf, 0.65, 0.05;
+del, beta_pdf, 0.01, 0.005;
+stderr e_a, inv_gamma_pdf, 0.035449, inf;
+stderr e_m, inv_gamma_pdf, 0.008862, inf;
+end;
+
+varobs gp_obs gy_obs;
+
+options_.solve_tolf = 1e-12;
+
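+// mode_compute=5 triggers the newrat optimiser, which is what this test exercises.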
+estimation(order=1,datafile=fsdat_simul,nobs=192,mode_compute=5,loglinear,mh_replic=0);