% DsgeLikelihood.m
function [fval,cost_flag,ys,trend_coeff,info,DLIK,AHess] = DsgeLikelihood(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations,derivatives_info)
% Evaluates minus the log posterior kernel of a DSGE model (shape suitable
% for minimization).
%
% INPUTS
%   xparam1                      [double]  vector of model parameters.
%   gend                         [integer] scalar, number of observations.
%   data                         [double]  matrix of data.
%   data_index                   [cell]    cell of column vectors (indices of
%                                          observed variables, per period).
%   number_of_observations       [integer] scalar, total number of non-missing
%                                          observations.
%   no_more_missing_observations [integer] scalar, last period with missing
%                                          observations.
%   derivatives_info             [struct]  optional, precomputed derivatives
%                                          (fields DT, DOm, DYss, no_DLIK).
%
% OUTPUTS
%   fval        : MINUS value of the log posterior kernel at xparam1.
%   cost_flag   : zero if the function returns a penalty, one otherwise.
%   ys          : steady state of original endogenous variables.
%   trend_coeff : trend coefficients of the observed variables.
%   info        : vector of informations about the penalty, first entry is:
%                   41: one (many) parameter(s) do(es) not satisfy the lower bound
%                   42: one (many) parameter(s) do(es) not satisfy the upper bound
%                   43: covariance matrix of structural shocks is not positive definite
%                   44: covariance matrix of measurement errors is not positive definite
%                   45: likelihood is not a number (NaN)
%   DLIK        : vector of analytic scores (only when requested).
%   AHess       : asymptotic Hessian matrix (only when requested).
%
% SPECIAL REQUIREMENTS
%   none

% Copyright (C) 2004-2011 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.

global bayestopt_ estim_params_ options_ trend_coeff_ M_ oo_

fval = [];
ys = [];
trend_coeff = [];
cost_flag = 1;
nobs = size(options_.varobs,1);
% Analytic derivatives (score vector / asymptotic Hessian) are computed only
% when the caller asks for the corresponding output arguments.
if nargout > 5
    analytic_derivation = 1;
else
    analytic_derivation = 0;
end

%------------------------------------------------------------------------------
% 1. Get the structural parameters & define penalties
%------------------------------------------------------------------------------
if options_.mode_compute ~= 1 && any(xparam1 < bayestopt_.lb)
    k = find(xparam1 < bayestopt_.lb);
    fval = bayestopt_.penalty+sum((bayestopt_.lb(k)-xparam1(k)).^2);
    cost_flag = 0;
    info = 41;
    return;
end
if options_.mode_compute ~= 1 && any(xparam1 > bayestopt_.ub)
    k = find(xparam1 > bayestopt_.ub);
    fval = bayestopt_.penalty+sum((xparam1(k)-bayestopt_.ub(k)).^2);
    cost_flag = 0;
    info = 42;
    return;
end
Q = M_.Sigma_e;
H = M_.H;
% Standard deviations of the structural innovations.
for i=1:estim_params_.nvx
    k = estim_params_.var_exo(i,1);
    Q(k,k) = xparam1(i)*xparam1(i);
end
offset = estim_params_.nvx;
% Standard deviations of the measurement errors.
if estim_params_.nvn
    for i=1:estim_params_.nvn
        k = estim_params_.var_endo(i,1);
        H(k,k) = xparam1(i+offset)*xparam1(i+offset);
    end
    offset = offset+estim_params_.nvn;
end
% Correlations among the structural innovations.
if estim_params_.ncx
    for i=1:estim_params_.ncx
        k1 = estim_params_.corrx(i,1);
        k2 = estim_params_.corrx(i,2);
        Q(k1,k2) = xparam1(i+offset)*sqrt(Q(k1,k1)*Q(k2,k2));
        Q(k2,k1) = Q(k1,k2);
    end
    [CholQ,testQ] = chol(Q);
    if testQ
        % The variance-covariance matrix of the structural innovations is not
        % positive definite: build a penalty from its negative eigenvalues.
        a = eig(Q);
        k = find(a < 0);
        if ~isempty(k)
            fval = bayestopt_.penalty+sum(-a(k));
            cost_flag = 0;
            info = 43;
            return
        end
    end
    offset = offset+estim_params_.ncx;
end
% Correlations among the measurement errors.
if estim_params_.ncn
    for i=1:estim_params_.ncn
        k1 = options_.lgyidx2varobs(estim_params_.corrn(i,1));
        k2 = options_.lgyidx2varobs(estim_params_.corrn(i,2));
        H(k1,k2) = xparam1(i+offset)*sqrt(H(k1,k1)*H(k2,k2));
        H(k2,k1) = H(k1,k2);
    end
    [CholH,testH] = chol(H);
    if testH
        % The variance-covariance matrix of the measurement errors is not
        % positive definite: build a penalty from its negative eigenvalues.
        a = eig(H);
        k = find(a < 0);
        if ~isempty(k)
            fval = bayestopt_.penalty+sum(-a(k));
            cost_flag = 0;
            info = 44;
            return
        end
    end
    offset = offset+estim_params_.ncn;
end
% Remaining entries of xparam1 are the structural (deep) parameters.
if estim_params_.np > 0
    M_.params(estim_params_.param_vals(:,1)) = xparam1(offset+1:end);
end
M_.Sigma_e = Q;
M_.H = H;

%------------------------------------------------------------------------------
% 2. call model setup & reduction program
%------------------------------------------------------------------------------
[T,R,SteadyState,info] = dynare_resolve('restrict');

if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 22 || info(1) == 24
    fval = bayestopt_.penalty+1;
    cost_flag = 0;
    return
elseif info(1) == 3 || info(1) == 4 || info(1) == 6 || info(1) == 19 || info(1) == 20 || info(1) == 21 || info(1) == 23
    fval = bayestopt_.penalty+info(2);
    cost_flag = 0;
    return
end
bayestopt_.mf = bayestopt_.mf1;
% Constant of the measurement equation.
if options_.noconstant
    constant = zeros(nobs,1);
else
    if options_.loglinear
        constant = log(SteadyState(bayestopt_.mfys));
    else
        constant = SteadyState(bayestopt_.mfys);
    end
end
% Deterministic (linear) trend of the observed variables, if any.
if bayestopt_.with_trend
    trend_coeff = zeros(nobs,1);
    t = options_.trend_coeffs;
    for i=1:length(t)
        if ~isempty(t{i})
            trend_coeff(i) = evalin('base',t{i});
        end
    end
    trend = repmat(constant,1,gend)+trend_coeff*[1:gend];
else
    trend = repmat(constant,1,gend);
end
start = options_.presample+1;
np = size(T,1);
mf = bayestopt_.mf;
no_missing_data_flag = (number_of_observations==gend*nobs);

%------------------------------------------------------------------------------
% 3. Initial condition of the Kalman filter
%------------------------------------------------------------------------------
kalman_algo = options_.kalman_algo;
if options_.lik_init == 1               % Kalman filter.
    if kalman_algo ~= 2
        kalman_algo = 1;
    end
    Pstar = lyapunov_symm(T,R*Q*R',options_.qz_criterium,options_.lyapunov_complex_threshold);
    Pinf = [];
elseif options_.lik_init == 2           % Old diffuse Kalman filter.
    if kalman_algo ~= 2
        kalman_algo = 1;
    end
    Pstar = options_.Harvey_scale_factor*eye(np);
    Pinf = [];
elseif options_.lik_init == 3           % Diffuse Kalman filter.
    if kalman_algo ~= 4
        kalman_algo = 3;
    end
    [Z,ST,R1,QT,Pstar,Pinf] = schur_statespace_transformation(mf,T,R,Q,options_.qz_criterium);
end
kalman_tol = options_.kalman_tol;
riccati_tol = options_.riccati_tol;
mf = bayestopt_.mf1;
Y = data-trend;

if analytic_derivation
    no_DLIK = 0;
    DLIK = [];
    AHess = [];
    if nargin < 7 || isempty(derivatives_info)
        % Derivatives were not supplied by the caller: compute them here.
        [A,B] = dynare_resolve;
        if ~isempty(estim_params_.var_exo)
            indexo = estim_params_.var_exo(:,1);
        else
            indexo = [];
        end
        if ~isempty(estim_params_.param_vals)
            indparam = estim_params_.param_vals(:,1);
        else
            indparam = [];
        end
        [dum, DT, DOm, DYss] = getH(A, B, M_,oo_,0, ...
                                    indparam,indexo);
    else
        DT = derivatives_info.DT;
        DOm = derivatives_info.DOm;
        DYss = derivatives_info.DYss;
        if isfield(derivatives_info,'no_DLIK')
            no_DLIK = derivatives_info.no_DLIK;
        end
        clear derivatives_info
    end
    % Restrict the derivative arrays to the relevant state variables.
    iv = oo_.dr.restrict_var_list;
    DYss = [zeros(size(DYss,1),offset) DYss];
    DT = DT(iv,iv,:);
    DOm = DOm(iv,iv,:);
    DYss = DYss(iv,:);
    DH = zeros([size(H),length(xparam1)]);
    DQ = zeros([size(Q),length(xparam1)]);
    DP = zeros([size(T),length(xparam1)]);
    % Derivatives w.r.t. standard deviations of structural shocks.
    for i=1:estim_params_.nvx
        k = estim_params_.var_exo(i,1);
        DQ(k,k,i) = 2*sqrt(Q(k,k));
        dum = lyapunov_symm(T,DOm(:,:,i),options_.qz_criterium,options_.lyapunov_complex_threshold);
        kk = find(abs(dum) < 1e-12);    % Zero out numerical noise.
        dum(kk) = 0;
        DP(:,:,i) = dum;
    end
    offset = estim_params_.nvx;
    % Derivatives w.r.t. standard deviations of measurement errors.
    for i=1:estim_params_.nvn
        k = estim_params_.var_endo(i,1);
        DH(k,k,i+offset) = 2*sqrt(H(k,k));
    end
    offset = offset + estim_params_.nvn;
    % Derivatives w.r.t. the deep parameters.
    for j=1:estim_params_.np
        dum = lyapunov_symm(T,DT(:,:,j+offset)*Pstar*T'+T*Pstar*DT(:,:,j+offset)'+DOm(:,:,j+offset),options_.qz_criterium,options_.lyapunov_complex_threshold);
        kk = find(abs(dum) < 1e-12);    % Zero out numerical noise.
        dum(kk) = 0;
        DP(:,:,j+offset) = dum;
    end
end

%------------------------------------------------------------------------------
% 4. Likelihood evaluation
%------------------------------------------------------------------------------
if (kalman_algo==1)% Multivariate Kalman Filter
    if no_missing_data_flag
        LIK = kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol);
        if analytic_derivation
            if no_DLIK==0
                [DLIK] = score(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,start,mf,kalman_tol,riccati_tol);
            end
            if nargout==7
                [AHess] = AHessian(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,start,mf,kalman_tol,riccati_tol);
            end
        end
    else
        LIK = ...
            missing_observations_kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol, ...
                                               data_index,number_of_observations,no_more_missing_observations);
    end
    if isinf(LIK)
        % The multivariate filter failed: fall back on the univariate filter.
        kalman_algo = 2;
    end
end
if (kalman_algo==2)% Univariate Kalman Filter
    no_correlation_flag = 1;
    if isequal(H,0)
        H = zeros(nobs,1);
    else
        if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
            H = diag(H);
        else
            no_correlation_flag = 0;
        end
    end
    if no_correlation_flag
        LIK = univariate_kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol,data_index,number_of_observations,no_more_missing_observations);
    else
        LIK = univariate_kalman_filter_corr(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol,data_index,number_of_observations,no_more_missing_observations);
    end
end
if (kalman_algo==3)% Multivariate Diffuse Kalman Filter
    if no_missing_data_flag
        LIK = diffuse_kalman_filter(ST,R1,Q,H,Pinf,Pstar,Y,start,Z,kalman_tol, ...
                                    riccati_tol);
    else
        LIK = missing_observations_diffuse_kalman_filter(ST,R1,Q,H,Pinf, ...
                                                         Pstar,Y,start,Z,kalman_tol,riccati_tol,...
                                                         data_index,number_of_observations,...
                                                         no_more_missing_observations);
    end
    if isinf(LIK)
        % The multivariate diffuse filter failed: fall back on the univariate one.
        kalman_algo = 4;
    end
end
if (kalman_algo==4)% Univariate Diffuse Kalman Filter
    no_correlation_flag = 1;
    if isequal(H,0)
        H = zeros(nobs,1);
    else
        if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
            H = diag(H);
        else
            no_correlation_flag = 0;
        end
    end
    if no_correlation_flag
        LIK = univariate_diffuse_kalman_filter(ST,R1,Q,H,Pinf,Pstar,Y, ...
                                               start,Z,kalman_tol,riccati_tol,data_index,...
                                               number_of_observations,no_more_missing_observations);
    else
        LIK = univariate_diffuse_kalman_filter_corr(ST,R1,Q,H,Pinf,Pstar, ...
                                                    Y,start,Z,kalman_tol,riccati_tol,...
                                                    data_index,number_of_observations,...
                                                    no_more_missing_observations);
    end
end
if isnan(LIK)
    % The likelihood is not defined at this point of the parameter space.
    % Return a penalty value (the previous code returned an empty fval,
    % which breaks optimizers that expect a scalar objective).
    fval = bayestopt_.penalty;
    cost_flag = 0;
    info = 45;
    return
end
if imag(LIK)~=0
    likelihood = bayestopt_.penalty;
else
    likelihood = LIK;
end

% ------------------------------------------------------------------------------
% Adds prior if necessary
% ------------------------------------------------------------------------------
lnprior = priordens(xparam1,bayestopt_.pshape,bayestopt_.p6,bayestopt_.p7,bayestopt_.p3,bayestopt_.p4);
fval = (likelihood-lnprior);

options_.kalman_algo = kalman_algo;