Updated in compliance with the new version of DsgeLikelihood.

git-svn-id: https://www.dynare.org/svn/dynare/dynare_v4@2306 ac1d8469-bf42-47a9-8791-bf33cf982152
time-shift
ratto 2008-12-12 07:39:03 +00:00
parent 299e68470d
commit 697edeb109
1 changed file with 191 additions and 98 deletions


@@ -1,55 +1,66 @@
function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend,data)
% function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend,data)
% Evaluates the likelihood at each observation and the marginal density of a dsge model
% used in the optimization algorithm number 5
function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood_hh(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations)
% function [fval,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations)
% Evaluates the posterior kernel of a dsge model.
%
% INPUTS
% xparam1: vector of model parameters
% gend : scalar specifying the number of observations
% data : matrix of data
%
% xparam1 [double] vector of model parameters.
% gend [integer] scalar specifying the number of observations.
% data [double] matrix of data
% data_index [cell] cell of column vectors
% number_of_observations [integer] scalar, total number of non-missing data points (equals gend*nobs when no observation is missing)
% no_more_missing_observations [integer]
% OUTPUTS
% fval : value of the posterior kernel at xparam1
% llik : gives the density at each observation
% cost_flag : zero if the function returns a penalty, one otherwise
% fval : value of the posterior kernel at xparam1.
% cost_flag : zero if the function returns a penalty, one otherwise.
% ys : steady state of original endogenous variables
% trend_coeff :
% info : vector of information about the penalty
% info : vector of information about the penalty:
% 41: one (many) parameter(s) do(es) not satisfy the lower bound
% 42: one (many) parameter(s) do(es) not satisfy the upper bound
%
% SPECIAL REQUIREMENTS
% Adapted from DsgeLikelihood.m
%
% copyright marco.ratto@jrc.it [13-03-2007]
%
% Copyright (C) 2004-2008 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
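%
% Example call (illustrative sketch only; it assumes the usual Dynare
% estimation setup has already filled M_, oo_, options_, bayestopt_ and
% estim_params_, and the variable names rawdata and data_index used here
% are hypothetical):
%
%   gend = size(rawdata,2);   % number of periods, data stored as nobs x gend
%   [fval,llik,cost_flag,ys,trend_coeff,info] = ...
%       DsgeLikelihood_hh(xparam1,gend,rawdata,data_index, ...
%                         number_of_observations,no_more_missing_observations);
%   % fval is minus the log posterior kernel; llik stacks the prior term
%   % on top of the per-observation density contributions.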
global bayestopt_ estim_params_ options_ trend_coeff_ M_ oo_ xparam1_test
global bayestopt_ estim_params_ options_ trend_coeff_ M_ oo_
fval = [];
ys = [];
trend_coeff = [];
xparam1_test = xparam1;
llik = NaN;
cost_flag = 1;
nobs = size(options_.varobs,1);
%------------------------------------------------------------------------------
% 1. Get the structural parameters & define penalties
%------------------------------------------------------------------------------
if options_.mode_compute ~= 1 & any(xparam1 < bayestopt_.lb)
k = find(xparam1 < bayestopt_.lb);
fval = bayestopt_.penalty+sum((bayestopt_.lb(k)-xparam1(k)).^2);
llik=fval;
cost_flag = 0;
info = 41;
return;
k = find(xparam1 < bayestopt_.lb);
fval = bayestopt_.penalty+sum((bayestopt_.lb(k)-xparam1(k)).^2);
cost_flag = 0;
info = 41;
return;
end
if options_.mode_compute ~= 1 & any(xparam1 > bayestopt_.ub)
k = find(xparam1 > bayestopt_.ub);
fval = bayestopt_.penalty+sum((xparam1(k)-bayestopt_.ub(k)).^2);
llik=fval;
cost_flag = 0;
info = 42;
return;
k = find(xparam1 > bayestopt_.ub);
fval = bayestopt_.penalty+sum((xparam1(k)-bayestopt_.ub(k)).^2);
cost_flag = 0;
info = 42;
return;
end
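% Worked example of the bound penalties above (hedged sketch; the numbers
% are hypothetical): with bayestopt_.lb = [0;0], bayestopt_.penalty = 1e8
% and a candidate xparam1 = [-0.2;0.5], only the first element violates
% the lower bound, so
%   fval = 1e8 + (0-(-0.2))^2 = 1e8 + 0.04;   info = 41;
% The quadratic term keeps the penalty smooth in the violating
% parameters, so the optimizer is pushed back towards the feasible region.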
Q = M_.Sigma_e;
H = M_.H;
@@ -79,7 +90,6 @@ function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend
k = find(a < 0);
if k > 0
fval = bayestopt_.penalty+sum(-a(k));
llik=fval;
cost_flag = 0;
info = 43;
return
@@ -100,7 +110,6 @@ function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend
k = find(a < 0);
if k > 0
fval = bayestopt_.penalty+sum(-a(k));
llik=fval;
cost_flag = 0;
info = 44;
return
@@ -108,10 +117,9 @@ function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend
end
offset = offset+estim_params_.ncn;
end
M_.params(estim_params_.param_vals(:,1)) = xparam1(offset+1:end);
% for i=1:estim_params_.np
% M_.params(estim_params_.param_vals(i,1)) = xparam1(i+offset);
%end
if estim_params_.np > 0
M_.params(estim_params_.param_vals(:,1)) = xparam1(offset+1:end);
end
M_.Sigma_e = Q;
M_.H = H;
%------------------------------------------------------------------------------
@@ -122,12 +130,10 @@ function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend
bayestopt_.restrict_aux);
if info(1) == 1 | info(1) == 2 | info(1) == 5
fval = bayestopt_.penalty+1;
llik=fval;
cost_flag = 0;
return
elseif info(1) == 3 | info(1) == 4 | info(1) == 20
fval = bayestopt_.penalty+info(2)^2;
llik=fval;
fval = bayestopt_.penalty+info(2);%^2; % penalty power raised in DR1.m and resol already. GP July'08
cost_flag = 0;
return
end
@@ -156,81 +162,168 @@ function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend
start = options_.presample+1;
np = size(T,1);
mf = bayestopt_.mf;
no_missing_data_flag = (number_of_observations==gend*nobs);
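% Example (sketch): with nobs = 3 observed variables and gend = 100
% periods, a complete panel has number_of_observations = 3*100 = 300, so
% no_missing_data_flag is true and the standard filter recursions apply;
% any smaller count signals missing observations in the sample.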
%------------------------------------------------------------------------------
% 3. Initial condition of the Kalman filter
%------------------------------------------------------------------------------
kalman_algo = options_.kalman_algo;
if options_.lik_init == 1 % Kalman filter
Pstar = lyapunov_symm(T,R*Q*R',options_.qz_criterium);
Pinf = [];
elseif options_.lik_init == 2 % Old Diffuse Kalman filter
Pstar = 10*eye(np);
Pinf = [];
elseif options_.lik_init == 3 % Diffuse Kalman filter
Pstar = zeros(np,np);
ivs = bayestopt_.restrict_var_list_stationary;
ivd = bayestopt_.restrict_var_list_nonstationary;
RR=T(:,bayestopt_.restrict_var_list_nonstationary);
i=find(abs(RR)>1.e-10);
R0=zeros(size(RR));
R0(i)=sign(RR(i));
Pinf=R0*R0';
T0 = T;
R1 = R;
for j=1:size(T,1),
for i=1:length(ivd),
T0(j,:) = T0(j,:)-RR(j,i).*T(ivd(i),:);
R1(j,:) = R1(j,:)-RR(j,i).*R(ivd(i),:);
if kalman_algo ~= 2
kalman_algo = 1;
end
Pstar = lyapunov_symm(T,R*Q*R',options_.qz_criterium);
Pinf = [];
elseif options_.lik_init == 2 % Old Diffuse Kalman filter
if kalman_algo ~= 2
kalman_algo = 1;
end
Pstar = 10*eye(np);
Pinf = [];
elseif options_.lik_init == 3 % Diffuse Kalman filter
if kalman_algo ~= 4
kalman_algo = 3;
end
[QT,ST] = schur(T);
if exist('OCTAVE_VERSION') || matlab_ver_less_than('7.0.1')
e1 = abs(my_ordeig(ST)) > 2-options_.qz_criterium;
else
e1 = abs(ordeig(ST)) > 2-options_.qz_criterium;
end
[QT,ST] = ordschur(QT,ST,e1);
if exist('OCTAVE_VERSION') || matlab_ver_less_than('7.0.1')
k = find(abs(my_ordeig(ST)) > 2-options_.qz_criterium);
else
k = find(abs(ordeig(ST)) > 2-options_.qz_criterium);
end
nk = length(k);
nk1 = nk+1;
Pinf = zeros(np,np);
Pinf(1:nk,1:nk) = eye(nk);
Pstar = zeros(np,np);
B = QT'*R*Q*R'*QT;
for i=np:-1:nk+2
if ST(i,i-1) == 0
if i == np
c = zeros(np-nk,1);
else
c = ST(nk1:i,:)*(Pstar(:,i+1:end)*ST(i,i+1:end)')+...
ST(i,i)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i);
end
q = eye(i-nk)-ST(nk1:i,nk1:i)*ST(i,i);
Pstar(nk1:i,i) = q\(B(nk1:i,i)+c);
Pstar(i,nk1:i-1) = Pstar(nk1:i-1,i)';
else
if i == np
c = zeros(np-nk,1);
c1 = zeros(np-nk,1);
else
c = ST(nk1:i,:)*(Pstar(:,i+1:end)*ST(i,i+1:end)')+...
ST(i,i)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i)+...
ST(i,i-1)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i-1);
c1 = ST(nk1:i,:)*(Pstar(:,i+1:end)*ST(i-1,i+1:end)')+...
ST(i-1,i-1)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i-1)+...
ST(i-1,i)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i);
end
q = [eye(i-nk)-ST(nk1:i,nk1:i)*ST(i,i) -ST(nk1:i,nk1:i)*ST(i,i-1);...
-ST(nk1:i,nk1:i)*ST(i-1,i) eye(i-nk)-ST(nk1:i,nk1:i)*ST(i-1,i-1)];
z = q\[B(nk1:i,i)+c;B(nk1:i,i-1)+c1];
Pstar(nk1:i,i) = z(1:(i-nk));
Pstar(nk1:i,i-1) = z(i-nk+1:end);
Pstar(i,nk1:i-1) = Pstar(nk1:i-1,i)';
Pstar(i-1,nk1:i-2) = Pstar(nk1:i-2,i-1)';
i = i - 1;
end
end
if i == nk+2
c = ST(nk+1,:)*(Pstar(:,nk+2:end)*ST(nk1,nk+2:end)')+ST(nk1,nk1)*ST(nk1,nk+2:end)*Pstar(nk+2:end,nk1);
Pstar(nk1,nk1)=(B(nk1,nk1)+c)/(1-ST(nk1,nk1)*ST(nk1,nk1));
end
Z = QT(mf,:);
R1 = QT'*R;
[QQ,RR,EE] = qr(Z*ST(:,1:nk),0);
k = find(abs(diag(RR)) < 1e-8);
if length(k) > 0
k1 = EE(:,k);
dd =ones(nk,1);
dd(k1) = zeros(length(k1),1);
Pinf(1:nk,1:nk) = diag(dd);
end
end
Pstar = lyapunov_symm(T0,R1*Q*R1',options_.qz_criterium);
end
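% The stationary initialization (lik_init == 1) solves the discrete
% Lyapunov equation Pstar = T*Pstar*T' + R*Q*R'. A minimal dense sketch
% of that computation, assuming np is small enough to form the
% np^2-by-np^2 system explicitly:
%   vecP   = (eye(np^2) - kron(T,T)) \ reshape(R*Q*R',np^2,1);
%   Pstar0 = reshape(vecP,np,np);
% lyapunov_symm returns the same fixed point while exploiting symmetry,
% so the Kronecker system above is only meant as a cross-check.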
if kalman_algo == 2
no_correlation_flag = 1;
if length(H)==1
H = zeros(nobs,1);
else
if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
H = diag(H);
else
no_correlation_flag = 0;
end
end
end
kalman_tol = options_.kalman_tol;
riccati_tol = options_.riccati_tol;
mf = bayestopt_.mf1;
Y = data-trend;
%------------------------------------------------------------------------------
% 4. Likelihood evaluation
%------------------------------------------------------------------------------
if any(any(H ~= 0)) % should be replaced by a flag
if options_.kalman_algo == 1
[LIK, lik] =DiffuseLikelihoodH1(T,R,Q,H,Pinf,Pstar,data,trend,start);
if isinf(LIK) & ~estim_params_.ncn %% The univariate approach considered here doesn't
%% apply when H has some off-diagonal elements.
[LIK, lik] =DiffuseLikelihoodH3(T,R,Q,H,Pinf,Pstar,data,trend,start);
elseif isinf(LIK) & estim_params_.ncn
[LIK, lik] =DiffuseLikelihoodH3corr(T,R,Q,H,Pinf,Pstar,data,trend,start);
end
elseif options_.kalman_algo == 3
if ~estim_params_.ncn %% The univariate approach considered here doesn't
%% apply when H has some off-diagonal elements.
[LIK, lik] =DiffuseLikelihoodH3(T,R,Q,H,Pinf,Pstar,data,trend,start);
if (kalman_algo==1)% Multivariate Kalman Filter
if no_missing_data_flag
[LIK, lik] = kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol);
else
[LIK, lik] =DiffuseLikelihoodH3corr(T,R,Q,H,Pinf,Pstar,data,trend,start);
end
end
else
if options_.kalman_algo == 1
%nv = size(bayestopt_.Z,1);
%LIK = kalman_filter(bayestopt_.Z,zeros(nv,nv),T,R,Q,data,zeros(size(T,1),1),Pstar,'u');
[LIK, lik] =DiffuseLikelihood1(T,R,Q,Pinf,Pstar,data,trend,start);
% LIK = diffuse_likelihood1(T,R,Q,Pinf,Pstar,data-trend,start);
%if abs(LIK1-LIK)>0.0000000001
% disp(['LIK1 and LIK are not equal! ' num2str(abs(LIK1-LIK))])
%end
if isinf(LIK)
[LIK, lik] =DiffuseLikelihood3(T,R,Q,Pinf,Pstar,data,trend,start);
[LIK, lik] = ...
missing_observations_kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol, ...
data_index,number_of_observations,no_more_missing_observations);
end
if isinf(LIK)
kalman_algo = 2;
end
end
if (kalman_algo==2)% Univariate Kalman Filter
if no_correlation_flag
[LIK, lik] = univariate_kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol,data_index,number_of_observations,no_more_missing_observations);
else
[LIK, lik] = univariate_kalman_filter_corr(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol,data_index,number_of_observations,no_more_missing_observations);
end
end
if (kalman_algo==3)% Multivariate Diffuse Kalman Filter
if no_missing_data_flag
data1 = data - trend;
if any(any(H ~= 0))
[LIK, lik] = DiffuseLikelihoodH1_Z(ST,Z,R1,Q,H,Pinf,Pstar,data1,start);
else
[LIK, lik] = DiffuseLikelihood1_Z(ST,Z,R1,Q,Pinf,Pstar,data1,start);
end
if isinf(LIK)
kalman_algo = 4;
end
else
error(['The diffuse filter is not yet implemented for models with missing observations'])
end
end
if (kalman_algo==4)% Univariate Diffuse Kalman Filter
data1 = data - trend;
if any(any(H ~= 0))
if ~estim_params_.ncn
[LIK, lik] = DiffuseLikelihoodH3_Z(ST,Z,R1,Q,H,Pinf,Pstar,data1,trend,start);
else
[LIK, lik] = DiffuseLikelihoodH3corr_Z(ST,Z,R1,Q,H,Pinf,Pstar,data1,trend,start);
end
else
[LIK, lik] = DiffuseLikelihood3_Z(ST,Z,R1,Q,Pinf,Pstar,data1,start);
end
elseif options_.kalman_algo == 3
[LIK, lik] =DiffuseLikelihood3(T,R,Q,Pinf,Pstar,data,trend,start);
end
end
if imag(LIK) ~= 0
likelihood = bayestopt_.penalty;
lik=ones(size(lik)).*bayestopt_.penalty;
likelihood = bayestopt_.penalty;
else
likelihood = LIK;
likelihood = LIK;
end
% ------------------------------------------------------------------------------
% Adds prior if necessary
% ------------------------------------------------------------------------------
lnprior = priordens(xparam1,bayestopt_.pshape,bayestopt_.p1,bayestopt_.p2,bayestopt_.p3,bayestopt_.p4);
fval = (likelihood-lnprior);
options_.kalman_algo = kalman_algo;
llik=[-lnprior; .5*lik(start:end)];
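% For reference, a hedged reading of the objective assembled above: the
% filter routines return LIK, minus the log-likelihood of the sample, and
% priordens returns the log prior density at xparam1, so
%   fval = -( log-likelihood + log-prior )
% is minus the log posterior kernel that the mode-finding routines
% minimize, while llik stacks -lnprior on top of the per-observation
% contributions .5*lik(start:end), for use by algorithms that need the
% density observation by observation (e.g. optimization algorithm 5, as
% noted in the header).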