diff --git a/matlab/particle/DsgeLikelihood.m b/matlab/particle/DsgeLikelihood.m
new file mode 100644
index 000000000..8e033d7b8
--- /dev/null
+++ b/matlab/particle/DsgeLikelihood.m
@@ -0,0 +1,265 @@
+function [fval,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations)
+% function [fval,cost_flag,ys,trend_coeff,info] = DsgeLikelihood(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations)
+% Evaluates the posterior kernel of a DSGE model.
+%
+% INPUTS
+%   xparam1                       [double]   vector of model parameters.
+%   gend                          [integer]  scalar specifying the number of observations.
+%   data                          [double]   matrix of data.
+%   data_index                    [cell]     cell of column vectors.
+%   number_of_observations        [integer]
+%   no_more_missing_observations  [integer]
+%
+% OUTPUTS
+%   fval        : value of the posterior kernel at xparam1.
+%   cost_flag   : zero if the function returns a penalty, one otherwise.
+%   ys          : steady state of the original endogenous variables.
+%   trend_coeff :
+%   info        : vector of information about the penalty:
+%                 41: one (or more) parameter(s) do(es) not satisfy the lower bound
+%                 42: one (or more) parameter(s) do(es) not satisfy the upper bound
+%
+% SPECIAL REQUIREMENTS
+%
+
+% Copyright (C) 2004-2009 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
+
+  global bayestopt_ estim_params_ options_ trend_coeff_ M_ oo_
+
+  fval        = [];
+  ys          = [];
+  trend_coeff = [];
+  cost_flag   = 1;
+  nobs        = size(options_.varobs,1);
+
+  %------------------------------------------------------------------------------
+  % 1. Get the structural parameters & define penalties
+  %------------------------------------------------------------------------------
+  if options_.mode_compute ~= 1 & any(xparam1 < bayestopt_.lb)
+    k = find(xparam1 < bayestopt_.lb);
+    fval = bayestopt_.penalty+sum((bayestopt_.lb(k)-xparam1(k)).^2);
+    cost_flag = 0;
+    info = 41;
+    return;
+  end
+  if options_.mode_compute ~= 1 & any(xparam1 > bayestopt_.ub)
+    k = find(xparam1 > bayestopt_.ub);
+    fval = bayestopt_.penalty+sum((xparam1(k)-bayestopt_.ub(k)).^2);
+    cost_flag = 0;
+    info = 42;
+    return;
+  end
+  Q = M_.Sigma_e;
+  H = M_.H;
+  for i=1:estim_params_.nvx
+    k = estim_params_.var_exo(i,1);
+    Q(k,k) = xparam1(i)*xparam1(i);
+  end
+  offset = estim_params_.nvx;
+  if estim_params_.nvn
+    for i=1:estim_params_.nvn
+      k = estim_params_.var_endo(i,1);
+      H(k,k) = xparam1(i+offset)*xparam1(i+offset);
+    end
+    offset = offset+estim_params_.nvn;
+  end
+  if estim_params_.ncx
+    for i=1:estim_params_.ncx
+      k1 = estim_params_.corrx(i,1);
+      k2 = estim_params_.corrx(i,2);
+      Q(k1,k2) = xparam1(i+offset)*sqrt(Q(k1,k1)*Q(k2,k2));
+      Q(k2,k1) = Q(k1,k2);
+    end
+    [CholQ,testQ] = chol(Q);
+    if testQ %% The variance-covariance matrix of the structural innovations is not positive definite.
+             %% We have to compute the eigenvalues of this matrix in order to build the penalty.
+      a = diag(eig(Q));
+      k = find(a < 0);
+      if k > 0
+        fval = bayestopt_.penalty+sum(-a(k));
+        cost_flag = 0;
+        info = 43;
+        return
+      end
+    end
+    offset = offset+estim_params_.ncx;
+  end
+  if estim_params_.ncn
+    for i=1:estim_params_.ncn
+      k1 = options_.lgyidx2varobs(estim_params_.corrn(i,1));
+      k2 = options_.lgyidx2varobs(estim_params_.corrn(i,2));
+      H(k1,k2) = xparam1(i+offset)*sqrt(H(k1,k1)*H(k2,k2));
+      H(k2,k1) = H(k1,k2);
+    end
+    [CholH,testH] = chol(H);
+    if testH
+      a = diag(eig(H));
+      k = find(a < 0);
+      if k > 0
+        fval = bayestopt_.penalty+sum(-a(k));
+        cost_flag = 0;
+        info = 44;
+        return
+      end
+    end
+    offset = offset+estim_params_.ncn;
+  end
+  if estim_params_.np > 0
+    M_.params(estim_params_.param_vals(:,1)) = xparam1(offset+1:end);
+  end
+  M_.Sigma_e = Q;
+  M_.H = H;
+
+  %------------------------------------------------------------------------------
+  % 2. call model setup & reduction program
+  %------------------------------------------------------------------------------
+  options_.order = 2; % because a nonlinear (particle) filter is used here...
+  [T,R,SteadyState,info] = dynare_resolve(bayestopt_.restrict_var_list,...
+                                          bayestopt_.restrict_columns,...
+                                          bayestopt_.restrict_aux);
+  if info(1) == 1 || info(1) == 2 || info(1) == 5
+    fval = bayestopt_.penalty+1;
+    cost_flag = 0;
+    return
+  elseif info(1) == 3 || info(1) == 4 || info(1) == 6 || info(1) == 19 || info(1) == 20 || info(1) == 21
+    fval = bayestopt_.penalty+info(2);
+    cost_flag = 0;
+    return
+  end
+  bayestopt_.mf = bayestopt_.mf1;
+  if options_.noconstant
+    constant = zeros(nobs,1);
+  else
+    if options_.loglinear
+      constant = log(SteadyState(bayestopt_.mfys));
+    else
+      constant = SteadyState(bayestopt_.mfys);
+    end
+  end
+  if bayestopt_.with_trend
+    trend_coeff = zeros(nobs,1);
+    t = options_.trend_coeffs;
+    for i=1:length(t)
+      if ~isempty(t{i})
+        trend_coeff(i) = evalin('base',t{i});
+      end
+    end
+    trend = repmat(constant,1,gend)+trend_coeff*[1:gend];
+  else
+    trend = repmat(constant,1,gend);
+  end
+  start = options_.presample+1;
+  np    = size(T,1);
+  mf    = bayestopt_.mf;
+  no_missing_data_flag = (number_of_observations==gend*nobs);
+
+  %------------------------------------------------------------------------------
+  % 3. Initial condition of the Kalman filter
+  %------------------------------------------------------------------------------
+  kalman_algo = options_.kalman_algo;
+  if options_.lik_init == 1 % Kalman filter
+    if kalman_algo ~= 2
+      kalman_algo = 1;
+    end
+    Pstar = lyapunov_symm(T,R*Q*R',options_.qz_criterium,options_.lyapunov_complex_threshold);
+    Pinf  = [];
+  elseif options_.lik_init == 2 % Old diffuse Kalman filter
+    if kalman_algo ~= 2
+      kalman_algo = 1;
+    end
+    Pstar = options_.Harvey_scale_factor*eye(np);
+    Pinf  = [];
+  elseif options_.lik_init == 3 % Diffuse Kalman filter
+    if kalman_algo ~= 4
+      kalman_algo = 3;
+    end
+    [QT,ST] = schur(T);
+    e1 = abs(ordeig(ST)) > 2-options_.qz_criterium;
+    [QT,ST] = ordschur(QT,ST,e1);
+    k = find(abs(ordeig(ST)) > 2-options_.qz_criterium);
+    nk = length(k);
+    nk1 = nk+1;
+    Pinf = zeros(np,np);
+    Pinf(1:nk,1:nk) = eye(nk);
+    Pstar = zeros(np,np);
+    B = QT'*R*Q*R'*QT;
+    for i=np:-1:nk+2
+      if ST(i,i-1) == 0
+        if i == np
+          c = zeros(np-nk,1);
+        else
+          c = ST(nk1:i,:)*(Pstar(:,i+1:end)*ST(i,i+1:end)')+...
+              ST(i,i)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i);
+        end
+        q = eye(i-nk)-ST(nk1:i,nk1:i)*ST(i,i);
+        Pstar(nk1:i,i) = q\(B(nk1:i,i)+c);
+        Pstar(i,nk1:i-1) = Pstar(nk1:i-1,i)';
+      else
+        if i == np
+          c = zeros(np-nk,1);
+          c1 = zeros(np-nk,1);
+        else
+          c = ST(nk1:i,:)*(Pstar(:,i+1:end)*ST(i,i+1:end)')+...
+              ST(i,i)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i)+...
+              ST(i,i-1)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i-1);
+          c1 = ST(nk1:i,:)*(Pstar(:,i+1:end)*ST(i-1,i+1:end)')+...
+               ST(i-1,i-1)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i-1)+...
+               ST(i-1,i)*ST(nk1:i,i+1:end)*Pstar(i+1:end,i);
+        end
+        q = [eye(i-nk)-ST(nk1:i,nk1:i)*ST(i,i) -ST(nk1:i,nk1:i)*ST(i,i-1);...
+             -ST(nk1:i,nk1:i)*ST(i-1,i) eye(i-nk)-ST(nk1:i,nk1:i)*ST(i-1,i-1)];
+        z = q\[B(nk1:i,i)+c;B(nk1:i,i-1)+c1];
+        Pstar(nk1:i,i) = z(1:(i-nk));
+        Pstar(nk1:i,i-1) = z(i-nk+1:end);
+        Pstar(i,nk1:i-1) = Pstar(nk1:i-1,i)';
+        Pstar(i-1,nk1:i-2) = Pstar(nk1:i-2,i-1)';
+        i = i - 1;
+      end
+    end
+    if i == nk+2
+      c = ST(nk+1,:)*(Pstar(:,nk+2:end)*ST(nk1,nk+2:end)')+ST(nk1,nk1)*ST(nk1,nk+2:end)*Pstar(nk+2:end,nk1);
+      Pstar(nk1,nk1) = (B(nk1,nk1)+c)/(1-ST(nk1,nk1)*ST(nk1,nk1));
+    end
+    Z = QT(mf,:);
+    R1 = QT'*R;
+    [QQ,RR,EE] = qr(Z*ST(:,1:nk),0);
+    k = find(abs(diag([RR; zeros(nk-size(Z,1),size(RR,2))])) < 1e-8);
+    if length(k) > 0
+      k1 = EE(:,k);
+      dd = ones(nk,1);
+      dd(k1) = zeros(length(k1),1);
+      Pinf(1:nk,1:nk) = diag(dd);
+    end
+  end
+  if kalman_algo == 2
+  end
+  kalman_tol = options_.kalman_tol;
+  riccati_tol = options_.riccati_tol;
+  mf = bayestopt_.mf1;
+  Y = data-trend;
+
+  %------------------------------------------------------------------------------
+  % 4. Likelihood evaluation
+  %------------------------------------------------------------------------------
+  rfm.state.dr      = oo_.dr;
+  rfm.state.Q       = Q;
+  rfm.measurement.H = H;
+  number_of_particles = 10;
+
+  LIK = gaussian_particle_filter(rfm,Y,start,mf,number_of_particles);
+
+  %------------------------------------------------------------------------------
+  % Adds prior if necessary
+  %------------------------------------------------------------------------------
+  lnprior = priordens(xparam1,bayestopt_.pshape,bayestopt_.p6,bayestopt_.p7,bayestopt_.p3,bayestopt_.p4);
+  fval    = (LIK-lnprior);
\ No newline at end of file
diff --git a/matlab/particle/simult_.m b/matlab/particle/simult_.m
new file mode 100644
index 000000000..9b7ad8f9e
--- /dev/null
+++ b/matlab/particle/simult_.m
@@ -0,0 +1,90 @@
+function y_=simult_(y0,dr,ex_,iorder)
+% function y_=simult_(y0,dr,ex_,iorder)
+%
+% Simulates the model, given the path of exogenous variables and the
+% decision rules.
+%
+% INPUTS
+%    y0:       starting values
+%    dr:       structure of decision rules for stochastic simulations
+%    ex_:      matrix of shocks
+%    iorder=1: first-order approximation
+%    iorder=2: second-order approximation
+%
+% OUTPUTS
+%    y_:       stochastic simulation results
+%
+% SPECIAL REQUIREMENTS
+%    none
+
+% Copyright (C) 2001-2007 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
+
+global M_ options_ it_
+
+iter = size(ex_,1);
+if ~isempty(dr.ghu)
+    nx = size(dr.ghu,2);
+end
+y_ = zeros(size(y0,1),iter+M_.maximum_lag);
+
+y_(:,1:M_.maximum_lag) = y0;
+k1 = [M_.maximum_lag:-1:1];
+k2 = dr.kstate(find(dr.kstate(:,2) <= M_.maximum_lag+1),[1 2]);
+k2 = k2(:,1)+(M_.maximum_lag+1-k2(:,2))*M_.endo_nbr;
+k3 = M_.lead_lag_incidence(1:M_.maximum_lag,:)';
+k3 = find(k3(:));
+k4 = dr.kstate(find(dr.kstate(:,2) < M_.maximum_lag+1),[1 2]);
+k4 = k4(:,1)+(M_.maximum_lag+1-k4(:,2))*M_.endo_nbr;
+
+if iorder == 1
+    if ~isempty(dr.ghu)
+        for i = M_.maximum_lag+1: iter+M_.maximum_lag
+            tempx1 = y_(dr.order_var,k1);
+            tempx2 = tempx1-repmat(dr.ys(dr.order_var),1,M_.maximum_lag);
+            tempx = tempx2(k2);
+            y_(dr.order_var,i) = dr.ys(dr.order_var)+dr.ghx*tempx+dr.ghu* ...
+                ex_(i-M_.maximum_lag,:)';
+            k1 = k1+1;
+        end
+    else
+        for i = M_.maximum_lag+1: iter+M_.maximum_lag
+            tempx1 = y_(dr.order_var,k1);
+            tempx2 = tempx1-repmat(dr.ys(dr.order_var),1,M_.maximum_lag);
+            tempx = tempx2(k2);
+            y_(dr.order_var,i) = dr.ys(dr.order_var)+dr.ghx*tempx;
+            k1 = k1+1;
+        end
+    end
+elseif iorder == 2
+    for i = M_.maximum_lag+1: iter+M_.maximum_lag
+        tempx1 = y_(dr.order_var,k1);
+        tempx2 = tempx1-repmat(dr.ys(dr.order_var),1,M_.maximum_lag);
+        tempx = tempx2(k2);
+        tempu = ex_(i-M_.maximum_lag,:)';
+        %tempuu = kron(tempu,tempu);
+        %tempxx = kron(tempx,tempx);
+        %tempxu = kron(tempx,tempu);
+        %y_(dr.order_var,i) = dr.ys(dr.order_var)+dr.ghs2/2+dr.ghx*tempx+ ...
+        %    dr.ghu*tempu+0.5*(dr.ghxx*tempxx+dr.ghuu*tempuu)+dr.ghxu* ...
+        %    tempxu;
+        y_(dr.order_var,i) = dr.ys(dr.order_var)+dr.ghs2/2+dr.ghx*tempx+ ...
+            dr.ghu*tempu+0.5*(A_times_B_kronecker_C(dr.ghxx,tempx)+A_times_B_kronecker_C(dr.ghuu,tempu))+ ...
+            A_times_B_kronecker_C(dr.ghxu,tempx,tempu);
+        k1 = k1+1;
+    end
+end
+
+% MJ 08/30/02 corrected bug at order 2
\ No newline at end of file
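
For context, a minimal sketch (not part of the patch) of how the posterior kernel added above might be driven from MATLAB. It assumes the usual Dynare globals (M_, oo_, options_, bayestopt_, estim_params_) and the estimation inputs (gend, data, data_index, number_of_observations, no_more_missing_observations, plus an initial parameter vector xparam0) have already been prepared by Dynare's estimation setup; all of these names are placeholders rather than objects defined in the patch.

% Sketch, assuming the inputs above are in scope: wrap DsgeLikelihood (whose
% first output is the penalized posterior kernel fval) and minimize it with a
% standard MATLAB optimizer; a smaller fval corresponds to a higher posterior
% kernel under the usual Dynare sign convention.
objective = @(xparam) DsgeLikelihood(xparam, gend, data, data_index, ...
                                     number_of_observations, ...
                                     no_more_missing_observations);
[xparam_mode, fval_mode] = fminsearch(objective, xparam0);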