Merge branch 'master' of kirikou.dynare.org:/srv/d_kirikou/git/dynare

time-shift
Stéphane Adjemian (Charybdis) 2010-05-11 22:40:44 +02:00
commit e69903f4ac
31 changed files with 808 additions and 447 deletions

View File

@ -53,7 +53,7 @@ elseif k <= (nvx+nvn)
vname = deblank(options_.varobs(estim_params_.var_endo(k-estim_params_.nvx,1),:));
nam=['SE_EOBS_',vname];
if TeX
tname = deblank(options_.TeX_varobs(estim_params_.var_endo(k-estim_params_.nvx,1),:));
tname = deblank(M_.endo_names_tex(estim_params_.var_endo(k-estim_params_.nvx,1),:));
texnam = ['$ SE_{' tname '} $'];
end
elseif k <= (nvx+nvn+ncx)

View File

@ -1,120 +1,124 @@
function dr=mult_elimination(varlist,M_, options_, oo_)
% function mult_elimination()
% replaces Lagrange multipliers in Ramsey policy by lagged value of state
% and shock variables
%
% INPUT
% none
%
% OUTPUT
% dr: a structure with the new decision rule
%
% SPECIAL REQUIREMENTS
% none
% Copyright (C) 2003-2008 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
dr = oo_.dr;
nstatic = dr.nstatic;
npred = dr.npred;
order_var = dr.order_var;
nstates = M_.endo_names(order_var(nstatic+(1:npred)),:);
il = strmatch('mult_',nstates);
nil = setdiff(1:dr.npred,il);
m_nbr = length(il);
nm_nbr = length(nil);
AA1 = dr.ghx(:,nil);
AA2 = dr.ghx(:,il);
A1 = dr.ghx(nstatic+(1:npred),nil);
A2 = dr.ghx(nstatic+(1:npred),il);
B = dr.ghu(nstatic+(1:npred),:);
A11 = A1(nil,:);
A21 = A1(il,:);
A12 = A2(nil,:);
A22 = A2(il,:);
[Q1,R1,E1] = qr(A2);
n1 = sum(abs(diag(R1)) > 1e-8);
Q1_12 = Q1(1:nm_nbr,n1+1:end);
Q1_22 = Q1(nm_nbr+1:end,n1+1:end);
[Q2,R2,E2] = qr(Q1_22');
n2 = sum(abs(diag(R2)) > 1e-8);
R2_1 = inv(R2(1:n2,1:n2));
M1(order_var,:) = AA1 - AA2*E2*[R2_1*Q2(:,1:n2)'*Q1_12'; zeros(m_nbr-n2,nm_nbr)];
M2(order_var,:) = AA2*E2*[R2_1*Q2(:,1:n2)'*[Q1_12' Q1_22']*A1; zeros(m_nbr-n2,length(nil))];
M3(order_var,:) = dr.ghu;
M4(order_var,:) = AA2*E2*[R2_1*Q2(:,1:n2)'*[Q1_12' Q1_22']*B; zeros(m_nbr-n2,size(B,2))];
endo_nbr = M_.orig_model.endo_nbr;
exo_nbr = M_.exo_nbr;
lead_lag_incidence = M_.lead_lag_incidence(:,1:endo_nbr+exo_nbr);
lead_lag_incidence1 = lead_lag_incidence(1,:) > 0;
maximum_lag = M_.maximum_lag;
for i=1:maximum_lag-1
lead_lag_incidence1 = [lead_lag_incidence1; lead_lag_incidence(i,:)| ...
lead_lag_incidence(i+1,:)];
end
lead_lag_incidence1 = [lead_lag_incidence1; ...
lead_lag_incidence(M_.maximum_lag,:) > 0];
k = find(lead_lag_incidence1');
lead_lag_incidence1 = zeros(size(lead_lag_incidence1'));
lead_lag_incidence1(k) = 1:length(k);
lead_lag_incidence1 = lead_lag_incidence1';
kstate = zeros(0,2);
for i=maximum_lag:-1:1
k = find(lead_lag_incidence(i,:));
kstate = [kstate; [k' repmat(i+1,length(k),1)]];
end
dr.M1 = M1;
dr.M2 = M2;
dr.M3 = M3;
dr.M4 = M4;
nvar = length(varlist);
nspred = dr.nspred;
nspred = 6;
if nvar > 0
res_table = zeros(2*(nspred+M_.exo_nbr),nvar);
headers = 'Variables';
for i=1:length(varlist)
k = strmatch(varlist{i},M_.endo_names(dr.order_var,:),'exact');
headers = strvcat(headers,varlist{i});
res_table(1:nspred,i) = M1(k,:)';
res_table(nspred+(1:nspred),i) = M2(k,:)';
res_table(2*nspred+(1:M_.exo_nbr),i) = M3(k,:)';
res_table(2*nspred+M_.exo_nbr+(1:M_.exo_nbr),i) = M4(k,:)';
end
my_title='ELIMINATION OF THE MULTIPLIERS';
lab1 = M_.endo_names(dr.order_var(dr.nstatic+[ 1 2 5:8]),:);
labels = strvcat(lab1,lab1,M_.exo_names,M_.exo_names);
lh = size(labels,2)+2;
dyntable(my_title,headers,labels,res_table,lh,10,6);
disp(' ')
end
function dr=mult_elimination(varlist,M_, options_, oo_)
% function dr=mult_elimination(varlist,M_,options_,oo_)
% replaces Lagrange multipliers in Ramsey policy by lagged values of state
% and shock variables
%
% INPUTS
%   varlist:  cell array of endogenous variable names for which results are displayed
%   M_:       structure describing the model
%   options_: structure of Dynare options
%   oo_:      structure of results (must contain the decision rule oo_.dr)
%
% OUTPUTS
%   dr:       a structure with the new decision rule
%
% SPECIAL REQUIREMENTS
% none
% Copyright (C) 2003-2010 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
dr = oo_.dr;
nstatic = dr.nstatic;
npred = dr.npred;
order_var = dr.order_var;
nstates = M_.endo_names(order_var(nstatic+(1:npred)),:);
il = strmatch('mult_',nstates);
nil = setdiff(1:dr.npred,il);
m_nbr = length(il);
nm_nbr = length(nil);
AA1 = dr.ghx(:,nil);
AA2 = dr.ghx(:,il);
A1 = dr.ghx(nstatic+(1:npred),nil);
A2 = dr.ghx(nstatic+(1:npred),il);
B = dr.ghu(nstatic+(1:npred),:);
A11 = A1(nil,:);
A21 = A1(il,:);
A12 = A2(nil,:);
A22 = A2(il,:);
B1 = B(nil,:);
B2 = B(il,:);
[Q1,R1,E1] = qr([A12; A22]);
n1 = sum(abs(diag(R1)) > 1e-8);
Q1_12 = Q1(1:nm_nbr,n1+1:end);
Q1_22 = Q1(nm_nbr+(1:m_nbr),n1+1:end);
[Q2,R2,E2] = qr(Q1_22');
n2 = sum(abs(diag(R2)) > 1e-8);
R2_1 = inv(R2(1:n2,1:n2));
M1 = AA1 - AA2*E2*[R2_1*Q2(:,1:n2)'*Q1_12'; zeros(m_nbr-n2,nm_nbr)];
M2 = AA2*E2*[R2_1*Q2(:,1:n2)'*[Q1_12' Q1_22']*[A11;A21]; zeros(m_nbr-n2,length(nil))];
M3 = dr.ghu;
M4 = AA2*E2*[R2_1*Q2(:,1:n2)'*[Q1_12' Q1_22']*[B1;B2]; zeros(m_nbr-n2,size(B,2))];
k1 = nstatic+(1:npred);
k1 = k1(nil);
endo_nbr = M_.orig_model.endo_nbr;
exo_nbr = M_.exo_nbr;
lead_lag_incidence = M_.lead_lag_incidence(:,1:endo_nbr+exo_nbr);
lead_lag_incidence1 = lead_lag_incidence(1,:) > 0;
maximum_lag = M_.maximum_lag;
for i=1:maximum_lag-1
lead_lag_incidence1 = [lead_lag_incidence1; lead_lag_incidence(i,:)| ...
lead_lag_incidence(i+1,:)];
end
lead_lag_incidence1 = [lead_lag_incidence1; ...
lead_lag_incidence(M_.maximum_lag,:) > 0];
k = find(lead_lag_incidence1');
lead_lag_incidence1 = zeros(size(lead_lag_incidence1'));
lead_lag_incidence1(k) = 1:length(k);
lead_lag_incidence1 = lead_lag_incidence1';
kstate = zeros(0,2);
for i=maximum_lag:-1:1
k = find(lead_lag_incidence(i,:));
kstate = [kstate; [k' repmat(i+1,length(k),1)]];
end
dr.M1 = M1;
dr.M2 = M2;
dr.M3 = M3;
dr.M4 = M4;
nvar = length(varlist);
nspred = dr.nspred;
nspred = 6;
if nvar > 0
res_table = zeros(2*(nspred+M_.exo_nbr),nvar);
headers = 'Variables';
for i=1:length(varlist)
k = strmatch(varlist{i},M_.endo_names(dr.order_var,:),'exact');
headers = strvcat(headers,varlist{i});
res_table(1:nspred,i) = M1(k,:)';
res_table(nspred+(1:nspred),i) = M2(k,:)';
res_table(2*nspred+(1:M_.exo_nbr),i) = M3(k,:)';
res_table(2*nspred+M_.exo_nbr+(1:M_.exo_nbr),i) = M4(k,:)';
end
my_title='ELIMINATION OF THE MULTIPLIERS';
lab1 = M_.endo_names(dr.order_var(dr.nstatic+[ 1 2 5:8]),:);
labels = strvcat(lab1,lab1,M_.exo_names,M_.exo_names);
lh = size(labels,2)+2;
dyntable(my_title,headers,labels,res_table,lh,10,6);
disp(' ')
end

View File

@ -31,7 +31,7 @@ DetrendData::DetrendData(const bool INlogLinear) //, Vector& INtrendCoeff)
};
void
DetrendData::detrend(const Vector &SteadyState, const MatrixView &dataView, Matrix &Y)
DetrendData::detrend(const VectorView &SteadyState, const MatrixConstView &dataView, Matrix &Y)
{
// dummy
Y=dataView;

View File

@ -33,7 +33,7 @@ class DetrendData
public:
virtual ~DetrendData(){};
DetrendData(const bool logLinear); // add later Vector& trendCoeff);
void detrend(const Vector &SteadyState, const MatrixView &dataView, Matrix &Y);
void detrend(const VectorView &SteadyState, const MatrixConstView &dataView, Matrix &Y);
private:
const bool logLinear;

View File

@ -0,0 +1,40 @@
/*
* Copyright (C) 2009-2010 Dynare Team
*
* This file is part of Dynare.
*
* Dynare is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Dynare is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Dynare. If not, see <http://www.gnu.org/licenses/>.
*/
///////////////////////////////////////////////////////////
// EstimatedParameter.cpp
// Implementation of the Class EstimatedParameter
// Created on: 02-Feb-2010 13:06:35
///////////////////////////////////////////////////////////
#include "EstimatedParameter.hh"
EstimatedParameter::EstimatedParameter(const EstimatedParameter::pType type_arg,
size_t ID1_arg, size_t ID2_arg, const std::vector<size_t> &subSampleIDs_arg,
double lower_bound_arg, double upper_bound_arg, Prior prior_arg) :
ptype(type_arg), ID1(ID1_arg), ID2(ID2_arg),
lower_bound(lower_bound_arg), upper_bound(upper_bound_arg), prior(prior_arg),
subSampleIDs(subSampleIDs_arg)
{
}
EstimatedParameter::~EstimatedParameter()
{
}

View File

@ -27,18 +27,11 @@
#include "Prior.hh"
#include <cstdlib>
#include <vector>
struct EstimatedParameter
{
public:
EstimatedParameter(
size_t ID1, size_t ID2, size_t type, size_t startPeriod, size_t endPeriod,
double lower_boound, double upper_boound, Prior prior
);
virtual ~EstimatedParameter();
public:
const size_t ID1;
const size_t ID2;
// parameter types
enum pType
{
shock_SD = 1, // standard deviation of a structural shock
@ -47,9 +40,18 @@ public:
measureErr_Corr = 4, // correlation between two measurement errors
deepPar = 5 // deep parameter
};
const pType ptype;
const double lower_boound;
const double upper_boound;
EstimatedParameter(const EstimatedParameter::pType type,
size_t ID1, size_t ID2, const std::vector<size_t> &subSampleIDs,
double lower_bound, double upper_bound, Prior prior
);
virtual ~EstimatedParameter();
const enum pType ptype;
const size_t ID1;
const size_t ID2;
const double lower_bound;
const double upper_bound;
const Prior prior;
const std::vector<size_t> subSampleIDs;
};
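For orientation, a minimal construction sketch of the reordered EstimatedParameter constructor, using the Prior signature introduced elsewhere in this commit; the shock index, bounds, prior hyper-parameters and helper name are purely hypothetical:
#include "EstimatedParameter.hh"
// Builds one entry of an estimated-parameter list: the standard deviation of a
// structural shock, active in sub-sample 1, with a Uniform prior on [0,10].
// All numeric values are made up for illustration.
EstimatedParameter
exampleShockSD()
{
  std::vector<size_t> subSampleIDs(1, 1);                 // active in sub-sample 1 only
  Prior uprior(Prior::Uniform, 5.0, 5.0, 0.0, 10.0, 0.0, 10.0);
  return EstimatedParameter(EstimatedParameter::shock_SD,
                            0, 0,                         // ID1 = shock index; ID2 presumably unused for SDs
                            subSampleIDs,
                            0.0, 10.0,                    // lower_bound, upper_bound
                            uprior);
}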

View File

@ -0,0 +1,36 @@
/*
* Copyright (C) 2010 Dynare Team
*
* This file is part of Dynare.
*
* Dynare is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Dynare is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Dynare. If not, see <http://www.gnu.org/licenses/>.
*/
///////////////////////////////////////////////////////////
// EstimatedParametersDescription.cpp
// Implementation of the Class EstimatedParametersDescription
// Created on: 02-Feb-2010 13:06:47
///////////////////////////////////////////////////////////
#include "EstimatedParametersDescription.hh"
EstimatedParametersDescription::~EstimatedParametersDescription()
{
}
EstimatedParametersDescription::EstimatedParametersDescription(std::vector<EstimationSubsample> &INestSubsamples, std::vector<EstimatedParameter> &INestParams) :
estSubsamples(INestSubsamples), estParams(INestParams)
{
}

View File

@ -46,21 +46,9 @@ class EstimatedParametersDescription
{
public:
virtual ~EstimatedParametersDescription();
public:
EstimatedParametersDescription(size_t ncx, size_t ncn, size_t nvx, size_t nvn, size_t np,
std::vector<EstimationSubsample> &estSubsamples, std::vector<EstimatedParameter> &estParams);
//Vector getAllParams(int period);
//int getNumPeriods();
EstimationSubsample&getSubsample(int period);
std::vector<EstimationSubsample>&getAllSubsamples();
//EstimatedParameter& getParameter(size_t ID);
//private:
EstimatedParametersDescription(std::vector<EstimationSubsample> &estSubsamples, std::vector<EstimatedParameter> &estParams);
std::vector<EstimationSubsample> estSubsamples;
std::vector<EstimatedParameter> estParams;
//vector<Prior> prior;
};
#endif // !defined(E8F2C096_A301_42e8_80FF_A643291BF995__INCLUDED_)

View File

@ -1,44 +1,36 @@
/*
* Copyright (C) 2008-2009 Dynare Team
*
* This file is part of Dynare.
*
* Dynare is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Dynare is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Dynare. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef ESTIMATION_H
#define ESTIMATION_H
//#include "k_ord_dynare.h"
#include "kalman.h"
#include "math.h"
#include "disclyap_fast.h"
#include "Mexutils.h"
#include "estutils.h"
#include <cstring>
#include <cctype>
class DsgeLikelihood;//GeneralParams;
class Estimation
{
double logLikelihood;
//instance MexStruct
GeneralParams& gParams;
DsgeLikelihood& DsgeLik;
public:
double getLikelihood()
{return logLikelihood;};
};
#endif
/*
* Copyright (C) 2009-2010 Dynare Team
*
* This file is part of Dynare.
*
* Dynare is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Dynare is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Dynare. If not, see <http://www.gnu.org/licenses/>.
*/
///////////////////////////////////////////////////////////
// EstimationSubsample.cpp
// Implementation of the Class EstimationSubsample
// Created on: 02-Feb-2010 13:06:01
///////////////////////////////////////////////////////////
#include "EstimationSubsample.hh"
EstimationSubsample::EstimationSubsample(size_t INstartPeriod, size_t INendPeriod) :
startPeriod(INstartPeriod), endPeriod(INendPeriod)
{
}
EstimationSubsample::~EstimationSubsample()
{
}
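A companion sketch assembling the sub-sample and parameter descriptions defined above, assuming EstimatedParametersDescription.hh pulls in the two headers it uses; the single full-sample sub-period and the period convention (0-based start, endPeriod equal to the number of observations, matching the vll sizing in LogLikelihoodMain) are assumptions:
#include "EstimatedParametersDescription.hh"
// Wraps a single full-sample EstimationSubsample and a user-built parameter list
// into an EstimatedParametersDescription (period convention assumed, see above).
EstimatedParametersDescription
exampleDescription(std::vector<EstimatedParameter> &estParams, size_t nobs)
{
  std::vector<EstimationSubsample> estSubsamples;
  estSubsamples.push_back(EstimationSubsample(0, nobs));  // startPeriod, endPeriod
  return EstimatedParametersDescription(estSubsamples, estParams);
}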

View File

@ -33,46 +33,48 @@ InitializeKalmanFilter::~InitializeKalmanFilter()
InitializeKalmanFilter::InitializeKalmanFilter(const std::string &modName, size_t n_endo_arg, size_t n_exo_arg,
const std::vector<size_t> &zeta_fwrd_arg, const std::vector<size_t> &zeta_back_arg,
const std::vector<size_t> &zeta_mixed_arg, const std::vector<size_t> &zeta_static_arg,
double qz_criterium, const std::vector<size_t> &order_var_arg, const std::vector<size_t> &inv_order_var_arg,
const std::vector<size_t> &rivIN, const std::vector<size_t> &ricIN, double lyapunov_tolIN, int &info) :
riv(rivIN), inv_ric(ricIN.size()), order_var(order_var_arg), lyapunov_tol(lyapunov_tolIN),
const std::vector<size_t> &zeta_varobs_back_mixed_arg,
double qz_criterium_arg,
double lyapunov_tol_arg, int &info) :
lyapunov_tol(lyapunov_tol_arg),
zeta_varobs_back_mixed(zeta_varobs_back_mixed_arg),
detrendData(false), modelSolution(modName, n_endo_arg, n_exo_arg, zeta_fwrd_arg, zeta_back_arg,
zeta_mixed_arg, zeta_static_arg, qz_criterium), discLyapFast(riv.size()),
ghx(n_endo_arg, zeta_back_arg.size() + zeta_mixed_arg.size()),
ghx_raw(n_endo_arg, zeta_back_arg.size() + zeta_mixed_arg.size()),
ghu(n_endo_arg, n_exo_arg), ghu_raw(n_endo_arg, n_exo_arg),
Rt(n_exo_arg, riv.size()), RQ(riv.size(), n_exo_arg)
zeta_mixed_arg, zeta_static_arg, qz_criterium_arg),
discLyapFast(zeta_varobs_back_mixed.size()),
g_x(n_endo_arg, zeta_back_arg.size() + zeta_mixed_arg.size()),
g_u(n_endo_arg, n_exo_arg),
Rt(n_exo_arg, zeta_varobs_back_mixed.size()),
RQ(zeta_varobs_back_mixed.size(), n_exo_arg)
{
size_t n_pred = ghx.getCols();
size_t n_static = zeta_static_arg.size();
size_t j = 0;
for (size_t i = 0; i < n_endo_arg; ++i)
if (inv_order_var_arg[i]-n_static < n_pred && inv_order_var_arg[i]-n_static >= 0)
inv_ric[j++] = ricIN[inv_order_var_arg[i]-n_static];
std::vector<size_t> zeta_back_mixed;
set_union(zeta_back_arg.begin(), zeta_back_arg.end(),
zeta_mixed_arg.begin(), zeta_mixed_arg.end(),
back_inserter(zeta_back_mixed));
for (size_t i = 0; i < zeta_back_mixed.size(); i++)
pi_bm_vbm.push_back(find(zeta_varobs_back_mixed.begin(), zeta_varobs_back_mixed.end(),
zeta_back_mixed[i]) - zeta_varobs_back_mixed.begin());
}
// initialise parameter dependent KF matrices only but not Ps
void
InitializeKalmanFilter::initialize(Vector &steadyState, const Vector &deepParams, Matrix &R, const Matrix &Z,
InitializeKalmanFilter::initialize(VectorView &steadyState, const Vector &deepParams, Matrix &R,
const Matrix &Q, Matrix &RQRt, Matrix &T,
double &penalty, const MatrixView &dataView, Matrix &Y, int &info)
double &penalty, const MatrixConstView &dataView, Matrix &Y, int &info)
{
modelSolution.compute(steadyState, deepParams, ghx_raw, ghu_raw);
modelSolution.compute(steadyState, deepParams, g_x, g_u);
detrendData.detrend(steadyState, dataView, Y);
mat::reorderRowsByVectors(ghx, mat::nullVec, ghx_raw, order_var);
mat::reorderRowsByVectors(ghu, mat::nullVec, ghu_raw, order_var);
setT(T, info);
setRQR(R, Q, RQRt, info);
}
// initialise all KF matrices
void
InitializeKalmanFilter::initialize(Vector &steadyState, const Vector &deepParams, Matrix &R, const Matrix &Z,
InitializeKalmanFilter::initialize(VectorView &steadyState, const Vector &deepParams, Matrix &R,
const Matrix &Q, Matrix &RQRt, Matrix &T, Matrix &Pstar, Matrix &Pinf,
double &penalty, const MatrixView &dataView, Matrix &Y, int &info)
double &penalty, const MatrixConstView &dataView, Matrix &Y, int &info)
{
initialize(steadyState, deepParams, R, Z, Q, RQRt, T, penalty, dataView, Y, info);
initialize(steadyState, deepParams, R, Q, RQRt, T, penalty, dataView, Y, info);
setPstar(Pstar, Pinf, T, RQRt, info);
}
@ -80,26 +82,19 @@ InitializeKalmanFilter::initialize(Vector &steadyState, const Vector &deepParams
void
InitializeKalmanFilter::setT(Matrix &T, int &info)
{
// here is the content of [T R]=[A,B] = kalman_transition_matrix(oo_.dr,iv,ic,aux,M_.exo_nbr);
// Initialize the empty columns of T to zero
T.setAll(0.0);
//T(i_n_iv,ic) = dr.ghx(iv,:);
mat::assignByVectors(T, mat::nullVec, inv_ric, ghx, riv, mat::nullVec);
mat::assignByVectors(T, mat::nullVec, pi_bm_vbm, g_x, zeta_varobs_back_mixed, mat::nullVec);
}
void
InitializeKalmanFilter::setRQR(Matrix &R, const Matrix &Q, Matrix &RQRt, int &info)
{
R.setAll(0.0);
//B(i_n_iv,:) = dr.ghu(iv,:); and R=B;
mat::assignByVectors(R, mat::nullVec, mat::nullVec, ghu, riv, mat::nullVec);
mat::assignByVectors(R, mat::nullVec, mat::nullVec, g_u, zeta_varobs_back_mixed, mat::nullVec);
// Matrix RQRt=R*Q*R'
RQ.setAll(0.0);
blas::gemm("N", "N", 1.0, R, Q, 1.0, RQ); // R*Q
RQRt.setAll(0.0);
blas::gemm("N", "T", 1.0, RQ, R, 1.0, RQRt); // R*Q*R'
blas::gemm("N", "N", 1.0, R, Q, 0.0, RQ); // R*Q
blas::gemm("N", "T", 1.0, RQ, R, 0.0, RQRt); // R*Q*R'
}
void

View File

@ -40,29 +40,32 @@ class InitializeKalmanFilter
{
public:
/*!
\param[in] zeta_varobs_back_mixed_arg The union of indices of observed, backward and mixed variables
*/
InitializeKalmanFilter(const std::string& modName, size_t n_endo, size_t n_exo, const std::vector<size_t> &zeta_fwrd_arg,
const std::vector<size_t> &zeta_back_arg, const std::vector<size_t> &zeta_mixed_arg, const std::vector<size_t> &zeta_static_arg,
double qz_criterium, const std::vector<size_t> &order_var_arg, const std::vector<size_t> &inv_order_var_arg,
const std::vector<size_t> &riv, const std::vector<size_t> &ric, double lyapunov_tol, int &info);
const std::vector<size_t> &zeta_varobs_back_mixed_arg,
double qz_criterium_arg, double lyapunov_tol_arg, int &info);
virtual ~InitializeKalmanFilter();
// initialise all KF matrices
void initialize(Vector &steadyState, const Vector &deepParams, Matrix &R, const Matrix &Z, const Matrix &Q, Matrix &RQRt,
Matrix &T, Matrix &Pstar, Matrix &Pinf, double &penalty, const MatrixView &dataView, Matrix &Y, int &info);
void initialize(VectorView &steadyState, const Vector &deepParams, Matrix &R, const Matrix &Q, Matrix &RQRt,
Matrix &T, Matrix &Pstar, Matrix &Pinf, double &penalty, const MatrixConstView &dataView, Matrix &Y, int &info);
// initialise parameter dependent KF matrices only but not Ps
void initialize(Vector &steadyState, const Vector &deepParams, Matrix &R, const Matrix &Z, const Matrix &Q, Matrix &RQRt,
Matrix &T, double &penalty, const MatrixView &dataView, Matrix &Y, int &info);
void initialize(VectorView &steadyState, const Vector &deepParams, Matrix &R, const Matrix &Q, Matrix &RQRt,
Matrix &T, double &penalty, const MatrixConstView &dataView, Matrix &Y, int &info);
private:
const std::vector<size_t> riv; // restrict_var_list
std::vector<size_t> inv_ric; // inverse restrict_columns
const std::vector<size_t> order_var;
const double lyapunov_tol;
const std::vector<size_t> zeta_varobs_back_mixed;
//! Indices of back+mixed zetas inside varobs+back+mixed zetas
std::vector<size_t> pi_bm_vbm;
DetrendData detrendData;
ModelSolution modelSolution;
DiscLyapFast discLyapFast; //Lyapunov solver
Matrix ghx, ghx_raw;
Matrix ghu, ghu_raw;
Matrix g_x;
Matrix g_u;
Matrix Rt, RQ;
void setT(Matrix &T, int &info);
void setRQR(Matrix &R, const Matrix &Q, Matrix &RQRt, int &info);

View File

@ -33,42 +33,58 @@ KalmanFilter::~KalmanFilter()
KalmanFilter::KalmanFilter(const std::string &modName, size_t n_endo, size_t n_exo,
const std::vector<size_t> &zeta_fwrd_arg, const std::vector<size_t> &zeta_back_arg,
const std::vector<size_t> &zeta_mixed_arg, const std::vector<size_t> &zeta_static_arg,
double qz_criterium, const std::vector<size_t> &order_var,
const std::vector<size_t> &inv_order_var, const std::vector<size_t> &varobs_arg,
const std::vector<size_t> &riv, const std::vector<size_t> &ric,
double riccati_tol_in, double lyapunov_tol, int &info) :
Z(varobs_arg.size(), riv.size()), T(riv.size()), R(riv.size(), n_exo),
Pstar(riv.size(), riv.size()), Pinf(riv.size(), riv.size()), RQRt(riv.size(), riv.size()),
Ptmp(riv.size(), riv.size()), F(varobs_arg.size(), varobs_arg.size()),
Finv(varobs_arg.size(), varobs_arg.size()), K(riv.size(), varobs_arg.size()), KFinv(riv.size(), varobs_arg.size()),
oldKFinv(riv.size(), varobs_arg.size()), Finverter(varobs_arg.size()), a_init(riv.size(), 1),
a_new(riv.size(), 1), vt(varobs_arg.size(), 1), vtFinv(1, varobs_arg.size()), vtFinvVt(1), riccati_tol(riccati_tol_in),
double qz_criterium_arg, const std::vector<size_t> &varobs_arg,
double riccati_tol_arg, double lyapunov_tol_arg, int &info) :
zeta_varobs_back_mixed(compute_zeta_varobs_back_mixed(zeta_back_arg, zeta_mixed_arg, varobs_arg)),
Z(varobs_arg.size(), zeta_varobs_back_mixed.size()), T(zeta_varobs_back_mixed.size()), R(zeta_varobs_back_mixed.size(), n_exo),
Pstar(zeta_varobs_back_mixed.size(), zeta_varobs_back_mixed.size()), Pinf(zeta_varobs_back_mixed.size(), zeta_varobs_back_mixed.size()), RQRt(zeta_varobs_back_mixed.size(), zeta_varobs_back_mixed.size()),
Ptmp(zeta_varobs_back_mixed.size(), zeta_varobs_back_mixed.size()), F(varobs_arg.size(), varobs_arg.size()),
Finv(varobs_arg.size(), varobs_arg.size()), K(zeta_varobs_back_mixed.size(), varobs_arg.size()), KFinv(zeta_varobs_back_mixed.size(), varobs_arg.size()),
oldKFinv(zeta_varobs_back_mixed.size(), varobs_arg.size()), Finverter(varobs_arg.size()), a_init(zeta_varobs_back_mixed.size(), 1),
a_new(zeta_varobs_back_mixed.size(), 1), vt(varobs_arg.size(), 1), vtFinv(1, varobs_arg.size()), vtFinvVt(1), riccati_tol(riccati_tol_arg),
initKalmanFilter(modName, n_endo, n_exo, zeta_fwrd_arg, zeta_back_arg, zeta_mixed_arg,
zeta_static_arg, qz_criterium, order_var, inv_order_var, riv, ric, lyapunov_tol, info)
zeta_static_arg, zeta_varobs_back_mixed, qz_criterium_arg, lyapunov_tol_arg, info)
{
a_init.setAll(0.0);
Z.setAll(0.0);
for (size_t i = 0; i < varobs_arg.size(); ++i)
Z(i, varobs_arg[i]) = 1.0;
{
size_t j = find(zeta_varobs_back_mixed.begin(), zeta_varobs_back_mixed.end(),
varobs_arg[i]) - zeta_varobs_back_mixed.begin();
Z(i, j) = 1.0;
}
}
std::vector<size_t>
KalmanFilter::compute_zeta_varobs_back_mixed(const std::vector<size_t> &zeta_back_arg, const std::vector<size_t> &zeta_mixed_arg, const std::vector<size_t> &varobs_arg)
{
std::vector<size_t> varobs_sorted = varobs_arg;
sort(varobs_sorted.begin(), varobs_sorted.end());
std::vector<size_t> zeta_back_mixed, zeta_varobs_back_mixed;
set_union(zeta_back_arg.begin(), zeta_back_arg.end(),
zeta_mixed_arg.begin(), zeta_mixed_arg.end(),
back_inserter(zeta_back_mixed));
set_union(zeta_back_mixed.begin(), zeta_back_mixed.end(),
varobs_sorted.begin(), varobs_sorted.end(),
back_inserter(zeta_varobs_back_mixed));
return zeta_varobs_back_mixed;
}
double
KalmanFilter::compute(const MatrixView &dataView, Vector &steadyState,
KalmanFilter::compute(const MatrixConstView &dataView, VectorView &steadyState,
const Matrix &Q, const Matrix &H, const Vector &deepParams,
VectorView &vll, size_t start, size_t period, double &penalty, int &info)
{
double ll;
Matrix Y(dataView.getRows(), dataView.getCols()); // data
if(period==0) // initialise all KF matrices
initKalmanFilter.initialize(steadyState, deepParams, R, Z, Q, RQRt, T, Pstar, Pinf,
initKalmanFilter.initialize(steadyState, deepParams, R, Q, RQRt, T, Pstar, Pinf,
penalty, dataView, Y, info);
else // initialise parameter dependent KF matrices only but not Ps
initKalmanFilter.initialize(steadyState, deepParams, R, Z, Q, RQRt, T,
initKalmanFilter.initialize(steadyState, deepParams, R, Q, RQRt, T,
penalty, dataView, Y, info);
return ll = filter(Y, H, vll, start, info);
return filter(Y, H, vll, start, info);
};
@ -79,7 +95,7 @@ double
KalmanFilter::filter(const Matrix &dataView, const Matrix &H, VectorView &vll, size_t start, int &info)
{
double loglik=0.0, ll, logFdet, Fdet;
int p = Finv.getRows();
size_t p = Finv.getRows();
bool nonstationary = true;
for (size_t t = 0; t < dataView.getCols(); ++t)
@ -87,8 +103,7 @@ KalmanFilter::filter(const Matrix &dataView, const Matrix &H, VectorView &vll,
if (nonstationary)
{
// K=PZ'
K.setAll(0.0);
blas::gemm("N", "T", 1.0, Pstar, Z, 1.0, K);
blas::gemm("N", "T", 1.0, Pstar, Z, 0.0, K);
//F=ZPZ' +H = ZK+H
F = H;
@ -99,11 +114,10 @@ KalmanFilter::filter(const Matrix &dataView, const Matrix &H, VectorView &vll,
mat::set_identity(Finv);
Finverter.invMult("N", F, Finv); // F now contains its LU decomposition!
// KFinv gain matrix
KFinv.setAll(0.0);
blas::gemm("N", "N", 1.0, K, Finv, 1.0, KFinv);
blas::gemm("N", "N", 1.0, K, Finv, 0.0, KFinv);
// determinant of F:
Fdet = 1;
for (int d = 0; d < p; ++d)
for (size_t d = 0; d < p; ++d)
Fdet *= (-F(d, d));
logFdet=log(fabs(Fdet));
@ -114,8 +128,7 @@ KalmanFilter::filter(const Matrix &dataView, const Matrix &H, VectorView &vll,
blas::gemm("N", "T", -1.0, KFinv, K, 1.0, Ptmp);
// 2) Ptmp= T*Ptmp
Pstar = Ptmp;
Ptmp.setAll(0.0);
blas::gemm("N", "N", 1.0, T, Pstar, 1.0, Ptmp);
blas::gemm("N", "N", 1.0, T, Pstar, 0.0, Ptmp);
// 3) Pt+1= Ptmp*T' +RQR'
Pstar = RQRt;
blas::gemm("N", "T", 1.0, Ptmp, T, 1.0, Pstar);
@ -125,7 +138,6 @@ KalmanFilter::filter(const Matrix &dataView, const Matrix &H, VectorView &vll,
}
// err= Yt - Za
//VectorView yt(dataView.getData()+t*dataView.getRows(),dataView.getRows(),1); // current observation vector
MatrixConstView yt(dataView, 0, t, dataView.getRows(), 1); // current observation vector
vt = yt;
blas::gemm("N", "N", -1.0, Z, a_init, 1.0, vt);

View File

@ -29,7 +29,7 @@
#include "InitializeKalmanFilter.hh"
/**
* vanilla Kalman filter without constant and with measurement error (use scalar
* Vanilla Kalman filter without constant and with measurement error (use scalar
* 0 when no measurement error).
* If multivariate filter is faster, do as in Matlab: start with multivariate
* filter and switch to univariate filter only in case of singularity
@ -50,18 +50,16 @@ public:
virtual ~KalmanFilter();
KalmanFilter(const std::string &modName, size_t n_endo, size_t n_exo, const std::vector<size_t> &zeta_fwrd_arg,
const std::vector<size_t> &zeta_back_arg, const std::vector<size_t> &zeta_mixed_arg, const std::vector<size_t> &zeta_static_arg,
double qz_criterium, const std::vector<size_t> &order_var_arg, const std::vector<size_t> &inv_order_var,
const std::vector<size_t> &varobs_arg, const std::vector<size_t> &riv, const std::vector<size_t> &ric,
double riccati_tol, double lyapunov_tol, int &info);
double qz_criterium_arg, const std::vector<size_t> &varobs_arg,
double riccati_tol_arg, double lyapunov_tol_arg, int &info);
double compute(const MatrixView &dataView, Vector &steadyState,
double compute(const MatrixConstView &dataView, VectorView &steadyState,
const Matrix &Q, const Matrix &H, const Vector &deepParams,
VectorView &vll, size_t start, size_t period, double &penalty, int &info);
protected:
// static double calcStepLogLik(const PLUFact &Finv, const Vector &v);
private:
const std::vector<size_t> zeta_varobs_back_mixed;
static std::vector<size_t> compute_zeta_varobs_back_mixed(const std::vector<size_t> &zeta_back_arg, const std::vector<size_t> &zeta_mixed_arg, const std::vector<size_t> &varobs_arg);
Matrix Z; //nobs*mm matrix mapping endogenous variables and observations
Matrix T; //mm*mm transition matrix of the state equation.
Matrix R; //mm*rr matrix, mapping structural innovations to state variables.
@ -77,7 +75,9 @@ private:
Matrix vtFinv, vtFinvVt; // intermediate observation error * Finv vector
double riccati_tol;
InitializeKalmanFilter initKalmanFilter; //Initialise KF matrices
double filter(const Matrix &dataView, const Matrix &H, VectorView &vll, size_t start, int &info);
// Method
double filter(const Matrix &data, const Matrix &H, VectorView &vll, size_t start, int &info);
};

View File

@ -0,0 +1,64 @@
/*
* Copyright (C) 2009-2010 Dynare Team
*
* This file is part of Dynare.
*
* Dynare is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Dynare is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Dynare. If not, see <http://www.gnu.org/licenses/>.
*/
///////////////////////////////////////////////////////////
// LogLikelihoodMain.cpp
// Implementation of the Class LogLikelihoodMain
// Created on: 02-Feb-2010 12:57:09
///////////////////////////////////////////////////////////
#include "LogLikelihoodMain.hh"
LogLikelihoodMain::LogLikelihoodMain( //const Matrix &data_arg, Vector &deepParams_arg,
const std::string &modName, EstimatedParametersDescription &estiParDesc, size_t n_endo, size_t n_exo,
const std::vector<size_t> &zeta_fwrd_arg, const std::vector<size_t> &zeta_back_arg,
const std::vector<size_t> &zeta_mixed_arg, const std::vector<size_t> &zeta_static_arg, const double qz_criterium,
const std::vector<size_t> &varobs, double riccati_tol, double lyapunov_tol, int &info_arg)
: estSubsamples(estiParDesc.estSubsamples),
logLikelihoodSubSample(modName, estiParDesc, n_endo, n_exo, zeta_fwrd_arg, zeta_back_arg, zeta_mixed_arg, zeta_static_arg, qz_criterium,
varobs, riccati_tol, lyapunov_tol, info_arg),
//deepParams(deepParams_arg),
vll(estSubsamples[estSubsamples.size()-1].endPeriod) // time dimension size of data
{
}
LogLikelihoodMain::~LogLikelihoodMain()
{
}
double
LogLikelihoodMain::compute(Matrix &steadyState, const Vector &estParams, Vector &deepParams, const MatrixConstView &data, Matrix &Q, Matrix &H, size_t start, int &info)
{
for (size_t i = 1; i <= (size_t) estSubsamples.size(); ++i)
{
VectorView vSteadyState = mat::get_col(steadyState, i-1);
MatrixConstView dataView(data, 0, estSubsamples[i-1].startPeriod,
data.getRows(), estSubsamples[i-1].endPeriod-estSubsamples[i-1].startPeriod+1);
VectorView vllView(vll, estSubsamples[i-1].startPeriod, estSubsamples[i-1].endPeriod-estSubsamples[i-1].startPeriod+1);
logLikelihood += logLikelihoodSubSample.compute(vSteadyState, dataView, estParams, deepParams,
Q, H, vllView, info, start, i);
}
return logLikelihood;
};

View File

@ -25,7 +25,6 @@
#if !defined(E126AEF5_AC28_400a_821A_3BCFD1BC4C22__INCLUDED_)
#define E126AEF5_AC28_400a_821A_3BCFD1BC4C22__INCLUDED_
//#include "EstimatedParametersDescription.hh"
#include "LogLikelihoodSubSample.hh"
class LogLikelihoodMain {
@ -33,26 +32,25 @@ private:
double logLikelihood;
std::vector<EstimationSubsample> &estSubsamples; // reference to member of EstimatedParametersDescription
LogLikelihoodSubSample logLikelihoodSubSample;
Vector deepParams;
Vector &vll; // vector of all KF step likelihoods
Matrix &data; // input data
Matrix &steadyState;
//GeneralParams& estimOptions;
int presampleStart;
Matrix Q;
Matrix H;
Vector vll; // vector of all KF step likelihoods
public:
virtual ~LogLikelihoodMain();
LogLikelihoodMain(const Matrix &data, //GeneralParams& estimOptions,
const std::string &modName, EstimatedParametersDescription &estiParDesc, size_t n_endo, size_t n_exo,
const std::vector<size_t> &zeta_fwrd_arg, const std::vector<size_t> &zeta_back_arg, const std::vector<size_t> &zeta_mixed_arg,
const std::vector<size_t> &zeta_static_arg, const double qz_criterium, const std::vector<size_t> &order_var,
const std::vector<size_t> &inv_order_var, const std::vector<size_t> &varobs, const std::vector<size_t> &riv,
const std::vector<size_t> &ric, double riccati_tol_in, double lyapunov_tol, int &info);
LogLikelihoodMain( //const Matrix &data, Vector &deepParams_arg, //GeneralParams& estimOptions,
const std::string &modName, EstimatedParametersDescription &estiParDesc, size_t n_endo, size_t n_exo,
const std::vector<size_t> &zeta_fwrd_arg, const std::vector<size_t> &zeta_back_arg, const std::vector<size_t> &zeta_mixed_arg,
const std::vector<size_t> &zeta_static_arg, const double qz_criterium_arg, const std::vector<size_t> &varobs_arg,
double riccati_tol_arg, double lyapunov_tol_arg, int &info);
double compute(Matrix &steadyState, Vector &estParams, int &info); // for calls from estimation and to set Steady State
double compute(Vector &estParams); // for calls in loop from optimiser
/**
* Compute method inputs:
* Matrix &steadyState: matrix of sub-sample-period steady states, one column vector per sub-sample period
* Vectors of deep parameters (deepParams) and estimated parameters (estParams)
* Matrix &data: input data reference
* Q and H: KF matrices of shock and measurement-error variances and covariances
* presampleStart: start period of the KF logLikelihood calculation.
*/
double compute(Matrix &steadyState, const Vector &estParams, Vector &deepParams, const MatrixConstView &data, Matrix &Q, Matrix &H, size_t presampleStart, int &info); // for calls from estimation and to set Steady State
Vector &
getVll()
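To make the compute() documentation above concrete, a hedged call sketch; the helper name, the model dimensions (15 endogenous variables, 4 shocks, 2 observables) and the zero-initialised Q and H are illustrative assumptions, and in practice Q and H are filled with the model's shock and measurement-error covariances before the call:
#include "LogLikelihoodMain.hh"
// Evaluates the log-likelihood once, given an already constructed LogLikelihoodMain
// object and a data view; see the assumptions stated above.
double
evaluateLogLik(LogLikelihoodMain &llMain, const MatrixConstView &data,
               const Vector &estParams, Vector &deepParams,
               size_t nSubSamples, int &info)
{
  Matrix steadyState(15, nSubSamples);  // one steady-state column per sub-sample period
  Matrix Q(4), H(2);                    // shock and measurement-error covariance matrices
  Q.setAll(0.0);
  H.setAll(0.0);
  size_t start = 0;                     // presample start period of the likelihood
  return llMain.compute(steadyState, estParams, deepParams, data, Q, H, start, info);
}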

View File

@ -23,28 +23,162 @@
// Created on: 14-Jan-2010 22:39:14
///////////////////////////////////////////////////////////
#include "LogLikelihoodSubSample.hh"
//#include "LogLikelihoodSubSample.hh"
#include "LogLikelihoodMain.hh" // use ...Main.hh for testing only
#include <algorithm>
#include "LapackBindings.hh"
LogLikelihoodSubSample::~LogLikelihoodSubSample()
{
};
LogLikelihoodSubSample::LogLikelihoodSubSample(const std::string &modName, size_t n_endo, size_t n_exo,
const std::vector<size_t> &zeta_fwrd_arg, const std::vector<size_t> &zeta_back_arg, const std::vector<size_t> &zeta_mixed_arg,
const std::vector<size_t> &zeta_static_arg, const double qz_criterium, const std::vector<size_t> &order_var,
const std::vector<size_t> &inv_order_var, const std::vector<size_t> &varobs, const std::vector<size_t> &riv,
const std::vector<size_t> &ric, double riccati_tol, double lyapunov_tol, int &info) :
LogLikelihoodSubSample::LogLikelihoodSubSample(const std::string &modName, EstimatedParametersDescription &INestiParDesc, size_t n_endo, size_t n_exo,
const std::vector<size_t> &zeta_fwrd_arg, const std::vector<size_t> &zeta_back_arg,
const std::vector<size_t> &zeta_mixed_arg, const std::vector<size_t> &zeta_static_arg, const double qz_criterium,
const std::vector<size_t> &varobs, double riccati_tol, double lyapunov_tol, int &INinfo) :
estiParDesc(INestiParDesc),
kalmanFilter(modName, n_endo, n_exo, zeta_fwrd_arg, zeta_back_arg, zeta_mixed_arg, zeta_static_arg, qz_criterium,
order_var, inv_order_var, varobs, riv, ric, riccati_tol, lyapunov_tol, info)
varobs, riccati_tol, lyapunov_tol, INinfo), eigQ(n_exo), eigH(varobs.size()), info(INinfo)
{
};
double
LogLikelihoodSubSample::compute(Vector &steadyState, const MatrixView &dataView, const Vector &deepParams, //const estPeriod &estPeriod,
VectorView &vll, int &info, size_t start, size_t period, const Matrix &Q, const Matrix &H)
LogLikelihoodSubSample::compute(VectorView &steadyState, const MatrixConstView &dataView, const Vector &estParams, Vector &deepParams,
Matrix &Q, Matrix &H, VectorView &vll, int &info, size_t start, size_t period)
{
logLikelihood = kalmanFilter.compute(dataView, steadyState, Q, H, deepParams, vll, start, period, penalty, info);
updateParams(estParams, deepParams, Q, H, period);
if (info == 0)
logLikelihood = kalmanFilter.compute(dataView, steadyState, Q, H, deepParams, vll, start, period, penalty, info);
// else
// logLikelihood+=penalty;
return logLikelihood;
};
void
LogLikelihoodSubSample::updateParams(const Vector &estParams, Vector &deepParams,
Matrix &Q, Matrix &H, size_t period)
{
size_t i, j, k, k1, k2;
int test;
bool found;
std::vector<size_t>::const_iterator it;
info = 0;
for (i = 0; i < estParams.getSize(); ++i)
{
found = false;
it = find(estiParDesc.estParams[i].subSampleIDs.begin(),
estiParDesc.estParams[i].subSampleIDs.end(), period);
if (it != estiParDesc.estParams[i].subSampleIDs.end())
found = true;
if (found)
{
switch (estiParDesc.estParams[i].ptype)
{
case EstimatedParameter::shock_SD:
k = (int) estiParDesc.estParams[i].ID1;
Q(k, k) = estParams(i)*estParams(i);
break;
case EstimatedParameter::measureErr_SD:
#ifdef DEBUG
mexPrintf("Setting of H var_endo\n");
#endif
k = (int) estiParDesc.estParams[i].ID1;
H(k, k) = estParams(i)*estParams(i);
break;
case EstimatedParameter::shock_Corr:
#ifdef DEBUG
mexPrintf("Setting of Q corrx\n");
#endif
k1 = (int) estiParDesc.estParams[i].ID1;
k2 = (int) estiParDesc.estParams[i].ID2;
Q(k1, k2) = estParams(i)*sqrt(Q(k1, k1)*Q(k2, k2));
Q(k2, k1) = Q(k1, k2);
// [CholQ,testQ] = chol(Q);
test = lapack::choleskyDecomp(Q, "L");
if (test > 0)
{
mexPrintf("Caugth unhandled exception with cholesky of Q matrix: ");
logLikelihood = penalty;
info = 1;
}
else if (test < 0)
{
// The variance-covariance matrix of the structural innovations is not positive definite.
// We have to compute the eigenvalues of this matrix in order to build the penalty.
double delta = 0;
eigQ.calculate(Q); // get eigenvalues
//k = find(a < 0);
if (eigQ.hasConverged())
{
const Vector &evQ = eigQ.getD();
for (i = 0; i < evQ.getSize(); ++i)
if (evQ(i) < 0)
delta -= evQ(i);
}
logLikelihood = penalty+delta;
info = 43;
} // if
break;
case EstimatedParameter::measureErr_Corr:
#ifdef DEBUG
mexPrintf("Setting of H corrn\n");
#endif
k1 = (int) estiParDesc.estParams[i].ID1;
k2 = (int) estiParDesc.estParams[i].ID2;
// H(k1,k2) = xparam1(i)*sqrt(H(k1,k1)*H(k2,k2));
// H(k2,k1) = H(k1,k2);
H(k1, k2) = estParams(i)*sqrt(H(k1, k1)*H(k2, k2));
H(k2, k1) = H(k1, k2);
//[CholH,testH] = chol(H);
test = lapack::choleskyDecomp(H, "L");
if (test > 0)
{
mexPrintf("Caugth unhandled exception with cholesky of Q matrix: ");
logLikelihood = penalty;
info = 1;
}
else if (test < 0)
{
// The variance-covariance matrix of the measurement errors is not positive definite.
// We have to compute the eigenvalues of this matrix in order to build the penalty.
//a = diag(eig(H));
double delta = 0;
eigH.calculate(H); // get eigenvalues
//k = find(a < 0);
if (eigH.hasConverged())
{
const Vector &evH = eigH.getD();
for (i = 0; i < evH.getSize(); ++i)
if (evH(i) < 0)
delta -= evH(i);
}
logLikelihood = penalty+delta;
info = 44;
} // end if
break;
//if estim_params_.np > 0 // i.e. num of deep parameters >0
case EstimatedParameter::deepPar:
k = (int) estiParDesc.estParams[i].ID1;
deepParams(k) = estParams(i);
break;
default:
logLikelihood = penalty;
info = 1;
} // end switch
} // end found
#ifdef DEBUG
mexPrintf("End of Setting of HQ params\n");
#endif
} //end for
};

View File

@ -34,12 +34,11 @@ class LogLikelihoodSubSample {
public:
LogLikelihoodSubSample(const std::string &modName, EstimatedParametersDescription &estiParDesc, size_t n_endo, size_t n_exo,
const std::vector<size_t> &zeta_fwrd_arg, const std::vector<size_t> &zeta_back_arg, const std::vector<size_t> &zeta_mixed_arg,
const std::vector<size_t> &zeta_static_arg, const double qz_criterium, const std::vector<size_t> &order_var,
const std::vector<size_t> &inv_order_var, const std::vector<size_t> &varobs, const std::vector<size_t> &riv,
const std::vector<size_t> &ric, double riccati_tol_in, double lyapunov_tol, int &info);
const std::vector<size_t> &zeta_fwrd_arg, const std::vector<size_t> &zeta_back_arg,
const std::vector<size_t> &zeta_mixed_arg, const std::vector<size_t> &zeta_static_arg, const double qz_criterium,
const std::vector<size_t> &varobs_arg, double riccati_tol_in, double lyapunov_tol, int &info);
double compute(Vector &steadyState, const MatrixView &dataView, Vector &deepParams,
double compute(VectorView &steadyState, const MatrixConstView &dataView, const Vector &estParams, Vector &deepParams,
Matrix &Q, Matrix &H, VectorView &vll, int &info, size_t start, size_t period);
virtual ~LogLikelihoodSubSample();
@ -50,6 +49,7 @@ private:
KalmanFilter kalmanFilter;
VDVEigDecomposition eigQ;
VDVEigDecomposition eigH;
int &info;
// methods
void updateParams(const Vector &estParams, Vector &deepParams,
Matrix &Q, Matrix &H, size_t period);

View File

@ -9,11 +9,21 @@ endif
EXTRA_DIST = \
DecisionRules.cc \
DecisionRules.hh \
dr1.cpp \
DsgeLikelihood.cpp \
DsgeLikelihood.h \
dynare_resolve.cpp \
Estimation.h \
ioutils.h \
MexUtils.cpp \
MexUtils.h
DetrendData.cc \
DetrendData.hh \
EstimatedParameter.hh \
EstimatedParametersDescription.hh \
EstimationSubsample.hh \
InitializeKalmanFilter.cc \
InitializeKalmanFilter.hh \
KalmanFilter.cc \
KalmanFilter.hh \
LogLikelihoodMain.hh \
LogLikelihoodSubSample.cc \
LogLikelihoodSubSample.hh \
ModelSolution.cc \
ModelSolution.hh \
Prior.hh \
utils/dynamic_dll.cc \
utils/dynamic_dll.hh \
utils/ts_exception.h

View File

@ -51,7 +51,7 @@ ModelSolution::ModelSolution(const std::string& modName, size_t n_endo_arg, siz
}
void
ModelSolution::compute(Vector& steadyState, const Vector& deepParams, Matrix& ghx, Matrix& ghu) throw (DecisionRules::BlanchardKahnException, GeneralizedSchurDecomposition::GSDException)
ModelSolution::compute(VectorView& steadyState, const Vector& deepParams, Matrix& ghx, Matrix& ghu) throw (DecisionRules::BlanchardKahnException, GeneralizedSchurDecomposition::GSDException)
{
// compute Steady State
ComputeSteadyState(steadyState, deepParams);
@ -63,7 +63,7 @@ ModelSolution::compute(Vector& steadyState, const Vector& deepParams, Matrix& gh
}
void
ModelSolution::ComputeModelSolution(Vector& steadyState, const Vector& deepParams, Matrix& ghx, Matrix& ghu) throw (DecisionRules::BlanchardKahnException, GeneralizedSchurDecomposition::GSDException)
ModelSolution::ComputeModelSolution(VectorView &steadyState, const Vector& deepParams, Matrix& ghx, Matrix& ghu) throw (DecisionRules::BlanchardKahnException, GeneralizedSchurDecomposition::GSDException)
{
// set extended Steady State
@ -85,7 +85,7 @@ ModelSolution::ComputeModelSolution(Vector& steadyState, const Vector& deepParam
decisionRules.compute(jacobian,ghx, ghu);
}
void
ModelSolution::ComputeSteadyState(Vector& steadyState, const Vector& deepParams)
ModelSolution::ComputeSteadyState(VectorView& steadyState, const Vector& deepParams)
{
// does nothing for the time being.
}

View File

@ -42,7 +42,7 @@ public:
const std::vector<size_t>& zeta_back_arg, const std::vector<size_t>& zeta_mixed_arg,
const std::vector<size_t>& zeta_static_arg, double qz_criterium);
virtual ~ModelSolution(){};
void compute( Vector& steadyState, const Vector& deepParams, Matrix& ghx, Matrix& ghu ) throw (DecisionRules::BlanchardKahnException, GeneralizedSchurDecomposition::GSDException);
void compute( VectorView& steadyState, const Vector& deepParams, Matrix& ghx, Matrix& ghu ) throw (DecisionRules::BlanchardKahnException, GeneralizedSchurDecomposition::GSDException);
private:
const size_t n_endo;
@ -55,8 +55,8 @@ private:
DecisionRules decisionRules;
DynamicModelDLL dynamicDLLp;
//Matrix jacobian;
void ComputeModelSolution( Vector& steadyState, const Vector& deepParams, Matrix& ghx, Matrix& ghu ) throw (DecisionRules::BlanchardKahnException, GeneralizedSchurDecomposition::GSDException);
void ComputeSteadyState( Vector& steadyState, const Vector& deepParams);
void ComputeModelSolution( VectorView& steadyState, const Vector& deepParams, Matrix& ghx, Matrix& ghu ) throw (DecisionRules::BlanchardKahnException, GeneralizedSchurDecomposition::GSDException);
void ComputeSteadyState( VectorView& steadyState, const Vector& deepParams);
};

View File

@ -0,0 +1,38 @@
/*
* Copyright (C) 2009-2010 Dynare Team
*
* This file is part of Dynare.
*
* Dynare is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Dynare is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Dynare. If not, see <http://www.gnu.org/licenses/>.
*/
///////////////////////////////////////////////////////////
// Prior.cpp
// Implementation of the Class Prior
// Created on: 02-Feb-2010 13:06:20
///////////////////////////////////////////////////////////
#include "Prior.hh"
Prior::Prior(Prior::pShape shape_arg, double mean_arg, double mode_arg, double lower_bound_arg, double upper_bound_arg, double fhp_arg, double shp_arg) :
shape(shape_arg), mean(mean_arg), mode(mode_arg), lower_bound(lower_bound_arg), upper_bound(upper_bound_arg), fhp(fhp_arg), shp(shp_arg)
{
}
Prior::~Prior()
{
}

View File

@ -30,9 +30,6 @@ struct Prior
{
public:
Prior(int shape, double mean, double mode, double lower_boound, double upper_boound, double fhp, double shp);
virtual ~Prior();
//! probability density functions
enum pShape
{
@ -43,11 +40,15 @@ public:
Uniform = 5,
Inv_gamma_2 = 6 //Inverse gamma (type 2) density
};
Prior(Prior::pShape shape, double mean, double mode, double lower_bound, double upper_bound, double fhp, double shp);
virtual ~Prior();
const pShape shape;
const double mean;
const double mode;
const double lower_boound;
const double upper_boound;
const double lower_bound;
const double upper_bound;
/**
* first shape parameter
*/

View File

@ -5,7 +5,8 @@ endif
endif
EXTRA_DIST = \
BlasBindings.hh
BlasBindings.hh \
DiscLyapFast.hh \
GeneralizedSchurDecomposition.cc \
GeneralizedSchurDecomposition.hh \
LUSolver.cc \

View File

@ -63,16 +63,6 @@ MatrixView::MatrixView(double *data_arg, size_t rows_arg, size_t cols_arg, size_
{
}
MatrixView::MatrixView(Matrix &arg, size_t row_offset, size_t col_offset,
size_t rows_arg, size_t cols_arg) :
data(arg.getData() + row_offset + col_offset*arg.getLd()), rows(rows_arg), cols(cols_arg), ld(arg.getLd())
{
assert(row_offset < arg.getRows()
&& row_offset + rows_arg <= arg.getRows()
&& col_offset < arg.getCols()
&& col_offset + cols_arg <= arg.getCols());
}
std::ostream &
operator<<(std::ostream &out, const MatrixView &M)
{
@ -94,16 +84,6 @@ MatrixConstView::MatrixConstView(const double *data_arg, size_t rows_arg, size_t
{
}
MatrixConstView::MatrixConstView(const Matrix &arg, size_t row_offset, size_t col_offset,
size_t rows_arg, size_t cols_arg) :
data(arg.getData() + row_offset + col_offset*arg.getLd()), rows(rows_arg), cols(cols_arg), ld(arg.getLd())
{
assert(row_offset < arg.getRows()
&& row_offset + rows_arg <= arg.getRows()
&& col_offset < arg.getCols()
&& col_offset + cols_arg <= arg.getCols());
}
std::ostream &
operator<<(std::ostream &out, const MatrixConstView &M)
{

View File

@ -102,8 +102,16 @@ class MatrixView
const size_t rows, cols, ld;
public:
MatrixView(double *data_arg, size_t rows_arg, size_t cols_arg, size_t ld_arg);
MatrixView(Matrix &arg, size_t row_offset, size_t col_offset,
size_t rows_arg, size_t cols_arg);
template<class Mat>
MatrixView(Mat &arg, size_t row_offset, size_t col_offset,
size_t rows_arg, size_t cols_arg) :
data(arg.getData() + row_offset + col_offset*arg.getLd()), rows(rows_arg), cols(cols_arg), ld(arg.getLd())
{
assert(row_offset < arg.getRows()
&& row_offset + rows_arg <= arg.getRows()
&& col_offset < arg.getCols()
&& col_offset + cols_arg <= arg.getCols());
}
virtual ~MatrixView(){};
inline size_t getRows() const { return rows; }
inline size_t getCols() const { return cols; }
@ -140,8 +148,16 @@ class MatrixConstView
const size_t rows, cols, ld;
public:
MatrixConstView(const double *data_arg, size_t rows_arg, size_t cols_arg, size_t ld_arg);
MatrixConstView(const Matrix &arg, size_t row_offset, size_t col_offset,
size_t rows_arg, size_t cols_arg);
template<class Mat>
MatrixConstView(const Mat &arg, size_t row_offset, size_t col_offset,
size_t rows_arg, size_t cols_arg) :
data(arg.getData() + row_offset + col_offset*arg.getLd()), rows(rows_arg), cols(cols_arg), ld(arg.getLd())
{
assert(row_offset < arg.getRows()
&& row_offset + rows_arg <= arg.getRows()
&& col_offset < arg.getCols()
&& col_offset + cols_arg <= arg.getCols());
}
virtual ~MatrixConstView(){};
inline size_t getRows() const { return rows; }
inline size_t getCols() const { return cols; }
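The sub-block constructors of MatrixView and MatrixConstView are now templates over the source type, so a view can be taken of a Matrix or of another view (which is what the per-sub-sample data slicing in LogLikelihoodMain::compute relies on). A minimal sketch of that pattern; the helper name and the assumption that this class lives in Matrix.hh are mine:
#include "Matrix.hh"   // header name assumed
// Returns a read-only window of nPeriods columns starting at startPeriod,
// taken directly from another const view thanks to the templated constructor.
MatrixConstView
subSampleWindow(const MatrixConstView &data, size_t startPeriod, size_t nPeriods)
{
  return MatrixConstView(data, 0, startPeriod, data.getRows(), nPeriods);
}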

View File

@ -33,7 +33,7 @@
class VDVEigDecomposition
{
lapack_int lda, n;
int lwork, info;
lapack_int lwork, info;
double tmpwork;
double *work;
bool converged;
@ -45,9 +45,9 @@ public:
class VDVEigException
{
public:
const int info;
const lapack_int info;
std::string message;
VDVEigException(int info_arg, std::string message_arg) :
VDVEigException(lapack_int info_arg, std::string message_arg) :
info(info_arg), message(message_arg) {
};
};
@ -101,12 +101,12 @@ VDVEigDecomposition::calculate(const Mat &m) throw(VDVEigException)
if (m.getCols() != (size_t) n || m.getLd() != (size_t) lda)
throw(VDVEigException(info, "Matrix not matching VDVEigDecomposition class"));
int tmplwork = -1;
lapack_int tmplwork = -1;
V = m;
dsyev("V", "U", &n, V.getData(), &lda, D.getData(), &tmpwork, &tmplwork, &info);
if (lwork < (int) tmpwork)
if (lwork < tmpwork)
{
lwork = (int) tmpwork;
lwork = tmpwork;
delete[] work;
work = new double[lwork];
}

View File

@ -52,16 +52,23 @@ operator<<(std::ostream &out, const Vector &V)
return out;
}
VectorView::VectorView(Vector &arg, size_t offset, size_t size_arg)
: data(arg.getData() + offset), size(size_arg), stride(1)
{
}
VectorView::VectorView(double *data_arg, size_t size_arg, size_t stride_arg)
: data(data_arg), size(size_arg), stride(stride_arg)
{
}
VectorView::VectorView(Vector &arg, size_t offset, size_t size_arg)
: data(arg.getData() + offset*arg.getStride()), size(size_arg), stride(arg.getStride())
{
assert(offset < arg.getSize() && offset + size <= arg.getSize());
}
VectorView::VectorView(VectorView &arg, size_t offset, size_t size_arg)
: data(arg.getData() + offset*arg.getStride()), size(size_arg), stride(arg.getStride())
{
assert(offset < arg.getSize() && offset + size <= arg.getSize());
}
VectorView &
VectorView::operator= (const VectorView &arg)
{
@ -73,6 +80,29 @@ VectorView::operator= (const VectorView &arg)
return *this;
}
VectorConstView::VectorConstView(const double *data_arg, size_t size_arg, size_t stride_arg)
: data(data_arg), size(size_arg), stride(stride_arg)
{
}
VectorConstView::VectorConstView(const Vector &arg, size_t offset, size_t size_arg)
: data(arg.getData() + offset*arg.getStride()), size(size_arg), stride(arg.getStride())
{
assert(offset < arg.getSize() && offset + size <= arg.getSize());
}
VectorConstView::VectorConstView(const VectorView &arg, size_t offset, size_t size_arg)
: data(arg.getData() + offset*arg.getStride()), size(size_arg), stride(arg.getStride())
{
assert(offset < arg.getSize() && offset + size <= arg.getSize());
}
VectorConstView::VectorConstView(const VectorConstView &arg, size_t offset, size_t size_arg)
: data(arg.getData() + offset*arg.getStride()), size(size_arg), stride(arg.getStride())
{
assert(offset < arg.getSize() && offset + size <= arg.getSize());
}
std::ostream &
operator<<(std::ostream &out, const VectorView &V)
{
@ -80,16 +110,6 @@ operator<<(std::ostream &out, const VectorView &V)
return out;
}
VectorConstView::VectorConstView(const Vector &arg, size_t offset, size_t size_arg)
: data(arg.getData() + offset), size(size_arg), stride(1)
{
}
VectorConstView::VectorConstView(const double *data_arg, size_t size_arg, size_t stride_arg)
: data(data_arg), size(size_arg), stride(stride_arg)
{
}
std::ostream &
operator<<(std::ostream &out, const VectorConstView &V)
{

View File

@ -97,8 +97,11 @@ private:
double *const data;
const size_t size, stride;
public:
VectorView(Vector &arg, size_t offset, size_t size_arg);
VectorView(double *data_arg, size_t size_arg, size_t stride_arg);
/* Can't use a template for the 2 constructors below: this would override the
constructor which uses a pointer, because the argument list is the same */
VectorView(Vector &arg, size_t offset, size_t size_arg);
VectorView(VectorView &arg, size_t offset, size_t size_arg);
virtual ~VectorView(){};
inline size_t getSize() const { return size; }
inline size_t getStride() const { return stride; }
@ -135,8 +138,13 @@ private:
const double *const data;
const size_t size, stride;
public:
VectorConstView(const Vector &arg, size_t offset, size_t size_arg);
VectorConstView(const double *data_arg, size_t size_arg, size_t stride_arg);
/* Can't use a template for the 3 constructors below: this would override the
constructor which uses a pointer, because the argument list is the same */
VectorConstView(const Vector &arg, size_t offset, size_t size_arg);
VectorConstView(const VectorView &arg, size_t offset, size_t size_arg);
VectorConstView(const VectorConstView &arg, size_t offset, size_t size_arg);
virtual ~VectorConstView(){};
inline size_t getSize() const { return size; }
inline size_t getStride() const { return stride; }

View File

@ -100,47 +100,32 @@ main(int argc, char **argv)
for (int i = 0; i < 2; ++i)
zeta_fwrd_arg.push_back(fwd[i]-1);
size_t nriv = 6, nric = 4;
size_t sriv[] = {7, 8, 10, 11, 12, 13};
size_t sric[] = {3, 4, 5, 6};
size_t varobs[] = {12, 11};
std::vector<size_t> varobs_arg;
for (size_t i = 0; i < 2; ++i)
varobs_arg.push_back(varobs[i]-1);
std::vector<size_t> riv;
for (size_t i = 0; i < nriv; ++i)
riv.push_back(sriv[i]-1);
std::vector<size_t> ric;
for (size_t i = 0; i < nric; ++i)
ric.push_back(sric[i]-1);
// Compute zeta_varobs_back_mixed
sort(varobs_arg.begin(), varobs_arg.end());
std::vector<size_t> zeta_back_mixed, zeta_varobs_back_mixed;
set_union(zeta_back_arg.begin(), zeta_back_arg.end(),
zeta_mixed_arg.begin(), zeta_mixed_arg.end(),
back_inserter(zeta_back_mixed));
set_union(zeta_back_mixed.begin(), zeta_back_mixed.end(),
varobs_arg.begin(), varobs_arg.end(),
back_inserter(zeta_varobs_back_mixed));
size_t nobs = 2;
size_t svarobs[] = {2, 1};
std::vector<size_t> varobs;
for (size_t i = 0; i < nobs; ++i)
varobs.push_back(svarobs[i]-1);
size_t n_vbm = zeta_varobs_back_mixed.size();
Matrix
T(riv.size(), riv.size()), R(riv.size(), n_exo),
RQRt(riv.size(), riv.size()), Pstar(riv.size(), riv.size()),
Pinf(riv.size(), riv.size()), Z(varobs.size(), riv.size()), Q(n_exo);
Z.setAll(0.0);
for (size_t i = 0; i < varobs.size(); ++i)
Z(i, varobs[i]) = 1.0;
T(n_vbm, n_vbm), R(n_vbm, n_exo),
RQRt(n_vbm, n_vbm), Pstar(n_vbm, n_vbm),
Pinf(n_vbm, n_vbm), Q(n_exo);
MatrixView
vCovVW(vcov, n_exo, n_exo, n_exo);
Q = vCovVW;
size_t sorder_var[] =
{ 4, 5, 6, 8, 9, 10, 11, 12, 14, 1, 7, 13, 2, 3, 15};
std::vector<size_t> order_var;
for (size_t ii = 0; ii < n_endo; ++ii)
order_var.push_back(sorder_var[ii]-1); //= (1:endo_nbr)';
size_t sinv_order_var[] =
{ 10, 13, 14, 1, 2, 3, 11, 4, 5, 6, 7, 8, 12, 9, 15};
std::vector<size_t> inv_order_var;
for (size_t ii = 0; ii < n_endo; ++ii)
inv_order_var.push_back(sinv_order_var[ii]-1); //= (1:endo_nbr)';
double lyapunov_tol = 1e-16;
int info = 0;
const MatrixView
@ -153,13 +138,13 @@ main(int argc, char **argv)
InitializeKalmanFilter
initializeKalmanFilter(modName, n_endo, n_exo,
zeta_fwrd_arg, zeta_back_arg, zeta_mixed_arg, zeta_static_arg, qz_criterium,
order_var, inv_order_var, riv, ric, lyapunov_tol, info);
zeta_fwrd_arg, zeta_back_arg, zeta_mixed_arg, zeta_static_arg,
zeta_varobs_back_mixed, qz_criterium,
lyapunov_tol, info);
std::cout << "Initilise KF with Q: " << std::endl << Q << std::endl;
std::cout << "and Z" << std::endl << Z << std::endl;
std::cout << "Initialize KF with Q: " << std::endl << Q << std::endl;
initializeKalmanFilter.initialize(steadyState, deepParams, R, Z, Q, RQRt, T, Pstar, Pinf,
initializeKalmanFilter.initialize(steadyState, deepParams, R, Q, RQRt, T, Pstar, Pinf,
penalty, dataView, yView, info);
std::cout << "Matrix T: " << std::endl << T << std::endl;

View File

@ -100,51 +100,20 @@ main(int argc, char **argv)
for (int i = 0; i < 2; ++i)
zeta_fwrd_arg.push_back(fwd[i]-1);
size_t nriv = 6, nric = 4;
size_t sriv[] = {7, 8, 10, 11, 12, 13};
size_t sric[] = {3, 4, 5, 6};
std::vector<size_t> riv;
for (size_t i = 0; i < nriv; ++i)
riv.push_back(sriv[i]-1);
std::vector<size_t> ric;
for (size_t i = 0; i < nric; ++i)
ric.push_back(sric[i]-1);
size_t nobs = 2;
size_t svarobs[] = {2, 1};
std::vector<size_t> varobs;
size_t varobs[] = {12, 11};
std::vector<size_t> varobs_arg;
for (size_t i = 0; i < nobs; ++i)
varobs.push_back(svarobs[i]-1);
varobs_arg.push_back(varobs[i]-1);
Matrix Q(n_exo), H(nobs);
/*
T(riv.size(), riv.size()), R(riv.size(), n_exo),
RQRt(riv.size(), riv.size()), Pstar(riv.size(), riv.size()),
Pinf(riv.size(), riv.size()), Z(varobs.size(), riv.size()),
*/
H.setAll(0.0);
/* for (size_t i = 0; i < varobs.size(); ++i)
Z(i, varobs[i]) = 1.0;
*/
MatrixView
vCovVW(vcov, n_exo, n_exo, n_exo);
Q = vCovVW;
std::cout << "Matrix Q: " << std::endl << Q << std::endl;
size_t sorder_var[] =
{ 4, 5, 6, 8, 9, 10, 11, 12, 14, 1, 7, 13, 2, 3, 15};
std::vector<size_t> order_var;
for (size_t ii = 0; ii < n_endo; ++ii)
order_var.push_back(sorder_var[ii]-1); //= (1:endo_nbr)';
size_t sinv_order_var[] =
{ 10, 13, 14, 1, 2, 3, 11, 4, 5, 6, 7, 8, 12, 9, 15};
std::vector<size_t> inv_order_var;
for (size_t ii = 0; ii < n_endo; ++ii)
inv_order_var.push_back(sinv_order_var[ii]-1); //= (1:endo_nbr)';
double lyapunov_tol = 1e-16;
double riccati_tol = 1e-16;
int info = 0;
@ -158,10 +127,8 @@ main(int argc, char **argv)
KalmanFilter kalman(modName, n_endo, n_exo,
zeta_fwrd_arg, zeta_back_arg, zeta_mixed_arg, zeta_static_arg, qz_criterium,
order_var, inv_order_var, varobs, riv, ric, riccati_tol, lyapunov_tol, info);
varobs_arg, riccati_tol, lyapunov_tol, info);
std::cout << "Initilise KF with Q: " << std::endl << Q << std::endl;
// std::cout << "and Z" << std::endl << Z << std::endl;
size_t start=0, period=0;
double ll=kalman.compute(dataView, steadyState, Q, H, deepParams,
vwll, start, period, penalty, info);

View File

@ -0,0 +1,67 @@
// Test of mult_elimination function
// parameter values are set to the posterior mean as computed by ls1.mod
// dR in objective function
var A de dq dR pie pie_obs pie_s R R_obs y y_obs y_s ;
varexo e_A e_pies e_q e_ys ;
parameters psi1 psi2 psi3 rho_R tau alpha rr k rho_q rho_A rho_ys rho_pies ls1_r s1 s2 s3 s4 s5;
psi1 = 1.54;
psi2 = 0.25;
psi3 = 0.25;
rho_R = 0.5;
alpha = 0.3;
rr = 2.51;
k = 0.5;
tau = 0.5;
rho_q = 0.4;
rho_A = 0.2;
rho_ys = 0.9;
rho_pies = 0.7;
model(linear);
y = y(+1) - (tau +alpha*(2-alpha)*(1-tau))*(R-pie(+1))-alpha*(tau +alpha*(2-alpha)*(1-tau))*dq(+1) + alpha*(2-alpha)*((1-tau)/tau)*(y_s-y_s(+1))-A(+1);
pie = exp(-rr/400)*pie(+1)+alpha*exp(-rr/400)*dq(+1)-alpha*dq+(k/(tau+alpha*(2-alpha)*(1-tau)))*y+alpha*(2-alpha)*(1-tau)/(tau*(tau+alpha*(2-alpha)*(1-tau)))*y_s;
pie = de+(1-alpha)*dq+pie_s;
//R = rho_R*R(-1)+(1-rho_R)*(psi1*pie+psi2*(y+alpha*(2-alpha)*((1-tau)/tau)*y_s)+psi3*de)+e_R;
dq = rho_q*dq(-1)+e_q;
y_s = rho_ys*y_s(-1)+e_ys;
pie_s = rho_pies*pie_s(-1)+e_pies;
A = rho_A*A(-1)+e_A;
y_obs = y-y(-1)+A;
pie_obs = 4*pie;
R_obs = 4*R;
dR = R-R(-1);
end;
shocks;
var e_q = 2.5^2;
var e_A = 1.89;
var e_ys = 1.89;
var e_pies = 1.89;
end;
planner_objective 0.25*pie_obs^2+y^2+0.1*dR^2;
ramsey_policy(order=1,irf=0,planner_discount=0.95);
dr2 = mult_elimination({'R'},M_,options_,oo_);
k1 = oo_.dr.nstatic+(1:oo_.dr.npred);
k2 = strmatch('mult_',M_.endo_names(oo_.dr.order_var(k1),:));
k3 = k1(setdiff(1:oo_.dr.npred,k2));
k4 = oo_.dr.order_var(k3);
V0 = oo_.var(k4,k4);
Atest = [dr2.M1(k3,:) dr2.M2(k3,:) dr2.M4(k3,:); eye(6) zeros(6,10);zeros(4,16)];
Btest = [dr2.M3(k3,:); zeros(6,4); eye(4)];
V1=lyapunov_symm(Atest,Btest*M_.Sigma_e*Btest',options_.qz_criterium,options_.lyapunov_complex_threshold);
if max(max(abs(V1(1:6,1:6)-V0))) < 1e-8
disp('Test OK')
end