Merge pull request #319 from JohannesPfeifer/master

Bugfixes, documentation, and features
time-shift
Sébastien Villemot 2013-03-18 01:52:43 -07:00
commit 4b095c2ce5
27 changed files with 212 additions and 84 deletions

View File

@ -3161,7 +3161,7 @@ period(s). The periods must be strictly positive. Conditional variances are give
decomposition provides the decomposition of the effects of shocks upon
impact. The results are stored in
@code{oo_.conditional_variance_decomposition}
(@pxref{oo_.conditional_variance_decomposition}).
(@pxref{oo_.conditional_variance_decomposition}). The variance decomposition is only conducted if theoretical moments are requested, i.e. when using the @code{periods=0} option. Currently, variance decompositions are only implemented for @code{order=1}. With @code{order=2}, Dynare will thus not display the variance decomposition based on a second order approximation, but the one based on a first order approximation.
@item pruning
Discard higher order terms when iteratively computing simulations of
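Aside on the hunk above: a hedged illustration of the documented requirement. The mod-file call is shown only as a comment and is not part of this commit; the variable names and the horizon vector are placeholders.

% Hypothetical usage: theoretical moments are computed because periods=0 is
% the default, and order=1 matches the order at which the decomposition is
% implemented, e.g.
%   stoch_simul(order=1, conditional_variance_decomposition=[1 4 20]) y c;
% Afterwards the array documented above can be inspected from the MATLAB workspace:
cvd = oo_.conditional_variance_decomposition;
size(cvd)   % spans the requested variables, forecast horizons and shocks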
@ -4230,7 +4230,7 @@ period 1, the conditional variance decomposition provides the
decomposition of the effects of shocks upon impact. The results are
stored in
@code{oo_.PosteriorTheoreticalMoments.dsge.ConditionalVarianceDecomposition},
but currently there is not output. Note that this option requires the
but currently there is no output. Note that this option requires the
option @code{moments_varendo} to be specified.
@item filtered_vars
@ -4461,6 +4461,9 @@ Median of the posterior distribution
@item Std
Standard deviation of the posterior distribution
@item Variance
Variance of the posterior distribution
@item deciles
Deciles of the distribution.
@ -4649,6 +4652,21 @@ oo_.posterior_mean.shocks_std.ex
oo_.posterior_hpdsup.measurement_errors_corr.gdp_conso
@end example
@defvr {MATLAB/Octave variable} oo_.MC_record.Seeds
Variable set by the @code{estimation} command. Stores the seeds used in the MCMC chains.
@end defvr
@defvr {MATLAB/Octave variable} oo_.MC_record.AcceptationRates
Variable set by the @code{estimation} command. Stores the acceptance rate of each MCMC chain.
@end defvr
@defvr {MATLAB/Octave variable} oo_.MC_record.LastParameters
Variable set by the @code{estimation} command. Stores the parameter vector of the final draw of each MCMC chain.
@end defvr
@defvr {MATLAB/Octave variable} oo_.MC_record.LastLogPost
Variable set by the @code{estimation} command. Stores the log posterior density of the final draw of each MCMC chain.
@end defvr
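A hedged sketch of how these fields might be inspected after an estimation run with several MCMC chains; the snippet is illustrative only and not prescribed by this commit.

% Assumes estimation(...) has already been run so that oo_.MC_record is filled:
oo_.MC_record.AcceptationRates                        % acceptance rate of each chain
oo_.MC_record.Seeds                                   % random number generator state of each chain
[lp, best] = max(oo_.MC_record.LastLogPost);          % chain whose final draw has the highest log posterior
theta_last = oo_.MC_record.LastParameters(best, :);   % ... and that chain's final parameter vector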
@deffn Command model_comparison @var{FILENAME}[(@var{DOUBLE})]@dots{};
@deffnx Command model_comparison (marginal_density = laplace | modifiedharmonicmean) @var{FILENAME}[(@var{DOUBLE})]@dots{};
@ -5316,7 +5334,7 @@ The discussion of the methodologies and their application is described in
With respect to the previous version of the toolbox, in order to work
properly, the GSA toolbox no longer requires that the Dynare
estimation environment is setup.
estimation environment is set up.
Sensitivity analysis results are saved locally in @code{<mod_file>/GSA},
where @code{<mod_file>.mod} is the name of the DYNARE model file.

View File

@ -130,7 +130,7 @@ clear pmet temp moyenne CSUP CINF csup cinf n linea iter tmp;
pages = floor(npar/3);
k = 0;
for i = 1:pages
h=dyn_figure(options_,'Name','MCMC univariate diagnostic (Brooks and Gelman,1998)');
h=dyn_figure(options_,'Name','MCMC univariate convergence diagnostic (Brooks and Gelman,1998)');
boxplot = 1;
for j = 1:3 % Loop over parameters
k = k+1;
@ -193,7 +193,7 @@ if reste
nr = 2;
nc = 3;
end
h = dyn_figure(options_,'Name','MCMC univariate diagnostic (Brooks and Gelman, 1998)');
h = dyn_figure(options_,'Name','MCMC univariate convergence diagnostic (Brooks and Gelman, 1998)');
boxplot = 1;
for j = 1:reste
k = k+1;
@ -308,7 +308,7 @@ for iter = Origin:StepSize:NumberOfDraws
end
MDIAG(:,[2 4 6],:) = MDIAG(:,[2 4 6],:)/nblck;
h = dyn_figure(options_,'Name','Multivariate diagnostic');
h = dyn_figure(options_,'Name','Multivariate convergence diagnostic');
boxplot = 1;
for crit = 1:3
if crit == 1

View File

@ -1,4 +1,4 @@
function adaptive_metropolis_hastings(TargetFun,ProposalFun,xparam1,vv,mh_bounds,varargin)
function record=adaptive_metropolis_hastings(TargetFun,ProposalFun,xparam1,vv,mh_bounds,varargin)
%function record=adaptive_metropolis_hastings(TargetFun,ProposalFun,xparam1,vv,mh_bounds,varargin)
% Random walk Metropolis-Hastings algorithm.
%
@ -11,7 +11,7 @@ function adaptive_metropolis_hastings(TargetFun,ProposalFun,xparam1,vv,mh_bounds
% o varargin list of argument following mh_bounds
%
% OUTPUTS
% None
% o record [struct] structure summarizing the MCMC run (seeds, acceptance rates and last draw of each chain)
%
% ALGORITHM
% Metropolis-Hastings.

View File

@ -46,7 +46,7 @@ if isfield(oo_, [ TYPE 'TheoreticalMoments' ])
if isfield(temporary_structure,'dsge')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge;'])
if isfield(temporary_structure,'ConditionalVarianceDecomposition')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.mean;'])
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.Mean;'])
if isfield(temporary_structure,name)
if sum(Steps-temporary_structure.(name)(1,:)) == 0
% Nothing (new) to do here...
@ -85,11 +85,11 @@ for i=1:length(Steps)
p_hpdsup(i) = hpd_interval(2);
p_density(:,:,i) = pp_density;
end
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.steps = Steps;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.mean.' name ' = p_mean;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.median.' name ' = p_median;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.variance.' name ' = p_variance;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.hpdinf.' name ' = p_hpdinf;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.hpdsup.' name ' = p_hpdsup;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.Steps = Steps;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.Mean.' name ' = p_mean;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.Median.' name ' = p_median;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.Variance.' name ' = p_variance;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.HPDinf.' name ' = p_hpdinf;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.HPDsup.' name ' = p_hpdsup;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.deciles.' name ' = p_deciles;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.ConditionalVarianceDecomposition.density.' name ' = p_density;']);
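For scripts that post-process these results, the practical effect of this renaming (and of the analogous ones in the files below) is a change of field spelling in oo_. A hypothetical before/after, for an endogenous variable assumed to be called y:

% Before this commit user code would read, e.g.,
%   m = oo_.PosteriorTheoreticalMoments.dsge.ConditionalVarianceDecomposition.mean.y;
% after it, the capitalized spelling is required:
m = oo_.PosteriorTheoreticalMoments.dsge.ConditionalVarianceDecomposition.Mean.y;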

View File

@ -48,9 +48,9 @@ if isfield(oo_,[TYPE 'TheoreticalMoments'])
if isfield(temporary_structure,'dsge')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge;'])
if isfield(temporary_structure,'correlation')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge.correlation.mean;'])
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge.correlation.Mean;'])
if isfield(temporary_structure,var1)
eval(['temporary_structure_1 = oo_.' TYPE 'TheoreticalMoments.dsge.correlation.mean.' var1 ';'])
eval(['temporary_structure_1 = oo_.' TYPE 'TheoreticalMoments.dsge.correlation.Mean.' var1 ';'])
if isfield(temporary_structure_1,var2)
eval(['temporary_structure_2 = temporary_structure_1.' var2 ';'])
l1 = length(temporary_structure_2);
@ -98,11 +98,11 @@ if ~isconst(tmp)
if isfield(temporary_structure,'dsge')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge;'])
if isfield(temporary_structure,'correlation')
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'mean',nar,p_mean);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'median',nar,p_median);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'variance',nar,p_var);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'hpdinf',nar,hpd_interval(1));
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'hpdsup',nar,hpd_interval(2));
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'Mean',nar,p_mean);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'Median',nar,p_median);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'Variance',nar,p_var);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'HPDinf',nar,hpd_interval(1));
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'HPDsup',nar,hpd_interval(2));
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'deciles',nar,p_deciles);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'density',nar,density);
end
@ -114,11 +114,11 @@ else
if isfield(temporary_structure,'dsge')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge;'])
if isfield(temporary_structure,'correlation')
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'mean',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'median',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'variance',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'hpdinf',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'hpdsup',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'Mean',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'Median',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'Variance',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'HPDinf',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'HPDsup',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'deciles',nar,NaN);
oo_ = fill_output_structure(var1,var2,TYPE,oo_,'density',nar,NaN);
end
@ -128,11 +128,11 @@ end
function oo_ = initialize_output_structure(var1,var2,nar,type,oo_)
name = [ var1 '.' var2 ];
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.mean.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.median.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.variance.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.hpdinf.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.hpdsup.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.Mean.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.Median.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.Variance.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.HPDinf.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.HPDsup.' name ' = NaN(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.deciles.' name ' = cell(' int2str(nar) ',1);']);
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.density.' name ' = cell(' int2str(nar) ',1);']);
for i=1:nar
@ -143,7 +143,7 @@ end
function oo_ = fill_output_structure(var1,var2,type,oo_,moment,lag,result)
name = [ var1 '.' var2 ];
switch moment
case {'mean','median','variance','hpdinf','hpdsup'}
case {'Mean','Median','Variance','HPDinf','HPDsup'}
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.' moment '.' name '(' int2str(lag) ',1) = result;']);
case {'deciles','density'}
eval(['oo_.' type 'TheoreticalMoments.dsge.correlation.' moment '.' name '(' int2str(lag) ',1) = {result};']);

View File

@ -48,16 +48,16 @@ if isfield(oo_,[ TYPE 'TheoreticalMoments'])
if isfield(temporary_structure,'dsge')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge;'])
if isfield(temporary_structure,'covariance')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge.covariance.mean;'])
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge.covariance.Mean;'])
if isfield(temporary_structure,var1)
eval(['temporary_structure_1 = oo_.' TYPE 'TheoreticalMoments.dsge.covariance.mean.' var1 ';'])
eval(['temporary_structure_1 = oo_.' TYPE 'TheoreticalMoments.dsge.covariance.Mean.' var1 ';'])
if isfield(temporary_structure_1,var2)
% Nothing to do (the covariance matrix is symmetric!).
return
end
else
if isfield(temporary_structure,var2)
eval(['temporary_structure_2 = oo_.' TYPE 'TheoreticalMoments.dsge.covariance.mean.' var2 ';'])
eval(['temporary_structure_2 = oo_.' TYPE 'TheoreticalMoments.dsge.covariance.Mean.' var2 ';'])
if isfield(temporary_structure_2,var1)
% Nothing to do (the covariance matrix is symmetric!).
return
@ -80,19 +80,19 @@ name = [var1 '.' var2];
if ~isconst(tmp)
[p_mean, p_median, p_var, hpd_interval, p_deciles, density] = ...
posterior_moments(tmp,1,mh_conf_sig);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.mean.' name ' = p_mean;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.median.' name ' = p_median;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.variance.' name ' = p_var;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.hpdinf.' name ' = hpd_interval(1);']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.hpdsup.' name ' = hpd_interval(2);']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.Mean.' name ' = p_mean;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.Median.' name ' = p_median;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.Variance.' name ' = p_var;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.HPDinf.' name ' = hpd_interval(1);']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.HPDsup.' name ' = hpd_interval(2);']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.deciles.' name ' = p_deciles;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.density.' name ' = density;']);
else
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.mean.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.median.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.variance.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.hpdinf.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.hpdsup.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.Mean.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.Median.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.Variance.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.HPDinf.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.HPDsup.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.deciles.' name ' = NaN;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.covariance.density.' name ' = NaN;']);
end

View File

@ -61,7 +61,11 @@ if ~options_.noprint %options_.nomoments == 0
dyntable(title,headers,labels,z,lh,11,4);
if M_.exo_nbr > 1 && size(stationary_vars, 1) > 0
disp(' ')
title='VARIANCE DECOMPOSITION (in percent)';
if options_.order == 2
title='VARIANCE DECOMPOSITION (in percent), based on first order approximation';
else
title='VARIANCE DECOMPOSITION (in percent)';
end
if options_.hp_filter
title = [title ' (HP filter, lambda = ' ...
num2str(options_.hp_filter) ')'];

View File

@ -49,8 +49,13 @@ StateSpaceModel.order_var = dr.order_var;
conditional_decomposition_array = conditional_variance_decomposition(StateSpaceModel,Steps,SubsetOfVariables );
if options_.noprint == 0
disp(' ')
if options_.order == 2
disp(' ')
disp('CONDITIONAL VARIANCE DECOMPOSITION (in percent), based on first order approximation')
else
disp(' ')
disp('CONDITIONAL VARIANCE DECOMPOSITION (in percent)')
end
end
vardec_i = zeros(length(SubsetOfVariables),exo_nbr);

View File

@ -760,8 +760,12 @@ if analytic_derivation
else
lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
end
fval = (likelihood-lnprior);
if DynareOptions.endogenous_prior==1
[lnpriormom] = endogenous_prior(Y,Pstar,BayesInfo);
fval = (likelihood-lnprior-lnpriormom);
else
fval = (likelihood-lnprior);
end
if isnan(fval)
info = 47;
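For orientation (this restates the hunk rather than adding behaviour): fval is the quantity the mode-finder minimizes, and in this function the variable likelihood holds the negative log-likelihood (consistent with fval = likelihood - lnprior being minimized). Enabling the option therefore just adds the endogenous-prior term to the log posterior kernel being maximized, i.e. -fval = log-likelihood + log prior + log endogenous prior. The term lnpriormom itself is computed by the new file matlab/endogenous_prior.m shown further down in this diff.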

View File

@ -156,7 +156,7 @@ if isequal(options_.mode_compute,0) && isempty(options_.mode_file) && options_.m
oo_.Smoother.SteadyState = ys;
oo_.Smoother.TrendCoeffs = trend_coeff;
if options_.filter_covariance
oo_.Smoother.variance = P;
oo_.Smoother.Variance = P;
end
i_endo = bayestopt_.smoother_saved_var_list;
if options_.nk ~= 0
@ -482,12 +482,12 @@ if options_.mode_check == 1 && ~options_.mh_posterior_mode_estimation
end
oo_.posterior.optimization.mode = xparam1;
oo_.posterior.optimization.variance = [];
oo_.posterior.optimization.Variance = [];
if ~options_.mh_posterior_mode_estimation
if options_.cova_compute
invhess = inv(hh);
stdh = sqrt(diag(invhess));
oo_.posterior.optimization.variance = invhess;
oo_.posterior.optimization.Variance = invhess;
end
else
variances = bayestopt_.p2.*bayestopt_.p2;
@ -921,7 +921,7 @@ if (any(bayestopt_.pshape >0 ) && options_.mh_replic) || ...
ana_deriv = options_.analytic_derivation;
options_.analytic_derivation = 0;
if options_.cova_compute
feval(options_.posterior_sampling_method,objective_function,options_.proposal_distribution,xparam1,invhess,bounds,dataset_,options_,M_,estim_params_,bayestopt_,oo_);
oo_.MC_record=feval(options_.posterior_sampling_method,objective_function,options_.proposal_distribution,xparam1,invhess,bounds,dataset_,options_,M_,estim_params_,bayestopt_,oo_);
else
error('I Cannot start the MCMC because the hessian of the posterior kernel at the mode was not computed.')
end
@ -943,7 +943,7 @@ if (any(bayestopt_.pshape >0 ) && options_.mh_replic) || ...
if ~options_.nograph
oo_ = PlotPosteriorDistributions(estim_params_, M_, options_, bayestopt_, oo_);
end
[oo_.posterior.metropolis.mean,oo_.posterior.metropolis.variance] ...
[oo_.posterior.metropolis.mean,oo_.posterior.metropolis.Variance] ...
= GetPosteriorMeanVariance(M_,options_.mh_drop);
else
load([M_.fname '_results'],'oo_');
@ -974,7 +974,7 @@ if (~((any(bayestopt_.pshape > 0) && options_.mh_replic) || (any(bayestopt_.psha
[atT,innov,measurement_error,updated_variables,ys,trend_coeff,aK,T,R,P,PK,decomp] = DsgeSmoother(xparam1,dataset_.info.ntobs,dataset_.data,dataset_.missing.aindex,dataset_.missing.state);
oo_.Smoother.SteadyState = ys;
oo_.Smoother.TrendCoeffs = trend_coeff;
oo_.Smoother.variance = P;
oo_.Smoother.Variance = P;
i_endo = bayestopt_.smoother_saved_var_list;
if options_.nk ~= 0
oo_.FilteredVariablesKStepAhead = aK(options_.filter_step_ahead, ...

matlab/endogenous_prior.m (new file, 90 lines)
View File

@ -0,0 +1,90 @@
function [lnpriormom] = endogenous_prior(data,Pstar,BayesInfo)
% Computes the endogenous log prior addition to the initial prior
%
% INPUTS
% data       [double]    n*T matrix of data observations
% Pstar      [double]    k*k unconditional covariance matrix of the state variables
% BayesInfo  [structure] prior information; only the field mf1 (position of the observed variables within the state vector) is used here
%
% OUTPUTS
% lnpriormom [double]    scalar, value of the endogenous log prior
%
% Implements the endogenous prior described in notes by Lawrence Christiano,
% specified in the appendix of:
% "Introducing Financial Frictions and Unemployment into a Small Open Economy Model"
% by Lawrence J. Christiano, Mathias Trabandt and Karl Walentin (2011),
% Journal of Economic Dynamics and Control.
% This is the 'mother' of the priors on the model parameters: the prior
% includes a metric across some chosen moments of the (supposedly
% pre-sample) data.
% *** Implemented here for variances, but in principle any moment
% *** could be matched.
% By default, the prior second moments are computed from the same sample
% used to find the posterior mode. This can be changed by making the
% appropriate adjustment to the code below.
%
% Copyright (C) 2011 Lawrence J. Christiano, Mathias Trabandt and Karl Walentin
% Copyright (C) 2013 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
Y=data';
[Tsamp,n]=size(Y); % sample length and number of matched moments (here set equal to nr of observables)
hmat=zeros(n,Tsamp);
Ydemean=zeros(Tsamp,n);
C0=zeros(n,n);
C1=zeros(n,n);
C2=zeros(n,n);
for j=1:n
Ydemean(:,j)=Y(:,j)-mean(Y(:,j));
end
Fhat=diag(Ydemean'*Ydemean)/Tsamp;
% we need ht, where t=1,...,T
for t=1:Tsamp
hmat(:,t)=diag(Ydemean(t,:)'*Ydemean(t,:))-Fhat;
end
% To calculate Shat we need C0, C1 and C2
for t=1:Tsamp
C0=C0+1/Tsamp*hmat(:,t)*hmat(:,t)';
end
for t=2:Tsamp
C1=C1+1/(Tsamp-1)*hmat(:,t)*hmat(:,t-1)';
end
for t=3:Tsamp
C2=C2+1/(Tsamp-2)*hmat(:,t)*hmat(:,t-2)';
end
% Finally, we have the sampling uncertainty measure Shat:
Shat=C0 +(1-1/(2+1))*(C1+C1')...
+(1-2/(2+1))*(C2+C2');
% Model variances below:
mf=BayesInfo.mf1;
II=eye(size(Pstar,2));
Z=II(mf,:);
% This is Ftheta, variance of model variables, given param vector theta:
Ftheta=diag(Z*Pstar(:,mf)); % +H;
% below commented out line is for Del Negro Schorfheide style priors:
% lnpriormom=-.5*n*TT*log(2*pi)-.5*TT*log(det(sigma))-.5*TT*trace(inv(sigma)*(gamyy-2*phi'*gamxy+phi'*gamxx*phi));
lnpriormom=.5*n*log(Tsamp/(2*pi))-.5*log(det(Shat))-.5*Tsamp*(Fhat-Ftheta)'/Shat*(Fhat-Ftheta);
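Because the new function depends on nothing beyond its three inputs, it can be exercised in isolation. The following is a minimal sketch with synthetic, hand-built stand-ins for data, Pstar and BayesInfo, shaped as the header above describes; the numbers are meaningless beyond checking that a finite scalar comes back. (This commit only adds the options_.endogenous_prior default further down; wiring the option up from a mod file is outside this diff.)

% Synthetic exercise of endogenous_prior.m -- all inputs are made up:
n = 2; T = 200; k = 4;               % 2 observables, 200 pre-sample periods, 4 state variables
data = randn(n, T);                  % stand-in for the n*T data matrix
Pstar = 0.5*eye(k) + 0.1*ones(k);    % stand-in state covariance (symmetric positive definite)
BayesInfo.mf1 = [1; 3];              % pretend the observables sit in rows 1 and 3 of the state vector
lnpriormom = endogenous_prior(data, Pstar, BayesInfo)   % should print a finite scalar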

View File

@ -131,7 +131,7 @@ clear('priordens')
oo.Smoother.SteadyState = ys;
oo.Smoother.TrendCoeffs = trend_coeff;
if options_.filter_covariance
oo.Smoother.variance = P;
oo.Smoother.Variance = P;
end
i_endo = bayestopt_.smoother_saved_var_list;
if options_.nk ~= 0

View File

@ -146,7 +146,7 @@ dynare_graph_close;
% saving results
save_results(yf_mean,'oo_.forecast.mean.',var_list);
save_results(yf_mean,'oo_.forecast.Mean.',var_list);
save_results(yf1(:,:,k1(1)),'oo_.forecast.HPDinf.',var_list);
save_results(yf1(:,:,k1(2)),'oo_.forecast.HPDsup.',var_list);
save_results(yf2(:,:,k2(1)),'oo_.forecast.HPDTotalinf.',var_list);

View File

@ -552,6 +552,9 @@ options_.graph_save_formats.fig = 0;
% risky steady state
options_.risky_steadystate = 0;
% endogenous prior
options_.endogenous_prior = 0;
% use GPU
options_.gpu = 0;

View File

@ -39,7 +39,7 @@ for j=1:nvar
if ymax-ymin < 1e-6
continue
end
fhandle = dyn_figure(DynareOptions,'Name',endo_names(i_var(j),:));
fhandle = dyn_figure(DynareOptions,'Name',['Shock decomposition: ',endo_names(i_var(j),:)]);
ax=axes('Position',[0.1 0.1 0.6 0.8]);
axis(ax,[xmin xmax ymin ymax]);
plot(ax,x(2:end),z1(end,:),'k-','LineWidth',2)
@ -79,6 +79,6 @@ for j=1:nvar
y1 = y1 + height;
end
dyn_saveas(fhandle,[DynareModel.fname '_shock_decomposition_' endo_names(i_var(j),:)],DynareOptions);
dyn_saveas(fhandle,[DynareModel.fname,'_shock_decomposition_',deblank(endo_names(i_var(j),:))],DynareOptions);
hold off
end

View File

@ -131,6 +131,7 @@ if info(1)==0,
options_.irf = 0;
options_.noprint = 1;
options_.order = 1;
options_.SpectralDensity.trigger = 0;
options_.periods = data_info.info.ntobs+100;
if options_.kalman_algo > 2,
options_.kalman_algo = 1;

View File

@ -203,7 +203,7 @@ forecasts.controled_variables = constrained_vars;
forecasts.instruments = options_cond_fcst.controlled_varexo;
for i = 1:EndoSize
eval(['forecasts.cond.mean.' deblank(M_.endo_names(oo_.dr.order_var(i),:)) ' = mFORCS1(i,:)'';']);
eval(['forecasts.cond.Mean.' deblank(M_.endo_names(oo_.dr.order_var(i),:)) ' = mFORCS1(i,:)'';']);
tmp = sort(squeeze(FORCS1(i,:,:))');
eval(['forecasts.cond.ci.' deblank(M_.endo_names(oo_.dr.order_var(i),:)) ...
' = [tmp(t1,:)'' ,tmp(t2,:)'' ]'';']);
@ -227,7 +227,7 @@ end
mFORCS2 = mean(FORCS2,3);
for i = 1:EndoSize
eval(['forecasts.uncond.mean.' deblank(M_.endo_names(oo_.dr.order_var(i),:)) ' = mFORCS2(i,:)'';']);
eval(['forecasts.uncond.Mean.' deblank(M_.endo_names(oo_.dr.order_var(i),:)) ' = mFORCS2(i,:)'';']);
tmp = sort(squeeze(FORCS2(i,:,:))');
eval(['forecasts.uncond.ci.' deblank(M_.endo_names(oo_.dr.order_var(i),:)) ...
' = [tmp(t1,:)'' ,tmp(t2,:)'' ]'';']);

View File

@ -1,4 +1,4 @@
function independent_metropolis_hastings(TargetFun,ProposalFun,xparam1,vv,mh_bounds,varargin)
function record=independent_metropolis_hastings(TargetFun,ProposalFun,xparam1,vv,mh_bounds,varargin)
% Independent Metropolis-Hastings algorithm.
%
@ -11,7 +11,7 @@ function independent_metropolis_hastings(TargetFun,ProposalFun,xparam1,vv,mh_bou
% o varargin list of argument following mh_bounds
%
% OUTPUTS
% None
% o record [struct] structure summarizing the MCMC run (seeds, acceptance rates and last draw of each chain)
%
% ALGORITHM
% Metropolis-Hastings.
@ -103,7 +103,7 @@ else
[fout, nBlockPerCPU, totCPU] = masterParallel(options_.parallel, fblck, nblck,NamFileInput,'independent_metropolis_hastings_core', localVars, globalVars, options_.parallel_info);
for j=1:totCPU,
offset = sum(nBlockPerCPU(1:j-1))+fblck-1;
record.LastLogLiK(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.LastLogLiK(offset+1:sum(nBlockPerCPU(1:j)));
record.LastLogPost(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.LastLogPost(offset+1:sum(nBlockPerCPU(1:j)));
record.LastParameters(offset+1:sum(nBlockPerCPU(1:j)),:)=fout(j).record.LastParameters(offset+1:sum(nBlockPerCPU(1:j)),:);
record.AcceptationRates(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.AcceptationRates(offset+1:sum(nBlockPerCPU(1:j)));
record.Seeds(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.Seeds(offset+1:sum(nBlockPerCPU(1:j)));

View File

@ -229,7 +229,7 @@ for b = fblck:nblck,
jsux = 0;
if j == nruns(b) % I record the last draw...
record.LastParameters(b,:) = x2(end,:);
record.LastLogLiK(b) = logpo2(end);
record.LastLogPost(b) = logpo2(end);
end
% size of next file in chain b
InitSizeArray(b) = min(nruns(b)-j,MAX_nruns);

View File

@ -194,7 +194,7 @@ if ~options_.load_mh_file && ~options_.mh_recover
record.InitialParameters = ix2;
record.InitialLogLiK = ilogpo2;
record.LastParameters = zeros(nblck,npar);
record.LastLogLiK = zeros(nblck,1);
record.LastLogPost = zeros(nblck,1);
record.LastFileNumber = AnticipatedNumberOfFiles ;
record.LastLineNumber = AnticipatedNumberOfLinesInTheLastFile;
save([MhDirectoryName '/' ModelName '_mh_history.mat'],'record');
@ -253,7 +253,7 @@ elseif options_.load_mh_file && ~options_.mh_recover
else
NewFile = ones(nblck,1)*(LastFileNumber+1);
end
ilogpo2 = record.LastLogLiK;
ilogpo2 = record.LastLogPost;
ix2 = record.LastParameters;
fblck = 1;
fline = ones(nblck,1)*(LastLineNumber+1);
@ -292,7 +292,7 @@ elseif options_.mh_recover
end
%% Default initialization:
if OldMh
ilogpo2 = record.LastLogLiK;
ilogpo2 = record.LastLogPost;
ix2 = record.LastParameters;
else
ilogpo2 = record.InitialLogLiK;

View File

@ -91,7 +91,7 @@ for plt = 1:nbplt,
NAMES = [];
TeXNAMES = [];
end
hh = dyn_figure(DynareOptions,'Name','Check plots');
hh = dyn_figure(DynareOptions,'Name','Mode check plots');
for k=1:min(nstar,length(x)-(plt-1)*nstar)
subplot(nr,nc,k)
kk = (plt-1)*nstar+k;

View File

@ -35,14 +35,14 @@ end
load conditional_forecasts;
if nargin==1 || isempty(periods) % Set default number of periods.
eval(['periods = length(forecasts.cond.mean.' Variables(1,:) ');']);
eval(['periods = length(forecasts.cond.Mean.' Variables(1,:) ');']);
end
for i=1:size(Variables,1)
eval(['ci1 = forecasts.cond.ci.' Variables(i,:) ';'])
eval(['m1 = forecasts.cond.mean.' Variables(i,:) ';'])
eval(['m1 = forecasts.cond.Mean.' Variables(i,:) ';'])
eval(['ci2 = forecasts.uncond.ci.' Variables(i,:) ';'])
eval(['m2 = forecasts.uncond.mean.' Variables(i,:) ';'])
eval(['m2 = forecasts.uncond.Mean.' Variables(i,:) ';'])
build_figure(Variables(i,:),ci1(:,1:periods),ci2(:,1:periods),m1(1:periods),m2(1:periods),options_);
end

View File

@ -148,7 +148,7 @@ else
[fout, nBlockPerCPU, totCPU] = masterParallel(options_.parallel, fblck, nblck,NamFileInput,'random_walk_metropolis_hastings_core', localVars, globalVars, options_.parallel_info);
for j=1:totCPU,
offset = sum(nBlockPerCPU(1:j-1))+fblck-1;
record.LastLogLiK(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.LastLogLiK(offset+1:sum(nBlockPerCPU(1:j)));
record.LastLogPost(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.LastLogPost(offset+1:sum(nBlockPerCPU(1:j)));
record.LastParameters(offset+1:sum(nBlockPerCPU(1:j)),:)=fout(j).record.LastParameters(offset+1:sum(nBlockPerCPU(1:j)),:);
record.AcceptationRates(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.AcceptationRates(offset+1:sum(nBlockPerCPU(1:j)));
record.Seeds(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.Seeds(offset+1:sum(nBlockPerCPU(1:j)));

View File

@ -247,7 +247,7 @@ for b = fblck:nblck,
jsux = 0;
if j == nruns(b) % I record the last draw...
record.LastParameters(b,:) = x2(end,:);
record.LastLogLiK(b) = logpo2(end);
record.LastLogPost(b) = logpo2(end);
end
% size of next file in chain b
InitSizeArray(b) = min(nruns(b)-j,MAX_nruns);

View File

@ -74,7 +74,7 @@ elseif rplottype == 1
for j = 1:size(y,1)
figure ;
plot(ix(i),y(j,i)) ;
title(['Plot of ' s1(:,j)]) ;
title(['Plot of ' s1(j,:)],'Interpreter','none') ;
xlabel('Periods') ;
end
elseif rplottype == 2
@ -87,8 +87,8 @@ elseif rplottype == 2
hold on ;
plot(ix(i),oo_.steady_state(j)*ones(1,size(i,1)),'w:') ;
xlabel('Periods') ;
ylabel([s1(:,j)]) ;
title(['Plot of ' s1(:,j)]) ;
ylabel([s1(j,:)],'Interpreter','none') ;
title(['Plot of ' s1(j,:)],'Interpreter','none') ;
end
end
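The indexing change above matters because s1 is a character matrix holding one variable name per row: s1(j,:) extracts the j-th name, whereas the old s1(:,j) picks out the j-th character of every name (and generally breaks the string concatenation). The added 'Interpreter','none' keeps underscores in variable names from being rendered as TeX subscripts. A tiny stand-alone illustration, with a made-up name matrix standing in for s1:

s1 = strvcat('y', 'c_obs', 'k');   % one blank-padded name per row, as in rplot
s1(2,:)                            % -> 'c_obs'
s1(:,2)                            % -> second character of every name: ' ', '_', ' '
title(['Plot of ' s1(2,:)], 'Interpreter', 'none')   % underscore displayed literally, not as a subscript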

View File

@ -142,6 +142,9 @@ if options_.nomoments == 0
elseif options_.periods == 0
% There is no code for theoretical moments at 3rd order
if options_.order <= 2
if options_.order == 2
warning('You have requested a second order approximation, but theoretical variance decompositions are currently only implemented at order=1. The displayed decomposition is therefore based on a first order approximation.')
end
disp_th_moments(oo_.dr,var_list);
end
else

View File

@ -44,7 +44,7 @@ if isfield(oo_, [ TYPE 'TheoreticalMoments'])
if isfield(temporary_structure,'dsge')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge;'])
if isfield(temporary_structure,'VarianceDecomposition')
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.mean;'])
eval(['temporary_structure = oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.Mean;'])
if isfield(temporary_structure,name)
% Nothing to do.
return
@ -82,10 +82,10 @@ else
[p_mean, p_median, p_var, hpd_interval, p_deciles, density] = ...
posterior_moments(tmp,1,mh_conf_sig);
end
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.mean.' name ' = p_mean;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.median.' name ' = p_median;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.variance.' name ' = p_var;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.hpdinf.' name ' = hpd_interval(1);']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.hpdsup.' name ' = hpd_interval(2);']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.Mean.' name ' = p_mean;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.Median.' name ' = p_median;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.Variance.' name ' = p_var;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.HPDinf.' name ' = hpd_interval(1);']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.HPDsup.' name ' = hpd_interval(2);']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.deciles.' name ' = p_deciles;']);
eval(['oo_.' TYPE 'TheoreticalMoments.dsge.VarianceDecomposition.density.' name ' = density;']);