Remove objective_function_penalty_base

time-shift
Johannes Pfeifer 2016-06-01 18:22:51 +02:00 committed by Stéphane Adjemian (Lupi)
parent 329b91d717
commit 0c00e28799
22 changed files with 342 additions and 315 deletions
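
The change pattern is the same across all the objective functions touched below: instead of returning objective_function_penalty_base plus an endogenous penalty, a failed evaluation now returns fval = Inf with exit_flag = 0, stores the error code in info(1) and the endogenous penalty term in info(4), and leaves it to the calling optimizer to turn that into a finite penalized value. The short MATLAB snippet below is purely illustrative (it is not part of the commit; xparam1 and lb are placeholder names) and only restates that convention for a hypothetical lower-bound check.

% Illustrative sketch of the new error-reporting convention (not from the diff).
xparam1 = [0.5; -0.2];   % hypothetical parameter vector
lb      = [0.0;  0.0];   % hypothetical lower bounds of the prior domain
fval = 0; exit_flag = 1; info = zeros(4,1);
if any(xparam1 < lb)
    k = find(xparam1 < lb);
    fval = Inf;                              % no finite penalty is returned directly anymore
    exit_flag = 0;
    info(1) = 41;                            % error code: parameter below the prior lower bound
    info(4) = sum((lb(k) - xparam1(k)).^2);  % endogenous penalty, consumed by the optimizer wrapper
end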

View File

@ -40,7 +40,7 @@ if isempty(varargin) || ( isequal(length(varargin), 1) && isequal(varargin{1},'h
return return
end end
global options_ M_ estim_params_ bayestopt_ oo_ objective_function_penalty_base global options_ M_ estim_params_ bayestopt_ oo_
donesomething = false; donesomething = false;

View File

@ -55,9 +55,9 @@ function [fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff,Model,DynareOpti
%! @item info==19 %! @item info==19
%! The steadystate routine threw an exception (inconsistent deep parameters). %! The steadystate routine threw an exception (inconsistent deep parameters).
%! @item info==20 %! @item info==20
%! Cannot find the steady state, info(2) contains the sum of square residuals (of the static equations). %! Cannot find the steady state, info(4) contains the sum of square residuals (of the static equations).
%! @item info==21 %! @item info==21
%! The steady state is complex, info(2) contains the sum of square of imaginary parts of the steady state. %! The steady state is complex, info(4) contains the sum of square of imaginary parts of the steady state.
%! @item info==22 %! @item info==22
%! The steady has NaNs. %! The steady has NaNs.
%! @item info==23 %! @item info==23
@ -134,14 +134,12 @@ function [fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff,Model,DynareOpti
% AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT FR % AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT FR
global objective_function_penalty_base
% Initialization of the returned variables and others... % Initialization of the returned variables and others...
fval = []; fval = [];
SteadyState = []; SteadyState = [];
trend_coeff = []; trend_coeff = [];
exit_flag = 1; exit_flag = 1;
info = 0; info = zeros(4,1);
DLIK = []; DLIK = [];
Hess = []; Hess = [];
@ -181,9 +179,10 @@ end
% Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain. % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BoundsInfo.lb) if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BoundsInfo.lb)
k = find(xparam1<BoundsInfo.lb); k = find(xparam1<BoundsInfo.lb);
fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2); fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 41; info(1) = 41;
info(4)= sum((BoundsInfo.lb(k)-xparam1(k)).^2);
if analytic_derivation, if analytic_derivation,
DLIK=ones(length(xparam1),1); DLIK=ones(length(xparam1),1);
end end
@ -193,9 +192,10 @@ end
% Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain. % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
if ~isequal(DynareOptions.mode_compute,1) && any(xparam1>BoundsInfo.ub) if ~isequal(DynareOptions.mode_compute,1) && any(xparam1>BoundsInfo.ub)
k = find(xparam1>BoundsInfo.ub); k = find(xparam1>BoundsInfo.ub);
fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2); fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 42; info(1) = 42;
info(4)= sum((xparam1(k)-BoundsInfo.ub(k)).^2);
if analytic_derivation, if analytic_derivation,
DLIK=ones(length(xparam1),1); DLIK=ones(length(xparam1),1);
end end
@ -212,18 +212,20 @@ H = Model.H;
if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances') if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances')
[Q_is_positive_definite, penalty] = ispd(Q(EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness,EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness)); [Q_is_positive_definite, penalty] = ispd(Q(EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness,EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness));
if ~Q_is_positive_definite if ~Q_is_positive_definite
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 43; info(1) = 43;
info(4) = penalty;
return return
end end
if isfield(EstimatedParameters,'calibrated_covariances') if isfield(EstimatedParameters,'calibrated_covariances')
correct_flag=check_consistency_covariances(Q); correct_flag=check_consistency_covariances(Q);
if ~correct_flag if ~correct_flag
penalty = sum(Q(EstimatedParameters.calibrated_covariances.position).^2); penalty = sum(Q(EstimatedParameters.calibrated_covariances.position).^2);
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 71; info(1) = 71;
info(4) = penalty;
return return
end end
end end
@ -233,18 +235,20 @@ end
if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME') if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME')
[H_is_positive_definite, penalty] = ispd(H(EstimatedParameters.H_entries_to_check_for_positive_definiteness,EstimatedParameters.H_entries_to_check_for_positive_definiteness)); [H_is_positive_definite, penalty] = ispd(H(EstimatedParameters.H_entries_to_check_for_positive_definiteness,EstimatedParameters.H_entries_to_check_for_positive_definiteness));
if ~H_is_positive_definite if ~H_is_positive_definite
fval = objective_function_penalty_base+penalty; fval = Inf;
info(1) = 44;
info(4) = penalty;
exit_flag = 0; exit_flag = 0;
info = 44;
return return
end end
if isfield(EstimatedParameters,'calibrated_covariances_ME') if isfield(EstimatedParameters,'calibrated_covariances_ME')
correct_flag=check_consistency_covariances(H); correct_flag=check_consistency_covariances(H);
if ~correct_flag if ~correct_flag
penalty = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2); penalty = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2);
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 72; info(1) = 72;
info(4) = penalty;
return return
end end
end end
@ -259,30 +263,36 @@ end
 [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
 % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || info(1) == 9 || ...
-        info(1) == 22 || info(1) == 24 || info(1) == 19 || info(1) == 25 || info(1) == 10
-    fval = objective_function_penalty_base+1;
-    info = info(1);
-    exit_flag = 0;
-    if analytic_derivation,
-        DLIK=ones(length(xparam1),1);
-    end
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1)==26
-    fval = objective_function_penalty_base+info(2);
-    info = info(1);
-    exit_flag = 0;
-    if analytic_derivation,
-        DLIK=ones(length(xparam1),1);
-    end
-    return
+if info(1)
+    if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1) == 6 || info(1) == 19 || ...
+            info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
+            info(1) == 81 || info(1) == 84 || info(1) == 85
+        % error codes with a meaningful second entry that can be used as penalty
+        fval = Inf;
+        info(4) = info(2);
+        exit_flag = 0;
+        if analytic_derivation,
+            DLIK=ones(length(xparam1),1);
+        end
+        return
+    else
+        fval = Inf;
+        info(4) = 0.1;
+        exit_flag = 0;
+        if analytic_derivation,
+            DLIK=ones(length(xparam1),1);
+        end
+        return
+    end
 end
% check endogenous prior restrictions % check endogenous prior restrictions
info=endogenous_prior_restrictions(T,R,Model,DynareOptions,DynareResults); info=endogenous_prior_restrictions(T,R,Model,DynareOptions,DynareResults);
if info(1), if info(1),
fval = objective_function_penalty_base+info(2); fval = Inf;
info = info(1); info(4)=info(2);
exit_flag = 0; exit_flag = 0;
if analytic_derivation, if analytic_derivation,
DLIK=ones(length(xparam1),1); DLIK=ones(length(xparam1),1);
@ -458,8 +468,9 @@ switch DynareOptions.lik_init
diffuse_periods = size(dlik,1); diffuse_periods = size(dlik,1);
end end
if isnan(dLIK), if isnan(dLIK),
info = 45; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 45;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
@ -698,8 +709,9 @@ if ((kalman_algo==1) || (kalman_algo==3))% Multivariate Kalman Filter
end end
else else
if isinf(LIK) if isinf(LIK)
info = 66; fval = Inf;
fval = objective_function_penalty_base+1; info(1) = 66;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
@ -707,7 +719,7 @@ if ((kalman_algo==1) || (kalman_algo==3))% Multivariate Kalman Filter
else else
if DynareOptions.lik_init==3 if DynareOptions.lik_init==3
LIK = LIK + dLIK; LIK = LIK + dLIK;
if analytic_derivation==0 && nargout==2, if analytic_derivation==0 && nargout>3,
if ~singular_diffuse_filter, if ~singular_diffuse_filter,
lik = [dlik; lik]; lik = [dlik; lik];
else else
@ -771,7 +783,7 @@ if (kalman_algo==2) || (kalman_algo==4)
end end
if DynareOptions.lik_init==3 if DynareOptions.lik_init==3
LIK = LIK+dLIK; LIK = LIK+dLIK;
if analytic_derivation==0 && nargout==2, if analytic_derivation==0 && nargout>3,
lik = [dlik; lik]; lik = [dlik; lik];
end end
end end
@ -797,15 +809,17 @@ if analytic_derivation
end end
if isnan(LIK) if isnan(LIK)
info = 45; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 45;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
if imag(LIK)~=0 if imag(LIK)~=0
info = 46; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 46;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
@ -851,15 +865,17 @@ if DynareOptions.prior_restrictions.status
end end
if isnan(fval) if isnan(fval)
info = 47; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 47;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
if imag(fval)~=0 if imag(fval)~=0
info = 48; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 48;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
@ -867,7 +883,7 @@ end
% Update DynareOptions.kalman_algo. % Update DynareOptions.kalman_algo.
DynareOptions.kalman_algo = kalman_algo; DynareOptions.kalman_algo = kalman_algo;
if analytic_derivation==0 && nargout==2, if analytic_derivation==0 && nargout>3,
lik=lik(start:end,:); lik=lik(start:end,:);
DLIK=[-lnprior; lik(:)]; DLIK=[-lnprior; lik(:)];
end end

View File

@ -42,13 +42,12 @@ function [fval,info,exit_flag,grad,hess,SteadyState,trend_coeff,PHI,SIGMAu,iXX,p
% You should have received a copy of the GNU General Public License % You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>. % along with Dynare. If not, see <http://www.gnu.org/licenses/>.
global objective_function_penalty_base
persistent dsge_prior_weight_idx persistent dsge_prior_weight_idx
grad=[]; grad=[];
hess=[]; hess=[];
exit_flag = []; exit_flag = [];
info = []; info = zeros(4,1);
PHI = []; PHI = [];
SIGMAu = []; SIGMAu = [];
iXX = []; iXX = [];
@ -92,18 +91,20 @@ exit_flag = 1;
% Return, with endogenous penalty, if some dsge-parameters are smaller than the lower bound of the prior domain. % Return, with endogenous penalty, if some dsge-parameters are smaller than the lower bound of the prior domain.
if DynareOptions.mode_compute ~= 1 && any(xparam1 < BoundsInfo.lb) if DynareOptions.mode_compute ~= 1 && any(xparam1 < BoundsInfo.lb)
k = find(xparam1 < BoundsInfo.lb); k = find(xparam1 < BoundsInfo.lb);
fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2); fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 41; info(1) = 41;
info(4)= sum((BoundsInfo.lb(k)-xparam1(k)).^2);
return; return;
end end
% Return, with endogenous penalty, if some dsge-parameters are greater than the upper bound of the prior domain. % Return, with endogenous penalty, if some dsge-parameters are greater than the upper bound of the prior domain.
if DynareOptions.mode_compute ~= 1 && any(xparam1 > BoundsInfo.ub) if DynareOptions.mode_compute ~= 1 && any(xparam1 > BoundsInfo.ub)
k = find(xparam1 > BoundsInfo.ub); k = find(xparam1 > BoundsInfo.ub);
fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2); fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 42; info(1) = 42;
info(4) = sum((xparam1(k)-BoundsInfo.ub(k)).^2);
return; return;
end end
@ -124,11 +125,12 @@ dsge_prior_weight = Model.params(dsge_prior_weight_idx);
% Is the dsge prior proper? % Is the dsge prior proper?
if dsge_prior_weight<(NumberOfParameters+NumberOfObservedVariables)/NumberOfObservations; if dsge_prior_weight<(NumberOfParameters+NumberOfObservedVariables)/NumberOfObservations;
fval = objective_function_penalty_base+abs(NumberOfObservations*dsge_prior_weight-(NumberOfParameters+NumberOfObservedVariables)); fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 51; info(1) = 51;
info(2)=dsge_prior_weight; info(2)=dsge_prior_weight;
info(3)=(NumberOfParameters+NumberOfObservedVariables)/DynareDataset.nobs; info(3)=(NumberOfParameters+NumberOfObservedVariables)/DynareDataset.nobs;
info(4)=abs(NumberOfObservations*dsge_prior_weight-(NumberOfParameters+NumberOfObservedVariables));
return return
end end
@ -141,17 +143,21 @@ end
 [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
 % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || ...
-        info(1) == 22 || info(1) == 24 || info(1) == 25 || info(1) == 10
-    fval = objective_function_penalty_base+1;
-    info = info(1);
-    exit_flag = 0;
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1) == 6 || info(1) == 19 || info(1) == 20 || info(1) == 21
-    fval = objective_function_penalty_base+info(2);
-    info = info(1);
-    exit_flag = 0;
-    return
+if info(1)
+    if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1) == 6 || info(1) == 19 || ...
+            info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
+            info(1) == 81 || info(1) == 84 || info(1) == 85
+        % error codes with a meaningful second entry that can be used as penalty
+        fval = Inf;
+        info(4) = info(2);
+        exit_flag = 0;
+        return
+    else
+        fval = Inf;
+        info(4) = 0.1;
+        exit_flag = 0;
+        return
+    end
 end
% Define the mean/steady state vector. % Define the mean/steady state vector.
@ -218,8 +224,9 @@ if ~isinf(dsge_prior_weight)% Evaluation of the likelihood of the dsge-var model
SIGMAu = tmp0 - tmp1*tmp2*tmp1'; clear('tmp0'); SIGMAu = tmp0 - tmp1*tmp2*tmp1'; clear('tmp0');
[SIGMAu_is_positive_definite, penalty] = ispd(SIGMAu); [SIGMAu_is_positive_definite, penalty] = ispd(SIGMAu);
if ~SIGMAu_is_positive_definite if ~SIGMAu_is_positive_definite
fval = objective_function_penalty_base + penalty; fval = Inf;
info = 52; info(1) = 52;
info(4) = penalty;
exit_flag = 0; exit_flag = 0;
return; return;
end end
@ -249,15 +256,17 @@ else% Evaluation of the likelihood of the dsge-var model when the dsge prior wei
end end
if isnan(lik) if isnan(lik)
info = 45; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 45;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
if imag(lik)~=0 if imag(lik)~=0
info = 46; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 46;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
@ -267,15 +276,17 @@ lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo
fval = (lik-lnprior); fval = (lik-lnprior);
if isnan(fval) if isnan(fval)
info = 47; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 47;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
if imag(fval)~=0 if imag(fval)~=0
info = 48; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 48;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end

View File

@ -49,8 +49,6 @@ function [dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_,
% You should have received a copy of the GNU General Public License % You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>. % along with Dynare. If not, see <http://www.gnu.org/licenses/>.
global objective_function_penalty_base
hh = []; hh = [];
if isempty(gsa_flag) if isempty(gsa_flag)
@ -384,9 +382,6 @@ else% Yes!
end end
end end
% Set the "size" of penalty.
objective_function_penalty_base = 1e8;
% Get informations about the variables of the model. % Get informations about the variables of the model.
dr = set_state_space(oo_.dr,M_,options_); dr = set_state_space(oo_.dr,M_,options_);
oo_.dr = dr; oo_.dr = dr;

View File

@ -1,4 +1,4 @@
function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams,pshape,p6,p7,p3,p4,DynareOptions,DynareModel,EstimatedParams,DynareResults) function [fval,info,exit_flag,fake_1,fake_2] = minus_logged_prior_density(xparams,pshape,p6,p7,p3,p4,DynareOptions,DynareModel,EstimatedParams,DynareResults)
% Evaluates minus the logged prior density. % Evaluates minus the logged prior density.
% %
% INPUTS % INPUTS
@ -11,8 +11,9 @@ function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams,
% %
% OUTPUTS % OUTPUTS
% f [double] value of minus the logged prior density. % f [double] value of minus the logged prior density.
% info [double] vector: second entry stores penalty, first entry the error code.
% Copyright (C) 2009-2012 Dynare Team %
% Copyright (C) 2009-2016 Dynare Team
% %
% This file is part of Dynare. % This file is part of Dynare.
% %
@ -29,8 +30,6 @@ function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams,
% You should have received a copy of the GNU General Public License % You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>. % along with Dynare. If not, see <http://www.gnu.org/licenses/>.
global objective_function_penalty_base
fake_1 = 1; fake_1 = 1;
fake_2 = 1; fake_2 = 1;
@ -44,18 +43,20 @@ info = 0;
% Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain. % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
if ~isequal(DynareOptions.mode_compute,1) && any(xparams<p3) if ~isequal(DynareOptions.mode_compute,1) && any(xparams<p3)
k = find(xparams<p3); k = find(xparams<p3);
fval = objective_function_penalty_base+sum((p3(k)-xparams(k)).^2); fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 41; info(1) = 41;
info(2) = sum((p3(k)-xparams(k)).^2);
return return
end end
% Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain. % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
if ~isequal(DynareOptions.mode_compute,1) && any(xparams>p4) if ~isequal(DynareOptions.mode_compute,1) && any(xparams>p4)
k = find(xparams>p4); k = find(xparams>p4);
fval = objective_function_penalty_base+sum((xparams(k)-p4(k)).^2); fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 42; info(1) = 42;
info(2) = sum((xparams(k)-p4(k)).^2);
return return
end end
@ -71,18 +72,20 @@ if ~issquare(Q) || EstimatedParams.ncx || isfield(EstimatedParams,'calibrated_co
[Q_is_positive_definite, penalty] = ispd(Q); [Q_is_positive_definite, penalty] = ispd(Q);
if ~Q_is_positive_definite if ~Q_is_positive_definite
% The variance-covariance matrix of the structural innovations is not definite positive. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty. % The variance-covariance matrix of the structural innovations is not definite positive. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty.
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 43; info(1) = 43;
info(2) = penalty;
return return
end end
if isfield(EstimatedParams,'calibrated_covariances') if isfield(EstimatedParams,'calibrated_covariances')
correct_flag=check_consistency_covariances(Q); correct_flag=check_consistency_covariances(Q);
if ~correct_flag if ~correct_flag
penalty = sum(Q(EstimatedParams.calibrated_covariances.position).^2); penalty = sum(Q(EstimatedParams.calibrated_covariances.position).^2);
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 71; info(1) = 71;
info(2) = penalty;
return return
end end
end end
@ -94,18 +97,20 @@ if ~issquare(H) || EstimatedParams.ncn || isfield(EstimatedParams,'calibrated_co
[H_is_positive_definite, penalty] = ispd(H); [H_is_positive_definite, penalty] = ispd(H);
if ~H_is_positive_definite if ~H_is_positive_definite
% The variance-covariance matrix of the measurement errors is not definite positive. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty. % The variance-covariance matrix of the measurement errors is not definite positive. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty.
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 44; info(1) = 44;
info(2) = penalty;
return return
end end
if isfield(EstimatedParams,'calibrated_covariances_ME') if isfield(EstimatedParams,'calibrated_covariances_ME')
correct_flag=check_consistency_covariances(H); correct_flag=check_consistency_covariances(H);
if ~correct_flag if ~correct_flag
penalty = sum(H(EstimatedParams.calibrated_covariances_ME.position).^2); penalty = sum(H(EstimatedParams.calibrated_covariances_ME.position).^2);
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 72; info(1) = 72;
info(2) = penalty;
return return
end end
end end
@ -120,18 +125,24 @@ M_ = set_all_parameters(xparams,EstimatedParams,DynareModel);
 [dr,info,DynareModel,DynareOptions,DynareResults] = resol(0,DynareModel,DynareOptions,DynareResults);
 % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) ...
-        == 8 || info(1) == 22 || info(1) == 24 || info(1) == 19
-    fval = objective_function_penalty_base+1;
-    info = info(1);
-    exit_flag = 0;
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 20 || info(1) == 21 || info(1) == 23
-    fval = objective_function_penalty_base+info(2);
-    info = info(1);
-    exit_flag = 0;
-    return
+% Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
+if info(1)
+    if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1) == 6 || info(1) == 19 || ...
+            info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
+            info(1) == 81 || info(1) == 84 || info(1) == 85
+        % error codes with a meaningful second entry that can be used as penalty
+        fval = Inf;
+        info(4) = info(2);
+        exit_flag = 0;
+        return
+    else
+        fval = Inf;
+        info(4) = 0.1;
+        exit_flag = 0;
+        return
+    end
 end
fval = - priordens(xparams,pshape,p6,p7,p3,p4); fval = - priordens(xparams,pshape,p6,p7,p3,p4);

View File

@ -32,7 +32,7 @@ function [fval,info,exit_flag,DLIK,Hess,ys,trend_coeff,Model,DynareOptions,Bayes
%! @item fval %! @item fval
%! Double scalar, value of (minus) the likelihood. %! Double scalar, value of (minus) the likelihood.
%! @item info %! @item info
%! Double vector, second entry stores penalty, first entry the error code. %! Double vector, fourth entry stores penalty, first entry the error code.
%! @table @ @code %! @table @ @code
%! @item info==0 %! @item info==0
%! No error. %! No error.
@ -127,7 +127,6 @@ function [fval,info,exit_flag,DLIK,Hess,ys,trend_coeff,Model,DynareOptions,Bayes
% AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT fr % AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT fr
% frederic DOT karame AT univ DASH lemans DOT fr % frederic DOT karame AT univ DASH lemans DOT fr
global objective_function_penalty_base
% Declaration of the penalty as a persistent variable. % Declaration of the penalty as a persistent variable.
persistent init_flag persistent init_flag
persistent restrict_variables_idx observed_variables_idx state_variables_idx mf0 mf1 persistent restrict_variables_idx observed_variables_idx state_variables_idx mf0 mf1
@ -153,18 +152,20 @@ end
% Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain. % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
if (DynareOptions.mode_compute~=1) && any(xparam1<BoundsInfo.lb) if (DynareOptions.mode_compute~=1) && any(xparam1<BoundsInfo.lb)
k = find(xparam1(:) < BoundsInfo.lb); k = find(xparam1(:) < BoundsInfo.lb);
fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2); fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 41; info(1) = 41;
info(4) = sum((BoundsInfo.lb(k)-xparam1(k)).^2);
return return
end end
% Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain. % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
if (DynareOptions.mode_compute~=1) && any(xparam1>BoundsInfo.ub) if (DynareOptions.mode_compute~=1) && any(xparam1>BoundsInfo.ub)
k = find(xparam1(:)>BoundsInfo.ub); k = find(xparam1(:)>BoundsInfo.ub);
fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2); fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 42; info(1) = 42;
info(4) = sum((xparam1(k)-BoundsInfo.ub(k)).^2);
return return
end end
@ -176,18 +177,20 @@ H = Model.H;
if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances') if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances')
[Q_is_positive_definite, penalty] = ispd(Q(EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness,EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness)); [Q_is_positive_definite, penalty] = ispd(Q(EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness,EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness));
if ~Q_is_positive_definite if ~Q_is_positive_definite
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 43; info(1) = 43;
info(4) = penalty;
return return
end end
if isfield(EstimatedParameters,'calibrated_covariances') if isfield(EstimatedParameters,'calibrated_covariances')
correct_flag=check_consistency_covariances(Q); correct_flag=check_consistency_covariances(Q);
if ~correct_flag if ~correct_flag
penalty = sum(Q(EstimatedParameters.calibrated_covariances.position).^2); penalty = sum(Q(EstimatedParameters.calibrated_covariances.position).^2);
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 71; info(1) = 71;
info(4) = penalty;
return return
end end
end end
@ -197,18 +200,20 @@ end
if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME') if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME')
[H_is_positive_definite, penalty] = ispd(H(EstimatedParameters.H_entries_to_check_for_positive_definiteness,EstimatedParameters.H_entries_to_check_for_positive_definiteness)); [H_is_positive_definite, penalty] = ispd(H(EstimatedParameters.H_entries_to_check_for_positive_definiteness,EstimatedParameters.H_entries_to_check_for_positive_definiteness));
if ~H_is_positive_definite if ~H_is_positive_definite
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 44; info(1) = 44;
info(4) = penalty;
return return
end end
if isfield(EstimatedParameters,'calibrated_covariances_ME') if isfield(EstimatedParameters,'calibrated_covariances_ME')
correct_flag=check_consistency_covariances(H); correct_flag=check_consistency_covariances(H);
if ~correct_flag if ~correct_flag
penalty = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2); penalty = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2);
fval = objective_function_penalty_base+penalty; fval = Inf;
exit_flag = 0; exit_flag = 0;
info = 72; info(1) = 72;
info(4) = penalty;
return return
end end
end end
@ -222,14 +227,21 @@ end
 % Linearize the model around the deterministic steady state and extract the matrices of the state equation (T and R).
 [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 25 || info(1) == 10 || info(1) == 7 || info(1) == 9
-    fval = objective_function_penalty_base+1;
-    exit_flag = 0;
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 19 || info(1) == 20 || info(1) == 21
-    fval = objective_function_penalty_base+info(2);
-    exit_flag = 0;
-    return
+if info(1)
+    if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1) == 6 || info(1) == 19 || ...
+            info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
+            info(1) == 81 || info(1) == 84 || info(1) == 85
+        % error codes with a meaningful second entry that can be used as penalty
+        fval = Inf;
+        info(4) = info(2);
+        exit_flag = 0;
+        return
+    else
+        fval = Inf;
+        info(4) = 0.1;
+        exit_flag = 0;
+        return
+    end
 end
% Define a vector of indices for the observed variables. Is this really usefull?... % Define a vector of indices for the observed variables. Is this really usefull?...
@ -321,13 +333,15 @@ DynareOptions.warning_for_steadystate = 0;
LIK = feval(DynareOptions.particle.algorithm,ReducedForm,Y,start,DynareOptions.particle,DynareOptions.threads); LIK = feval(DynareOptions.particle.algorithm,ReducedForm,Y,start,DynareOptions.particle,DynareOptions.threads);
set_dynare_random_generator_state(s1,s2); set_dynare_random_generator_state(s1,s2);
if imag(LIK) if imag(LIK)
info = 46; likelihood = Inf;
likelihood = objective_function_penalty_base; info(1) = 46;
exit_flag = 0; info(4) = 0.1;
exit_flag = 0;
elseif isnan(LIK) elseif isnan(LIK)
info = 45; likelihood = Inf;
likelihood = objective_function_penalty_base; info(1) = 45;
exit_flag = 0; info(4) = 0.1;
exit_flag = 0;
else else
likelihood = LIK; likelihood = LIK;
end end
@ -339,15 +353,17 @@ lnprior = priordens(xparam1(:),BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesI
fval = (likelihood-lnprior); fval = (likelihood-lnprior);
if isnan(fval) if isnan(fval)
info = 47; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 47;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end
if imag(fval)~=0 if imag(fval)~=0
info = 48; fval = Inf;
fval = objective_function_penalty_base + 100; info(1) = 48;
info(4) = 0.1;
exit_flag = 0; exit_flag = 0;
return return
end end

View File

@ -1,24 +1,28 @@
-function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,Verbose,varargin)
-% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin)
+function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,penalty,f0,g0,badg,H0,Verbose,varargin)
+% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,penalty,f0,g0,badg,H0,Verbose,varargin)
 %
 % Inputs:
 % fcn: [string] string naming the objective function to be minimized
 % x0: [npar by 1] initial value of the parameter vector
-% g0: [npar by 1] initial value of the gradient vector
-% H0: [npar by npar] initial value for the inverse Hessian. Must be positive definite.
-% varargin: Optional additional inputs that get handed off to fcn each
-% time it is called.
+% penalty: [scalar] variable penalty in case of failure of the objective function
+% f0: [scalar] initial value of the function
+% g0: [npar by 1] initial value of the gradient vector
+% badg: [scalar] indicator for problems in the gradient computation
+% H0: [npar by npar] initial value for the inverse Hessian. Must be positive definite.
+% Verbose: [scalar] indicator for verbose computations
+% varargin: Optional additional inputs that get handed off to fcn each
+% time it is called.
 % Outputs:
 % fhat: [scalar] function value at minimum
 % xhat: [npar by 1] parameter vector at minimum
 % fcount [scalar] function iteration count upon termination
 % retcode [scalar] 0: normal step
 % 1: zero gradient.
 % 5: largest step still improves too fast.
 % 2,4: back and forth adjustment of stepsize didn't finish.
 % 3: smallest stepsize still improves too slow
 % 6: no improvement found
 %---------------------
 % Modified 7/22/96 to omit variable-length P list, for efficiency and compilation.
 % Places where the number of P's need to be altered or the code could be returned to
@ -34,7 +38,7 @@ function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,Verbose,vara
% http://sims.princeton.edu/yftp/optimize/mfiles/csminit.m % http://sims.princeton.edu/yftp/optimize/mfiles/csminit.m
% %
% Copyright (C) 1993-2007 Christopher Sims % Copyright (C) 1993-2007 Christopher Sims
% Copyright (C) 2008-2015 Dynare Team % Copyright (C) 2008-2016 Dynare Team
% %
% This file is part of Dynare. % This file is part of Dynare.
% %
@ -137,7 +141,7 @@ else
dxtest=x0+dx*lambda; dxtest=x0+dx*lambda;
end end
% home % home
f = feval(fcn,dxtest,varargin{:}); f = penalty_objective_function(dxtest,fcn,penalty,varargin{:});
%ARGLIST %ARGLIST
%f = feval(fcn,dxtest,P1,P2,P3,P4,P5,P6,P7,P8,P9,P10,P11,P12,P13); %f = feval(fcn,dxtest,P1,P2,P3,P4,P5,P6,P7,P8,P9,P10,P11,P12,P13);
% f = feval(fcn,x0+dx*lambda,P1,P2,P3,P4,P5,P6,P7,P8); % f = feval(fcn,x0+dx*lambda,P1,P2,P3,P4,P5,P6,P7,P8);

View File

@ -59,8 +59,8 @@ function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,m
% You should have received a copy of the GNU General Public License % You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>. % along with Dynare. If not, see <http://www.gnu.org/licenses/>.
global objective_function_penalty_base % initialize variable penalty
penalty = 1e8;
fh = []; fh = [];
xh = []; xh = [];
[nx,no]=size(x0); [nx,no]=size(x0);
@ -72,6 +72,14 @@ fcount=0;
gh = []; gh = [];
H = []; H = [];
retcodeh = []; retcodeh = [];
% force fcn, grad to function handle
if ischar(fcn)
fcn = str2func(fcn);
end
if ischar(grad)
grad = str2func(grad);
end
%tailstr = ')'; %tailstr = ')';
%stailstr = []; %stailstr = [];
% Lines below make the number of Pi's optional. This is inefficient, though, and precludes % Lines below make the number of Pi's optional. This is inefficient, though, and precludes
@ -83,7 +91,7 @@ retcodeh = [];
% stailstr=[' P' num2str(i) stailstr]; % stailstr=[' P' num2str(i) stailstr];
%end %end
[f0,junk1,junk2,cost_flag] = feval(fcn,x0,varargin{:}); [f0,cost_flag,arg1] = penalty_objective_function(x0,fcn,penalty,varargin{:});
if ~cost_flag if ~cost_flag
disp_verbose('Bad initial parameter.',Verbose) disp_verbose('Bad initial parameter.',Verbose)
@ -91,11 +99,11 @@ if ~cost_flag
end end
if NumGrad if NumGrad
[g, badg]=get_num_grad(method,fcn,f0,x0,epsilon,varargin{:}); [g, badg]=get_num_grad(method,fcn,penalty,f0,x0,epsilon,varargin{:});
elseif ischar(grad) elseif ischar(grad)
[g,badg] = feval(grad,x0,varargin{:}); [g,badg] = grad(x0,varargin{:});
else else
g=junk1; g=arg1;
badg=0; badg=0;
end end
retcode3=101; retcode3=101;
@ -105,7 +113,7 @@ H=H0;
cliff=0; cliff=0;
while ~done while ~done
% penalty for dsge_likelihood and dsge_var_likelihood % penalty for dsge_likelihood and dsge_var_likelihood
objective_function_penalty_base = f; penalty = f;
g1=[]; g2=[]; g3=[]; g1=[]; g2=[]; g3=[];
%addition fj. 7/6/94 for control %addition fj. 7/6/94 for control
@ -115,7 +123,7 @@ while ~done
% disp_verbose([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)]); % disp_verbose([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)]);
%------------------------- %-------------------------
itct=itct+1; itct=itct+1;
[f1, x1, fc, retcode1] = csminit1(fcn,x,f,g,badg,H,Verbose,varargin{:}); [f1, x1, fc, retcode1] = csminit1(fcn,x,penalty,f,g,badg,H,Verbose,varargin{:});
fcount = fcount+fc; fcount = fcount+fc;
% erased on 8/4/94 % erased on 8/4/94
% if (retcode == 1) || (abs(f1-f) < crit) % if (retcode == 1) || (abs(f1-f) < crit)
@ -130,17 +138,17 @@ while ~done
wall1=1; badg1=1; wall1=1; badg1=1;
else else
if NumGrad if NumGrad
[g1, badg1]=get_num_grad(method,fcn,f1,x1,epsilon,varargin{:}); [g1, badg1]=get_num_grad(method,fcn,penalty,f1,x1,epsilon,varargin{:});
elseif ischar(grad), elseif ischar(grad),
[g1, badg1] = feval(grad,x1,varargin{:}); [g1, badg1] = grad(x1,varargin{:});
else else
[junk1,g1,junk2, cost_flag] = feval(fcn,x1,varargin{:}); [junk1,cost_flag,g1] = penalty_objective_function(x1,fcn,penalty,varargin{:});
badg1 = ~cost_flag; badg1 = ~cost_flag;
end end
wall1=badg1; wall1=badg1;
% g1 % g1
if Save_files if Save_files
save g1.mat g1 x1 f1 varargin; save('g1.mat','g1','x1','f1','varargin');
end end
end end
if wall1 % && (~done) by Jinill if wall1 % && (~done) by Jinill
@ -150,18 +158,18 @@ while ~done
%fcliff=fh;xcliff=xh; %fcliff=fh;xcliff=xh;
Hcliff=H+diag(diag(H).*rand(nx,1)); Hcliff=H+diag(diag(H).*rand(nx,1));
disp_verbose('Cliff. Perturbing search direction.',Verbose) disp_verbose('Cliff. Perturbing search direction.',Verbose)
[f2, x2, fc, retcode2] = csminit1(fcn,x,f,g,badg,Hcliff,Verbose,varargin{:}); [f2, x2, fc, retcode2] = csminit1(fcn,x,penalty,f,g,badg,Hcliff,Verbose,varargin{:});
fcount = fcount+fc; % put by Jinill fcount = fcount+fc; % put by Jinill
if f2 < f if f2 < f
if retcode2==2 || retcode2==4 if retcode2==2 || retcode2==4
wall2=1; badg2=1; wall2=1; badg2=1;
else else
if NumGrad if NumGrad
[g2, badg2]=get_num_grad(method,fcn,f2,x2,epsilon,varargin{:}); [g2, badg2]=get_num_grad(method,fcn,penalty,f2,x2,epsilon,varargin{:});
elseif ischar(grad), elseif ischar(grad),
[g2, badg2] = feval(grad,x2,varargin{:}); [g2, badg2] = grad(x2,varargin{:});
else else
[junk1,g2,junk2, cost_flag] = feval(fcn,x1,varargin{:}); [junk2,cost_flag,g2] = penalty_objective_function(x1,fcn,penalty,varargin{:});
badg2 = ~cost_flag; badg2 = ~cost_flag;
end end
wall2=badg2; wall2=badg2;
@ -170,7 +178,7 @@ while ~done
badg2 badg2
end end
if Save_files if Save_files
save g2.mat g2 x2 f2 varargin save('g2.mat','g2','x2','f2','varargin');
end end
end end
if wall2 if wall2
@ -182,24 +190,24 @@ while ~done
if(size(x0,2)>1) if(size(x0,2)>1)
gcliff=gcliff'; gcliff=gcliff';
end end
[f3, x3, fc, retcode3] = csminit1(fcn,x,f,gcliff,0,eye(nx),Verbose,varargin{:}); [f3, x3, fc, retcode3] = csminit1(fcn,x,penalty,f,gcliff,0,eye(nx),Verbose,varargin{:});
fcount = fcount+fc; % put by Jinill fcount = fcount+fc; % put by Jinill
if retcode3==2 || retcode3==4 if retcode3==2 || retcode3==4
wall3=1; wall3=1;
badg3=1; badg3=1;
else else
if NumGrad if NumGrad
[g3, badg3]=get_num_grad(method,fcn,f3,x3,epsilon,varargin{:}); [g3, badg3]=get_num_grad(method,fcn,penalty,f3,x3,epsilon,varargin{:});
elseif ischar(grad), elseif ischar(grad),
[g3, badg3] = feval(grad,x3,varargin{:}); [g3, badg3] = grad(x3,varargin{:});
else else
[junk1,g3,junk2, cost_flag] = feval(fcn,x1,varargin{:}); [junk3,cost_flag,g3] = penalty_objective_function(x1,fcn,penalty,varargin{:});
badg3 = ~cost_flag; badg3 = ~cost_flag;
end end
wall3=badg3; wall3=badg3;
% g3 % g3
if Save_files if Save_files
save g3.mat g3 x3 f3 varargin; save('g3.mat','g3','x3','f3','varargin');
end end
end end
end end
@ -249,11 +257,11 @@ while ~done
end end
if nogh if nogh
if NumGrad if NumGrad
[gh, badgh]=get_num_grad(method,fcn,fh,xh,epsilon,varargin{:}); [gh, badgh]=get_num_grad(method,fcn,penalty,fh,xh,epsilon,varargin{:});
elseif ischar(grad), elseif ischar(grad),
[gh, badgh] = feval(grad, xh,varargin{:}); [gh, badgh] = grad(xh,varargin{:});
else else
[junk1,gh,junk2, cost_flag] = feval(fcn,x1,varargin{:}); [junkh,cost_flag,gh] = penalty_objective_function(x1,fcn,penalty,varargin{:});
badgh = ~cost_flag; badgh = ~cost_flag;
end end
end end
@ -303,18 +311,18 @@ end
end end
function [g, badg]=get_num_grad(method,fcn,f0,x0,epsilon,varargin) function [g, badg]=get_num_grad(method,fcn,penalty,f0,x0,epsilon,varargin)
switch method switch method
case 2 case 2
[g,badg] = numgrad2(fcn, f0, x0, epsilon, varargin{:}); [g,badg] = numgrad2(fcn, f0, x0, penalty, epsilon, varargin{:});
case 3 case 3
[g,badg] = numgrad3(fcn, f0, x0, epsilon, varargin{:}); [g,badg] = numgrad3(fcn, f0, x0, penalty, epsilon, varargin{:});
case 5 case 5
[g,badg] = numgrad5(fcn, f0, x0, epsilon, varargin{:}); [g,badg] = numgrad5(fcn, f0, x0, penalty, epsilon, varargin{:});
case 13 case 13
[g,badg] = numgrad3_(fcn, f0, x0, epsilon, varargin{:}); [g,badg] = numgrad3_(fcn, f0, x0, penalty, epsilon, varargin{:});
case 15 case 15
[g,badg] = numgrad5_(fcn, f0, x0, epsilon, varargin{:}); [g,badg] = numgrad5_(fcn, f0, x0, penalty, epsilon, varargin{:});
otherwise otherwise
error('csminwel1: Unknown method for gradient evaluation!') error('csminwel1: Unknown method for gradient evaluation!')
end end
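
All objective evaluations in csminwel1 now go through penalty_objective_function, whose source is not part of this excerpt. A minimal sketch of what such a wrapper is assumed to do, consistent with the call sites above (for example [f0,cost_flag,arg1] = penalty_objective_function(x0,fcn,penalty,varargin{:})), is given below; the actual Dynare implementation may differ, e.g. in how many outputs it requests from the objective.

function [fval, exit_flag, arg1, arg2] = penalty_objective_function(x, fcn, base_penalty, varargin)
% Assumed sketch of the wrapper used by the optimizers: it evaluates an
% objective following the convention [fval, info, exit_flag, ...] = fcn(x, ...)
% and, when the objective flags an error in info(1), replaces the returned
% Inf by the finite value base_penalty + info(4), so that line searches and
% finite-difference gradients remain well defined.
[fval, info, exit_flag, arg1, arg2] = fcn(x, varargin{:});
if info(1) ~= 0
    fval = base_penalty + info(4);
end

This also explains why csminwel1 (and newrat below) reset penalty = f at every iteration: the fallback value now tracks the best objective value found so far instead of the former fixed global of 1e8.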

View File

@ -1,5 +1,5 @@
function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,htol0,Verbose,Save_files,varargin) function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,varargin)
% function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin) % [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,varargin)
% %
% Gibbs type step in optimisation % Gibbs type step in optimisation
% %
@ -11,7 +11,7 @@ function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,htol0,Verbose,Save_files,varar
% varargin{6} --> BayesInfo % varargin{6} --> BayesInfo
% varargin{1} --> DynareResults % varargin{1} --> DynareResults
% Copyright (C) 2006-2014 Dynare Team % Copyright (C) 2006-2016 Dynare Team
% %
% This file is part of Dynare. % This file is part of Dynare.
% %
@ -42,7 +42,7 @@ end
if length(htol)==1, if length(htol)==1,
htol=htol*ones(n,1); htol=htol*ones(n,1);
end end
f0=feval(func0,x,varargin{:}); f0=penalty_objective_function(x,func0,penalty,varargin{:});
xh1=x; xh1=x;
f1=zeros(size(f0,1),n); f1=zeros(size(f0,1),n);
@ -56,10 +56,10 @@ while i<n
hcheck=0; hcheck=0;
dx=[]; dx=[];
xh1(i)=x(i)+h1(i); xh1(i)=x(i)+h1(i);
fx = feval(func0,xh1,varargin{:}); fx = penalty_objective_function(xh1,func0,penalty,varargin{:});
f1(:,i)=fx; f1(:,i)=fx;
xh1(i)=x(i)-h1(i); xh1(i)=x(i)-h1(i);
fx = feval(func0,xh1,varargin{:}); fx = penalty_objective_function(xh1,func0,penalty,varargin{:});
f_1(:,i)=fx; f_1(:,i)=fx;
if hcheck && htol(i)<1 if hcheck && htol(i)<1
htol(i)=min(1,max(min(abs(dx))*2,htol(i)*10)); htol(i)=min(1,max(min(abs(dx))*2,htol(i)*10));
@ -72,7 +72,7 @@ while i<n
gg(i)=(f1(i)'-f_1(i)')./(2.*h1(i)); gg(i)=(f1(i)'-f_1(i)')./(2.*h1(i));
hh(i) = 1/max(1.e-9,abs( (f1(i)+f_1(i)-2*f0)./(h1(i)*h1(i)) )); hh(i) = 1/max(1.e-9,abs( (f1(i)+f_1(i)-2*f0)./(h1(i)*h1(i)) ));
if gg(i)*(hh(i)*gg(i))/2 > htol(i) if gg(i)*(hh(i)*gg(i))/2 > htol(i)
[f0 x fc retcode] = csminit1(func0,x,f0,gg,0,diag(hh),Verbose,varargin{:}); [f0 x fc retcode] = csminit1(func0,x,penalty,f0,gg,0,diag(hh),Verbose,varargin{:});
ig(i)=1; ig(i)=1;
if Verbose if Verbose
fprintf(['Done for param %s = %8.4f\n'],varargin{6}.name{i},x(i)) fprintf(['Done for param %s = %8.4f\n'],varargin{6}.name{i},x(i))
@ -82,11 +82,11 @@ while i<n
end end
x = check_bounds(x,bounds); x = check_bounds(x,bounds);
if Save_files if Save_files
save gstep.mat x h1 f0 save('gstep.mat','x','h1','f0')
end end
end end
if Save_files if Save_files
save gstep.mat x h1 f0 save('gstep.mat','x','h1','f0')
end end
return return

View File

@ -1,5 +1,5 @@
function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hflag,htol0,varargin) function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,penalty,hflag,htol0,varargin)
% [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hflag,htol0,varargin) % [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,penalty,hflag,htol0,varargin)
% %
% numerical gradient and Hessian, with 'automatic' check of numerical % numerical gradient and Hessian, with 'automatic' check of numerical
% error % error
@ -33,7 +33,7 @@ function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hf
% Copyright (C) 2004-2014 Dynare Team % Copyright (C) 2004-2016 Dynare Team
% %
% This file is part of Dynare. % This file is part of Dynare.
% %
@ -60,7 +60,7 @@ if init
return return
end end
[f0, ff0]=feval(func,x,varargin{:}); [f0,exit_flag, ff0]=penalty_objective_function(x,func,penalty,varargin{:});
h2=varargin{7}.ub-varargin{7}.lb; h2=varargin{7}.ub-varargin{7}.lb;
hmax=varargin{7}.ub-x; hmax=varargin{7}.ub-x;
hmax=min(hmax,x-varargin{7}.lb); hmax=min(hmax,x-varargin{7}.lb);
@ -94,7 +94,7 @@ while i<n
hcheck=0; hcheck=0;
xh1(i)=x(i)+h1(i); xh1(i)=x(i)+h1(i);
try try
[fx, ffx]=feval(func,xh1,varargin{:}); [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
catch catch
fx=1.e8; fx=1.e8;
end end
@ -115,7 +115,7 @@ while i<n
h1(i) = max(h1(i),1.e-10); h1(i) = max(h1(i),1.e-10);
xh1(i)=x(i)+h1(i); xh1(i)=x(i)+h1(i);
try try
[fx, ffx]=feval(func,xh1,varargin{:}); [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
catch catch
fx=1.e8; fx=1.e8;
end end
@ -124,14 +124,14 @@ while i<n
h1(i)= htol/abs(dx(it))*h1(i); h1(i)= htol/abs(dx(it))*h1(i);
xh1(i)=x(i)+h1(i); xh1(i)=x(i)+h1(i);
try try
[fx, ffx]=feval(func,xh1,varargin{:}); [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
catch catch
fx=1.e8; fx=1.e8;
end end
while (fx-f0)==0 while (fx-f0)==0
h1(i)= h1(i)*2; h1(i)= h1(i)*2;
xh1(i)=x(i)+h1(i); xh1(i)=x(i)+h1(i);
[fx, ffx]=feval(func,xh1,varargin{:}); [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
ic=1; ic=1;
end end
end end
@ -152,7 +152,7 @@ while i<n
end end
end end
xh1(i)=x(i)-h1(i); xh1(i)=x(i)-h1(i);
[fx, ffx]=feval(func,xh1,varargin{:}); [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
f_1(:,i)=fx; f_1(:,i)=fx;
if outer_product_gradient, if outer_product_gradient,
if any(isnan(ffx)) || isempty(ffx), if any(isnan(ffx)) || isempty(ffx),
@ -193,8 +193,8 @@ if outer_product_gradient,
xh1(j)=x(j)+h_1(j); xh1(j)=x(j)+h_1(j);
xh_1(i)=x(i)-h1(i); xh_1(i)=x(i)-h1(i);
xh_1(j)=x(j)-h_1(j); xh_1(j)=x(j)-h_1(j);
temp1 = feval(func,xh1,varargin{:}); temp1 = penalty_objective_function(xh1,func,penalty,varargin{:});
temp2 = feval(func,xh_1,varargin{:}); temp2 = penalty_objective_function(xh_1,func,penalty,varargin{:});
hessian_mat(:,(i-1)*n+j)=-(-temp1 -temp2+temp(:,i)+temp(:,j))./(2*h1(i)*h_1(j)); hessian_mat(:,(i-1)*n+j)=-(-temp1 -temp2+temp(:,i)+temp(:,j))./(2*h1(i)*h_1(j));
xh1(i)=x(i); xh1(i)=x(i);
xh1(j)=x(j); xh1(j)=x(j);

View File

@ -1,5 +1,5 @@
function [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, varargin) function [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, varargin)
% [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit, flagg, varargin) % [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, varargin)
% %
% Optimiser with outer product gradient and with sequences of univariate steps % Optimiser with outer product gradient and with sequences of univariate steps
% uses Chris Sims subroutine for line search % uses Chris Sims subroutine for line search
@ -30,7 +30,7 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_deriva
% varargin{1} --> DynareResults % varargin{1} --> DynareResults
% Copyright (C) 2004-2014 Dynare Team % Copyright (C) 2004-2016 Dynare Team
% %
% This file is part of Dynare. % This file is part of Dynare.
% %
@ -47,8 +47,8 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_deriva
% You should have received a copy of the GNU General Public License % You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>. % along with Dynare. If not, see <http://www.gnu.org/licenses/>.
global objective_function_penalty_base % initialize variable penalty
penalty = 1e8;
icount=0; icount=0;
nx=length(x); nx=length(x);
@ -62,17 +62,22 @@ htol=htol_base;
htol0=htol_base; htol0=htol_base;
gibbstol=length(varargin{6}.pshape)/50; %25; gibbstol=length(varargin{6}.pshape)/50; %25;
% force func0 to function handle
if ischar(func0)
func0 = str2func(func0);
end
% func0 = str2func([func2str(func0),'_hh']); % func0 = str2func([func2str(func0),'_hh']);
% func0 = func0; % func0 = func0;
[fval0,gg,hh]=feval(func0,x,varargin{:}); [fval0,exit_flag,gg,hh]=penalty_objective_function(x,func0,penalty,varargin{:});
fval=fval0; fval=fval0;
% initialize mr_gstep and mr_hessian % initialize mr_gstep and mr_hessian
outer_product_gradient=1; outer_product_gradient=1;
if isempty(hh) if isempty(hh)
mr_hessian(1,x,[],[],[],varargin{:}); mr_hessian(1,x,[],[],[],[],varargin{:});
[dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,x,func0,flagit,htol,varargin{:}); [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,x,func0,penalty,flagit,htol,varargin{:});
if isempty(dum), if isempty(dum),
outer_product_gradient=0; outer_product_gradient=0;
igg = 1e-4*eye(nx); igg = 1e-4*eye(nx);
@ -110,7 +115,7 @@ if max(eig(hh))<0
pause pause
end end
if Save_files if Save_files
save m1.mat x hh g hhg igg fval0 save('m1.mat','x','hh','g','hhg','igg','fval0')
end end
igrad=1; igrad=1;
@ -124,12 +129,12 @@ while norm(gg)>gtol && check==0 && jit<nit
jit=jit+1; jit=jit+1;
tic1 = tic; tic1 = tic;
icount=icount+1; icount=icount+1;
objective_function_penalty_base = fval0(icount); penalty = fval0(icount);
disp_verbose([' '],Verbose) disp_verbose([' '],Verbose)
disp_verbose(['Iteration ',num2str(icount)],Verbose) disp_verbose(['Iteration ',num2str(icount)],Verbose)
[fval,x0,fc,retcode] = csminit1(func0,xparam1,fval0(icount),gg,0,H,Verbose,varargin{:}); [fval,x0,fc,retcode] = csminit1(func0,xparam1,penalty,fval0(icount),gg,0,H,Verbose,varargin{:});
if igrad if igrad
[fval1,x01,fc,retcode1] = csminit1(func0,x0,fval,gg,0,inx,Verbose,varargin{:}); [fval1,x01,fc,retcode1] = csminit1(func0,x0,penalty,fval,gg,0,inx,Verbose,varargin{:});
if (fval-fval1)>1 if (fval-fval1)>1
disp_verbose('Gradient step!!',Verbose) disp_verbose('Gradient step!!',Verbose)
else else
@ -148,10 +153,10 @@ while norm(gg)>gtol && check==0 && jit<nit
end end
iggx=eye(length(gg)); iggx=eye(length(gg));
iggx(find(ig),find(ig)) = inv( hhx(find(ig),find(ig)) ); iggx(find(ig),find(ig)) = inv( hhx(find(ig),find(ig)) );
[fvala,x0,fc,retcode] = csminit1(func0,x0,fval,ggx,0,iggx,Verbose,varargin{:}); [fvala,x0,fc,retcode] = csminit1(func0,x0,penalty,fval,ggx,0,iggx,Verbose,varargin{:});
end end
x0 = check_bounds(x0,bounds); x0 = check_bounds(x0,bounds);
[fvala, x0, ig] = mr_gstep(h1,x0,bounds,func0,htol0,Verbose,Save_files,varargin{:}); [fvala, x0, ig] = mr_gstep(h1,x0,bounds,func0,penalty,htol0,Verbose,Save_files,varargin{:});
x0 = check_bounds(x0,bounds); x0 = check_bounds(x0,bounds);
nig=[nig ig]; nig=[nig ig];
disp_verbose('Sequence of univariate steps!!',Verbose) disp_verbose('Sequence of univariate steps!!',Verbose)
@ -159,7 +164,7 @@ while norm(gg)>gtol && check==0 && jit<nit
if (fval0(icount)-fval)<ftol && flagit==0 if (fval0(icount)-fval)<ftol && flagit==0
disp_verbose('Try diagonal Hessian',Verbose) disp_verbose('Try diagonal Hessian',Verbose)
ihh=diag(1./(diag(hhg))); ihh=diag(1./(diag(hhg)));
[fval2,x0,fc,retcode2] = csminit1(func0,x0,fval,gg,0,ihh,Verbose,varargin{:}); [fval2,x0,fc,retcode2] = csminit1(func0,x0,penalty,fval,gg,0,ihh,Verbose,varargin{:});
x0 = check_bounds(x0,bounds); x0 = check_bounds(x0,bounds);
if (fval-fval2)>=ftol if (fval-fval2)>=ftol
disp_verbose('Diagonal Hessian successful',Verbose) disp_verbose('Diagonal Hessian successful',Verbose)
@ -169,7 +174,7 @@ while norm(gg)>gtol && check==0 && jit<nit
if (fval0(icount)-fval)<ftol && flagit==0 if (fval0(icount)-fval)<ftol && flagit==0
disp_verbose('Try gradient direction',Verbose) disp_verbose('Try gradient direction',Verbose)
ihh0=inx.*1.e-4; ihh0=inx.*1.e-4;
[fval3,x0,fc,retcode3] = csminit1(func0,x0,fval,gg,0,ihh0,Verbose,varargin{:}); [fval3,x0,fc,retcode3] = csminit1(func0,x0,penalty,fval,gg,0,ihh0,Verbose,varargin{:});
x0 = check_bounds(x0,bounds); x0 = check_bounds(x0,bounds);
if (fval-fval3)>=ftol if (fval-fval3)>=ftol
disp_verbose('Gradient direction successful',Verbose) disp_verbose('Gradient direction successful',Verbose)
@ -183,14 +188,14 @@ while norm(gg)>gtol && check==0 && jit<nit
disp_verbose('No further improvement is possible!',Verbose) disp_verbose('No further improvement is possible!',Verbose)
check=1; check=1;
if analytic_derivation, if analytic_derivation,
[fvalx,gg,hh]=feval(func0,xparam1,varargin{:}); [fvalx,exit_flag,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
hhg=hh; hhg=hh;
H = inv(hh); H = inv(hh);
else else
if flagit==2 if flagit==2
hh=hh0; hh=hh0;
elseif flagg>0 elseif flagg>0
[dum, gg, htol0, igg, hhg,h1]=mr_hessian(0,xparam1,func0,flagg,ftol0,varargin{:}); [dum, gg, htol0, igg, hhg,h1]=mr_hessian(0,xparam1,func0,penalty,flagg,ftol0,varargin{:});
if flagg==2 if flagg==2
hh = reshape(dum,nx,nx); hh = reshape(dum,nx,nx);
ee=eig(hh); ee=eig(hh);
@ -223,14 +228,14 @@ while norm(gg)>gtol && check==0 && jit<nit
if norm(x(:,icount)-xparam1)>1.e-12 && analytic_derivation==0, if norm(x(:,icount)-xparam1)>1.e-12 && analytic_derivation==0,
try try
if Save_files if Save_files
save m1.mat x fval0 nig -append save('m1.mat','x','fval0','nig','-append')
end end
catch catch
if Save_files if Save_files
save m1.mat x fval0 nig save('m1.mat','x','fval0','nig')
end end
end end
[dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func0,flagit,htol,varargin{:}); [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func0,penalty,flagit,htol,varargin{:});
if isempty(dum), if isempty(dum),
outer_product_gradient=0; outer_product_gradient=0;
end end
@ -258,7 +263,7 @@ while norm(gg)>gtol && check==0 && jit<nit
H = igg; H = igg;
end end
elseif analytic_derivation, elseif analytic_derivation,
[fvalx,gg,hh]=feval(func0,xparam1,varargin{:}); [fvalx,exit_flag,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
hhg=hh; hhg=hh;
H = inv(hh); H = inv(hh);
end end
@ -274,12 +279,12 @@ while norm(gg)>gtol && check==0 && jit<nit
disp_verbose(['Elapsed time for iteration ',num2str(t),' s.'],Verbose) disp_verbose(['Elapsed time for iteration ',num2str(t),' s.'],Verbose)
g(:,icount+1)=gg; g(:,icount+1)=gg;
if Save_files if Save_files
save m1.mat x hh g hhg igg fval0 nig H save('m1.mat','x','hh','g','hhg','igg','fval0','nig','H')
end end
end end
end end
if Save_files if Save_files
save m1.mat x hh g hhg igg fval0 nig save('m1.mat','x','hh','g','hhg','igg','fval0','nig')
end end
if ftol>ftol0 if ftol>ftol0
skipline() skipline()

View File

@ -1,10 +1,10 @@
function [g, badg] = numgrad2(fcn,f0,x,epsilon,varargin) function [g, badg] = numgrad2(fcn,f0,x,penalty,epsilon,varargin)
% Original file downloaded from: % Original file downloaded from:
% http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m
% Copyright (C) 1993-2007 Christopher Sims % Copyright (C) 1993-2007 Christopher Sims
% Copyright (C) 2008-2014 Dynare Team % Copyright (C) 2008-2016 Dynare Team
% %
% This file is part of Dynare. % This file is part of Dynare.
% %
@ -33,11 +33,11 @@ for i=1:n
xiold = x(i); xiold = x(i);
if right_derivative if right_derivative
x(i) = x(i)+h; x(i) = x(i)+h;
fh = feval(fcn, x, varargin{:}); fh = penalty_objective_function(x, fcn, penalty, varargin{:});
g0 = (fh-f0)/h; g0 = (fh-f0)/h;
else else
x(i) = x(i)-h; x(i) = x(i)-h;
fh = feval(fcn, x, varargin{:}); fh = penalty_objective_function(x, fcn, penalty, varargin{:});
g0 = (f0-fh)/h; g0 = (f0-fh)/h;
end end
if abs(g0)< 1e15 if abs(g0)< 1e15
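The loop above computes a one-sided (forward or backward) difference for each coordinate. A self-contained toy check of the right-derivative branch (function, point, and step size are made up for the example):

    % Forward difference: g0 = (f(x + h*e_i) - f(x)) / h, here for a simple quadratic.
    f  = @(x) x(1)^2 + 3*x(2);
    x  = [1; 2];  h = 1e-7;
    f0 = f(x);
    xr = x;  xr(1) = xr(1) + h;
    g1 = (f(xr) - f0)/h;      % approximately 2*x(1) = 2
    % the left branch mirrors this with x(i) - h and (f0 - fh)/h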

View File

@ -1,4 +1,4 @@
function [g, badg] = numgrad3(fcn,f0,x,epsilon,varargin) function [g, badg] = numgrad3(fcn,f0,x,penalty,epsilon,varargin)
% Computes the gradient of the objective function fcn using a three points % Computes the gradient of the objective function fcn using a three points
% formula if possible. % formula if possible.
% %
@ -11,7 +11,7 @@ function [g, badg] = numgrad3(fcn,f0,x,epsilon,varargin)
% http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m
% Copyright (C) 1993-2007 Christopher Sims % Copyright (C) 1993-2007 Christopher Sims
% Copyright (C) 2008-2014 Dynare Team % Copyright (C) 2008-2016 Dynare Team
% %
% This file is part of Dynare. % This file is part of Dynare.
% %
@ -38,9 +38,9 @@ badg=0;
for i=1:n for i=1:n
xiold = x(i); xiold = x(i);
x(i) = xiold+h; x(i) = xiold+h;
f1 = feval(fcn, x, varargin{:}); f1 = penalty_objective_function(x, fcn, penalty, varargin{:});
x(i) = xiold-h; x(i) = xiold-h;
f2 = feval(fcn, x, varargin{:}); f2 = penalty_objective_function(x, fcn, penalty, varargin{:});
g0 = (f1-f2)/H; g0 = (f1-f2)/H;
if abs(g0)< 1e15 if abs(g0)< 1e15
g(i)=g0; g(i)=g0;
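numgrad3 uses the three-point (central-difference) formula; H is assumed here to equal 2*h. A self-contained toy check (function and step size are made up):

    % Central difference: g0 = (f(x + h*e_i) - f(x - h*e_i)) / (2*h), O(h^2) accurate.
    f = @(x) sum(x.^2);
    x = [1; 2; 3];  h = 1e-6;
    g = zeros(size(x));
    for i = 1:numel(x)
        xp = x;  xp(i) = xp(i) + h;
        xm = x;  xm(i) = xm(i) - h;
        g(i) = (f(xp) - f(xm)) / (2*h);
    end
    % g is approximately the exact gradient 2*x = [2; 4; 6]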

View File

@ -1,4 +1,4 @@
function [g, badg] = numgrad3_(fcn,f0,x,epsilon,varargin) function [g, badg] = numgrad3_(fcn,f0,x,penalty,epsilon,varargin)
% Computes the gradient of the objective function fcn using a three points % Computes the gradient of the objective function fcn using a three points
% formula if possible. % formula if possible.
% %
@ -46,12 +46,12 @@ for i=1:n
xiold = x(i); xiold = x(i);
h = step_length_correction(xiold,scale,i)*delta; h = step_length_correction(xiold,scale,i)*delta;
x(i) = xiold + h; x(i) = xiold + h;
[f1,junk1,junk2,cost_flag1] = feval(fcn, x, varargin{:}); [f1,junk1,cost_flag1] = penalty_objective_function(x, fcn, penalty, varargin{:});
if ~cost_flag1 if ~cost_flag1
fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the right!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h)) fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the right!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h))
end end
x(i) = xiold - h; x(i) = xiold - h;
[f2,junk2,junk2,cost_flag2] = feval(fcn, x, varargin{:}); [f2,junk2,cost_flag2] = penalty_objective_function(x, fcn, penalty, varargin{:});
if ~cost_flag2 if ~cost_flag2
fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the left!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h)) fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the left!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h))
end end

View File

@ -1,4 +1,4 @@
function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin) function [g, badg] = numgrad5(fcn,f0,x,penalty,epsilon,varargin)
% Computes the gradient of the objective function fcn using a five points % Computes the gradient of the objective function fcn using a five points
% formula if possible. % formula if possible.
% %
@ -13,7 +13,7 @@ function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin)
% http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m
% Copyright (C) 1993-2007 Christopher Sims % Copyright (C) 1993-2007 Christopher Sims
% Copyright (C) 2008-2014 Dynare Team % Copyright (C) 2008-2016 Dynare Team
% This file is part of Dynare. % This file is part of Dynare.
% %
@ -40,13 +40,13 @@ badg = 0;
for i=1:n for i=1:n
xiold = x(i); xiold = x(i);
x(i) = xiold+h; x(i) = xiold+h;
f1 = feval(fcn, x, varargin{:}); f1 = penalty_objective_function(x, fcn, penalty, varargin{:});
x(i) = xiold-h; x(i) = xiold-h;
f2 = feval(fcn, x, varargin{:}); f2 = penalty_objective_function(x, fcn, penalty, varargin{:});
x(i) = xiold+2*h; x(i) = xiold+2*h;
f3 = feval(fcn, x, varargin{:}); f3 = penalty_objective_function(x, fcn, penalty, varargin{:});
x(i) = xiold-2*h; x(i) = xiold-2*h;
f4 = feval(fcn, x, varargin{:}); f4 = penalty_objective_function(x, fcn, penalty, varargin{:});
g0 = (8*(f1-f2)+f4-f3)/H; g0 = (8*(f1-f2)+f4-f3)/H;
if abs(g0)< 1e15 if abs(g0)< 1e15
g(i) = g0; g(i) = g0;
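numgrad5 uses the standard five-point stencil; H is assumed here to equal 12*h. A self-contained toy check of the formula (function, point, and step size are made up):

    % Five-point stencil: f'(x) ~ (8*(f(x+h) - f(x-h)) + f(x-2h) - f(x+2h)) / (12*h), O(h^4).
    f  = @(x) sin(x);
    x0 = 0.7;  h = 1e-3;
    f1 = f(x0+h);    f2 = f(x0-h);
    f3 = f(x0+2*h);  f4 = f(x0-2*h);
    g0 = (8*(f1-f2) + f4 - f3) / (12*h);   % approximately cos(0.7)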

View File

@ -1,4 +1,4 @@
function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin) function [g, badg] = numgrad5_(fcn,f0,x,penalty,epsilon,varargin)
% Computes the gradient of the objective function fcn using a five points % Computes the gradient of the objective function fcn using a five points
% formula if possible. % formula if possible.
% %
@ -13,7 +13,7 @@ function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin)
% http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m
% Copyright (C) 1993-2007 Christopher Sims % Copyright (C) 1993-2007 Christopher Sims
% Copyright (C) 2008-2014 Dynare Team % Copyright (C) 2008-2016 Dynare Team
% %
% This file is part of Dynare. % This file is part of Dynare.
% %
@ -48,13 +48,13 @@ for i=1:n
xiold = x(i); xiold = x(i);
h = step_length_correction(xiold,scale,i)*delta; h = step_length_correction(xiold,scale,i)*delta;
x(i) = xiold+h; x(i) = xiold+h;
[f1,junk1,junk2,cost_flag1] = feval(fcn, x, varargin{:}); [f1,junk1,cost_flag1] = penalty_objective_function(x, fcn, penalty, varargin{:});

x(i) = xiold-h; x(i) = xiold-h;
[f2,junk1,junk2,cost_flag2] = feval(fcn, x, varargin{:}); [f2,junk1,cost_flag2] = penalty_objective_function(x, fcn, penalty, varargin{:});
x(i) = xiold+2*h; x(i) = xiold+2*h;
[f3,junk1,junk2,cost_flag3] = feval(fcn, x, varargin{:}); [f3,junk1,cost_flag3] = penalty_objective_function(x, fcn, penalty, varargin{:});
x(i) = xiold-2*h; x(i) = xiold-2*h;
[f4,junk1,junk2,cost_flag4] = feval(fcn, x, varargin{:}); [f4,junk1,cost_flag4] = penalty_objective_function(x, fcn, penalty, varargin{:});
if f0<f1 && f1<f3 && f0<f2 && f2<f4 if f0<f1 && f1<f3 && f0<f2 && f2<f4
g0 = 0; g0 = 0;
else else

View File

@ -2,6 +2,6 @@ function [fval,exit_flag,arg1,arg2] = penalty_objective_function(x0,fcn,penalty,
[fval,info,exit_flag,arg1,arg2] = fcn(x0,varargin{:}); [fval,info,exit_flag,arg1,arg2] = fcn(x0,varargin{:});
if info(1) ~= 0 if info(1) ~= 0
fval = penalty + info(2); fval = penalty + info(4);
end end
end end
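The wrapper's contract is that, whenever the underlying objective reports a problem (info(1) ~= 0), the returned value becomes the supplied penalty plus the magnitude stored in info(4). A self-contained toy illustration of that contract (the objective handles and numbers below are made up; the real objectives return more outputs):

    % Toy objectives returning [fval, info, exit_flag], mimicking the wrapper's logic.
    ok_fcn  = @(x) deal(sum(x.^2), zeros(4,1), 1);        % info(1)==0: no problem
    bad_fcn = @(x) deal(NaN, [42; 0; 0; 2.5], 0);         % info(1)==42, info(4)==2.5
    penalty = 100;                                        % e.g. best objective value found so far

    [fval, info, exit_flag] = ok_fcn([1 2]);
    if info(1) ~= 0, fval = penalty + info(4); end        % fval stays 5

    [fval, info, exit_flag] = bad_fcn([1 2]);
    if info(1) ~= 0, fval = penalty + info(4); end        % fval becomes 102.5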

View File

@ -43,13 +43,6 @@ while look_for_admissible_initial_condition
end end
end end
% Evaluate the prior density at the initial condition.
objective_function_penalty_base = minus_logged_prior_density(xinit, BayesInfo.pshape, ...
BayesInfo.p6, ...
BayesInfo.p7, ...
BayesInfo.p3, ...
BayesInfo.p4,DynareOptions,ModelInfo,EstimationInfo,DynareResults);
% Maximization of the prior density % Maximization of the prior density
[xparams, lpd, hessian_mat] = ... [xparams, lpd, hessian_mat] = ...
maximize_prior_density(xinit, BayesInfo.pshape, ... maximize_prior_density(xinit, BayesInfo.pshape, ...

View File

@ -40,7 +40,7 @@ global M_ oo_ options_ optimal_Q_ it_
junk = []; junk = [];
exit_flag = 1; exit_flag = 1;
vx = []; vx = [];
info=0; info=zeros(4,1);
loss=[]; loss=[];
% set parameters of the policy rule % set parameters of the policy rule
M_.params(i_params) = x; M_.params(i_params) = x;
@ -49,42 +49,18 @@ M_.params(i_params) = x;
it_ = M_.maximum_lag+1; it_ = M_.maximum_lag+1;
[dr,info,M_,options_,oo_] = resol(0,M_,options_,oo_); [dr,info,M_,options_,oo_] = resol(0,M_,options_,oo_);
switch info(1) if info(1)
case 1 if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1) == 6 || info(1) == 19 || ...
loss = 1e8; info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
return info(1) == 81 || info(1) == 84 || info(1) == 85
case 2 loss = 1e8;
loss = 1e8*min(1e3,info(2)); info(4)=info(2);
return return
case 3 else
loss = 1e8*min(1e3,info(2)); loss = 1e8;
return info(4)=0.1;
case 4 return
loss = 1e8*min(1e3,info(2)); end
return
case 5
loss = 1e8;
return
case 6
loss = 1e8*min(1e3,info(2));
return
case 7
loss = 1e8*min(1e3);
return
case 8
loss = 1e8*min(1e3,info(2));
return
case 9
loss = 1e8*min(1e3,info(2));
return
case 20
loss = 1e8*min(1e3,info(2));
return
otherwise
if info(1)~=0
loss = 1e8;
return;
end
end end
vx = get_variance_of_endogenous_variables(dr,i_var); vx = get_variance_of_endogenous_variables(dr,i_var);
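The rewritten error handling follows the same convention as the likelihood functions: on failure, the loss is set to a large value and info(4) carries the magnitude callers can use to build an endogenous penalty. A toy sketch of that convention (codes and numbers are made up; the list of "meaningful" codes mirrors the excerpt above):

    % Sketch of the new info(4) convention after a failed resol call.
    info    = zeros(4,1);
    info(1) = 20;        % e.g. steady state not found
    info(2) = 0.37;      % magnitude reported by resol (sum of squared residuals)
    meaningful = [3 4 5 6 19 20 21 23 26 81 84 85];
    if info(1)
        loss = 1e8;
        if any(info(1) == meaningful)
            info(4) = info(2);   % pass a meaningful magnitude up to the caller
        else
            info(4) = 0.1;       % otherwise a small constant
        end
    end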

View File

@ -28,8 +28,7 @@ function [fOutVar,nBlockPerCPU, totCPU] = masterParallel(Parallel,fBlock,nBlock,
% The number of parallelized threads will be equal to (nBlock-fBlock+1). % The number of parallelized threads will be equal to (nBlock-fBlock+1).
% %
% Treatment of global variables: % Treatment of global variables:
% Global variables used within the called function (e.g. % Global variables used within the called function are wrapped and passed by storing their
% objective_function_penalty_base) are wrapped and passed by storing their
% values at the start of the parallel computation in a file via % values at the start of the parallel computation in a file via
% storeGlobalVars.m. This file is then loaded in the separate, % storeGlobalVars.m. This file is then loaded in the separate,
% independent slave Matlab sessions. By keeping them separate, no % independent slave Matlab sessions. By keeping them separate, no
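A generic sketch of the mechanism described above (this is not the actual storeGlobalVars.m code; the file name and variable list are made up for illustration):

    % Master side: snapshot the globals needed by the slave sessions.
    global options_ M_
    save('slave_globals.mat', 'options_', 'M_');

    % Slave side: restore them as globals before running the parallelized function.
    s = load('slave_globals.mat');
    global options_ M_
    options_ = s.options_;
    M_ = s.M_;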

View File

@ -54,11 +54,6 @@ function posterior_sampler(TargetFun,ProposalFun,xparam1,sampler_options,mh_boun
% You should have received a copy of the GNU General Public License % You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>. % along with Dynare. If not, see <http://www.gnu.org/licenses/>.
% In Metropolis, we set penalty to Inf so as to reject all parameter sets triggering an error during target density computation
global objective_function_penalty_base
objective_function_penalty_base = Inf;
vv = sampler_options.invhess; vv = sampler_options.invhess;
% Initialization of the sampler % Initialization of the sampler
[ ix2, ilogpo2, ModelName, MetropolisFolder, fblck, fline, npar, nblck, nruns, NewFile, MAX_nruns, d ] = ... [ ix2, ilogpo2, ModelName, MetropolisFolder, fblck, fline, npar, nblck, nruns, NewFile, MAX_nruns, d ] = ...

View File

@ -68,7 +68,6 @@ switch posterior_sampling_method
bayestopt_=varargin{6}; bayestopt_=varargin{6};
npar=length(last_draw); npar=length(last_draw);
%% randomize indices for blocking in this iteration %% randomize indices for blocking in this iteration
global objective_function_penalty_base
indices=randperm(npar)'; indices=randperm(npar)';
blocks=[1; (1+cumsum((rand(length(indices)-1,1)>(1-sampler_options.new_block_probability))))]; blocks=[1; (1+cumsum((rand(length(indices)-1,1)>(1-sampler_options.new_block_probability))))];
nblocks=blocks(end,1); %get number of blocks this iteration nblocks=blocks(end,1); %get number of blocks this iteration
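The blocking scheme above starts a new block, after the first parameter, with probability new_block_probability. A self-contained toy example (npar and the probability are made up):

    % Random blocking of parameters for one TaRB sampler iteration.
    npar = 10;
    new_block_probability = 0.3;
    indices = randperm(npar)';                                        % random parameter order
    blocks  = [1; 1 + cumsum(rand(npar-1,1) > (1 - new_block_probability))];
    nblocks = blocks(end,1);                                          % number of blocks drawn
    % parameters with indices(blocks==k) form the k-th block and are updated jointly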
@ -82,7 +81,6 @@ switch posterior_sampling_method
[xopt_current_block, fval, exitflag, hess_mat_optimizer, options_, Scale] = dynare_minimize_objective(@TaRB_optimizer_wrapper,par_start_current_block,sampler_options.mode_compute,options_,[mh_bounds.lb(indices(blocks==block_iter,1),1) mh_bounds.ub(indices(blocks==block_iter,1),1)],bayestopt_.name,bayestopt_,[],... [xopt_current_block, fval, exitflag, hess_mat_optimizer, options_, Scale] = dynare_minimize_objective(@TaRB_optimizer_wrapper,par_start_current_block,sampler_options.mode_compute,options_,[mh_bounds.lb(indices(blocks==block_iter,1),1) mh_bounds.ub(indices(blocks==block_iter,1),1)],bayestopt_.name,bayestopt_,[],...
current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper
varargin{:}); %inputs for objective varargin{:}); %inputs for objective
objective_function_penalty_base=Inf; %reset penalty that may have been changed by optimizer
%% covariance for proposal density %% covariance for proposal density
hessian_mat = reshape(hessian('TaRB_optimizer_wrapper',xopt_current_block, ... hessian_mat = reshape(hessian('TaRB_optimizer_wrapper',xopt_current_block, ...
options_.gstep,... options_.gstep,...