diff --git a/matlab/cli/prior.m b/matlab/cli/prior.m index 9a4ead849..f25c4a9fa 100644 --- a/matlab/cli/prior.m +++ b/matlab/cli/prior.m @@ -40,7 +40,7 @@ if isempty(varargin) || ( isequal(length(varargin), 1) && isequal(varargin{1},'h return end -global options_ M_ estim_params_ bayestopt_ oo_ objective_function_penalty_base +global options_ M_ estim_params_ bayestopt_ oo_ donesomething = false; diff --git a/matlab/dsge_likelihood.m b/matlab/dsge_likelihood.m index d5dfaefdf..5dac52681 100644 --- a/matlab/dsge_likelihood.m +++ b/matlab/dsge_likelihood.m @@ -55,9 +55,9 @@ function [fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff,Model,DynareOpti %! @item info==19 %! The steadystate routine thrown an exception (inconsistent deep parameters). %! @item info==20 -%! Cannot find the steady state, info(2) contains the sum of square residuals (of the static equations). +%! Cannot find the steady state, info(4) contains the sum of square residuals (of the static equations). %! @item info==21 -%! The steady state is complex, info(2) contains the sum of square of imaginary parts of the steady state. +%! The steady state is complex, info(4) contains the sum of square of imaginary parts of the steady state. %! @item info==22 %! The steady has NaNs. %! @item info==23 @@ -134,14 +134,12 @@ function [fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff,Model,DynareOpti % AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT FR -global objective_function_penalty_base - % Initialization of the returned variables and others... fval = []; SteadyState = []; trend_coeff = []; exit_flag = 1; -info = 0; +info = zeros(4,1); DLIK = []; Hess = []; @@ -181,9 +179,10 @@ end % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain. 
if ~isequal(DynareOptions.mode_compute,1) && any(xparam1BoundsInfo.ub) k = find(xparam1>BoundsInfo.ub); - fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2); + fval = Inf; exit_flag = 0; - info = 42; + info(1) = 42; + info(4)= sum((xparam1(k)-BoundsInfo.ub(k)).^2); if analytic_derivation, DLIK=ones(length(xparam1),1); end @@ -212,18 +212,20 @@ H = Model.H; if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances') [Q_is_positive_definite, penalty] = ispd(Q(EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness,EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness)); if ~Q_is_positive_definite - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 43; + info(1) = 43; + info(4) = penalty; return end if isfield(EstimatedParameters,'calibrated_covariances') correct_flag=check_consistency_covariances(Q); if ~correct_flag penalty = sum(Q(EstimatedParameters.calibrated_covariances.position).^2); - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 71; + info(1) = 71; + info(4) = penalty; return end end @@ -233,18 +235,20 @@ end if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME') [H_is_positive_definite, penalty] = ispd(H(EstimatedParameters.H_entries_to_check_for_positive_definiteness,EstimatedParameters.H_entries_to_check_for_positive_definiteness)); if ~H_is_positive_definite - fval = objective_function_penalty_base+penalty; + fval = Inf; + info(1) = 44; + info(4) = penalty; exit_flag = 0; - info = 44; return end if isfield(EstimatedParameters,'calibrated_covariances_ME') correct_flag=check_consistency_covariances(H); if ~correct_flag penalty = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2); - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 72; + info(1) = 72; + info(4) = penalty; return end end @@ 
-259,30 +263,36 @@ end [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict'); % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol). -if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || info(1) == 9 || ... - info(1) == 22 || info(1) == 24 || info(1) == 19 || info(1) == 25 || info(1) == 10 - fval = objective_function_penalty_base+1; - info = info(1); - exit_flag = 0; - if analytic_derivation, - DLIK=ones(length(xparam1),1); +if info(1) + if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1)==6 ||info(1) == 19 || ... + info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ... + info(1) == 81 || info(1) == 84 || info(1) == 85 + %meaningful second entry of output that can be used + fval = Inf; + info(4) = info(2); + exit_flag = 0; + if analytic_derivation, + DLIK=ones(length(xparam1),1); + end + + return + else + fval = Inf; + info(4) = 0.1; + exit_flag = 0; + if analytic_derivation, + DLIK=ones(length(xparam1),1); + end + + return end - return -elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1)==26 - fval = objective_function_penalty_base+info(2); - info = info(1); - exit_flag = 0; - if analytic_derivation, - DLIK=ones(length(xparam1),1); - end - return end % check endogenous prior restrictions info=endogenous_prior_restrictions(T,R,Model,DynareOptions,DynareResults); if info(1), - fval = objective_function_penalty_base+info(2); - info = info(1); + fval = Inf; + info(4)=info(2); exit_flag = 0; if analytic_derivation, DLIK=ones(length(xparam1),1); @@ -458,8 +468,9 @@ switch DynareOptions.lik_init diffuse_periods = size(dlik,1); end if isnan(dLIK), - info = 45; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 45; + info(4) = 0.1; exit_flag = 0; return end @@ -698,8 +709,9 @@ if ((kalman_algo==1) || (kalman_algo==3))%
Multivariate Kalman Filter end else if isinf(LIK) - info = 66; - fval = objective_function_penalty_base+1; + fval = Inf; + info(1) = 66; + info(4) = 0.1; exit_flag = 0; return end @@ -707,7 +719,7 @@ if ((kalman_algo==1) || (kalman_algo==3))% Multivariate Kalman Filter else if DynareOptions.lik_init==3 LIK = LIK + dLIK; - if analytic_derivation==0 && nargout==2, + if analytic_derivation==0 && nargout>3, if ~singular_diffuse_filter, lik = [dlik; lik]; else @@ -771,7 +783,7 @@ if (kalman_algo==2) || (kalman_algo==4) end if DynareOptions.lik_init==3 LIK = LIK+dLIK; - if analytic_derivation==0 && nargout==2, + if analytic_derivation==0 && nargout>3, lik = [dlik; lik]; end end @@ -797,15 +809,17 @@ if analytic_derivation end if isnan(LIK) - info = 45; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 45; + info(4) = 0.1; exit_flag = 0; return end if imag(LIK)~=0 - info = 46; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 46; + info(4) = 0.1; exit_flag = 0; return end @@ -851,15 +865,17 @@ if DynareOptions.prior_restrictions.status end if isnan(fval) - info = 47; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 47; + info(4) = 0.1; exit_flag = 0; return end if imag(fval)~=0 - info = 48; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 48; + info(4) = 0.1; exit_flag = 0; return end @@ -867,7 +883,7 @@ end % Update DynareOptions.kalman_algo. 
DynareOptions.kalman_algo = kalman_algo; -if analytic_derivation==0 && nargout==2, +if analytic_derivation==0 && nargout>3, lik=lik(start:end,:); DLIK=[-lnprior; lik(:)]; end \ No newline at end of file diff --git a/matlab/dsge_var_likelihood.m b/matlab/dsge_var_likelihood.m index 1317f7e6c..2dc670e57 100644 --- a/matlab/dsge_var_likelihood.m +++ b/matlab/dsge_var_likelihood.m @@ -42,13 +42,12 @@ function [fval,info,exit_flag,grad,hess,SteadyState,trend_coeff,PHI,SIGMAu,iXX,p % You should have received a copy of the GNU General Public License % along with Dynare. If not, see . -global objective_function_penalty_base persistent dsge_prior_weight_idx grad=[]; hess=[]; exit_flag = []; -info = []; +info = zeros(4,1); PHI = []; SIGMAu = []; iXX = []; @@ -92,18 +91,20 @@ exit_flag = 1; % Return, with endogenous penalty, if some dsge-parameters are smaller than the lower bound of the prior domain. if DynareOptions.mode_compute ~= 1 && any(xparam1 < BoundsInfo.lb) k = find(xparam1 < BoundsInfo.lb); - fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2); + fval = Inf; exit_flag = 0; - info = 41; + info(1) = 41; + info(4)= sum((BoundsInfo.lb(k)-xparam1(k)).^2); return; end % Return, with endogenous penalty, if some dsge-parameters are greater than the upper bound of the prior domain. if DynareOptions.mode_compute ~= 1 && any(xparam1 > BoundsInfo.ub) k = find(xparam1 > BoundsInfo.ub); - fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2); + fval = Inf; exit_flag = 0; - info = 42; + info(1) = 42; + info(4) = sum((xparam1(k)-BoundsInfo.ub(k)).^2); return; end @@ -124,11 +125,12 @@ dsge_prior_weight = Model.params(dsge_prior_weight_idx); % Is the dsge prior proper? 
if dsge_prior_weight<(NumberOfParameters+NumberOfObservedVariables)/NumberOfObservations; - fval = objective_function_penalty_base+abs(NumberOfObservations*dsge_prior_weight-(NumberOfParameters+NumberOfObservedVariables)); + fval = Inf; exit_flag = 0; - info = 51; + info(1) = 51; info(2)=dsge_prior_weight; info(3)=(NumberOfParameters+NumberOfObservedVariables)/DynareDataset.nobs; + info(4)=abs(NumberOfObservations*dsge_prior_weight-(NumberOfParameters+NumberOfObservedVariables)); return end @@ -141,17 +143,21 @@ end [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict'); % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol). -if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || ... - info(1) == 22 || info(1) == 24 || info(1) == 25 || info(1) == 10 - fval = objective_function_penalty_base+1; - info = info(1); - exit_flag = 0; - return -elseif info(1) == 3 || info(1) == 4 || info(1) == 6 || info(1) == 19 || info(1) == 20 || info(1) == 21 - fval = objective_function_penalty_base+info(2); - info = info(1); - exit_flag = 0; - return +if info(1) + if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1)==6 ||info(1) == 19 || ... + info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ... + info(1) == 81 || info(1) == 84 || info(1) == 85 + %meaningful second entry of output that can be used + fval = Inf; + info(4) = info(2); + exit_flag = 0; + return + else + fval = Inf; + info(4) = 0.1; + exit_flag = 0; + return + end end % Define the mean/steady state vector.
@@ -218,8 +224,9 @@ if ~isinf(dsge_prior_weight)% Evaluation of the likelihood of the dsge-var model SIGMAu = tmp0 - tmp1*tmp2*tmp1'; clear('tmp0'); [SIGMAu_is_positive_definite, penalty] = ispd(SIGMAu); if ~SIGMAu_is_positive_definite - fval = objective_function_penalty_base + penalty; - info = 52; + fval = Inf; + info(1) = 52; + info(4) = penalty; exit_flag = 0; return; end @@ -249,15 +256,17 @@ else% Evaluation of the likelihood of the dsge-var model when the dsge prior wei end if isnan(lik) - info = 45; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 45; + info(4) = 0.1; exit_flag = 0; return end if imag(lik)~=0 - info = 46; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 46; + info(4) = 0.1; exit_flag = 0; return end @@ -267,15 +276,17 @@ lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo fval = (lik-lnprior); if isnan(fval) - info = 47; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 47; + info(4) = 0.1; exit_flag = 0; return end if imag(fval)~=0 - info = 48; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 48; + info(4) = 0.1; exit_flag = 0; return end diff --git a/matlab/dynare_estimation_init.m b/matlab/dynare_estimation_init.m index 9de2de7e6..6c2e36038 100644 --- a/matlab/dynare_estimation_init.m +++ b/matlab/dynare_estimation_init.m @@ -49,8 +49,6 @@ function [dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_, % You should have received a copy of the GNU General Public License % along with Dynare. If not, see . -global objective_function_penalty_base - hh = []; if isempty(gsa_flag) @@ -384,9 +382,6 @@ else% Yes! end end -% Set the "size" of penalty. -objective_function_penalty_base = 1e8; - % Get informations about the variables of the model. 
dr = set_state_space(oo_.dr,M_,options_); oo_.dr = dr; diff --git a/matlab/minus_logged_prior_density.m b/matlab/minus_logged_prior_density.m index 0b965b5f9..38662541e 100644 --- a/matlab/minus_logged_prior_density.m +++ b/matlab/minus_logged_prior_density.m @@ -1,4 +1,4 @@ -function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams,pshape,p6,p7,p3,p4,DynareOptions,DynareModel,EstimatedParams,DynareResults) +function [fval,info,exit_flag,fake_1,fake_2] = minus_logged_prior_density(xparams,pshape,p6,p7,p3,p4,DynareOptions,DynareModel,EstimatedParams,DynareResults) % Evaluates minus the logged prior density. % % INPUTS @@ -11,8 +11,9 @@ function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams, % % OUTPUTS % f [double] value of minus the logged prior density. - -% Copyright (C) 2009-2012 Dynare Team +% info [double] vector: second entry stores penalty, first entry the error code. +% +% Copyright (C) 2009-2016 Dynare Team % % This file is part of Dynare. % @@ -29,8 +30,6 @@ function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams, % You should have received a copy of the GNU General Public License % along with Dynare. If not, see . -global objective_function_penalty_base - fake_1 = 1; fake_2 = 1; @@ -44,18 +43,20 @@ info = 0; % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain. if ~isequal(DynareOptions.mode_compute,1) && any(xparamsp4) k = find(xparams>p4); - fval = objective_function_penalty_base+sum((xparams(k)-p4(k)).^2); + fval = Inf; exit_flag = 0; - info = 42; + info(1) = 42; + info(2) = sum((xparams(k)-p4(k)).^2); return end @@ -71,18 +72,20 @@ if ~issquare(Q) || EstimatedParams.ncx || isfield(EstimatedParams,'calibrated_co [Q_is_positive_definite, penalty] = ispd(Q); if ~Q_is_positive_definite % The variance-covariance matrix of the structural innovations is not definite positive. 
We have to compute the eigenvalues of this matrix in order to build the endogenous penalty. - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 43; + info(1) = 43; + info(2) = penalty; return end if isfield(EstimatedParams,'calibrated_covariances') correct_flag=check_consistency_covariances(Q); if ~correct_flag penalty = sum(Q(EstimatedParams.calibrated_covariances.position).^2); - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 71; + info(1) = 71; + info(2) = penalty; return end end @@ -94,18 +97,20 @@ if ~issquare(H) || EstimatedParams.ncn || isfield(EstimatedParams,'calibrated_co [H_is_positive_definite, penalty] = ispd(H); if ~H_is_positive_definite % The variance-covariance matrix of the measurement errors is not definite positive. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty. - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 44; + info(1) = 44; + info(2) = penalty; return end if isfield(EstimatedParams,'calibrated_covariances_ME') correct_flag=check_consistency_covariances(H); if ~correct_flag penalty = sum(H(EstimatedParams.calibrated_covariances_ME.position).^2); - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 72; + info(1) = 72; + info(2) = penalty; return end end @@ -120,18 +125,24 @@ M_ = set_all_parameters(xparams,EstimatedParams,DynareModel); [dr,info,DynareModel,DynareOptions,DynareResults] = resol(0,DynareModel,DynareOptions,DynareResults); % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol). -if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) ... 
- == 8 || info(1) == 22 || info(1) == 24 || info(1) == 19 - fval = objective_function_penalty_base+1; - info = info(1); - exit_flag = 0; - return -elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 20 || info(1) == 21 || info(1) == 23 - fval = objective_function_penalty_base+info(2); - info = info(1); - exit_flag = 0; - return +% Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol). +if info(1) + if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1)==6 ||info(1) == 19 || ... + info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ... + info(1) == 81 || info(1) == 84 || info(1) == 85 + %meaningful second entry of output that can be used + fval = Inf; + info(4) = info(2); + exit_flag = 0; + return + else + fval = Inf; + info(4) = 0.1; + exit_flag = 0; + return + end end + fval = - priordens(xparams,pshape,p6,p7,p3,p4); \ No newline at end of file diff --git a/matlab/non_linear_dsge_likelihood.m b/matlab/non_linear_dsge_likelihood.m index 35685ed2a..034f708ab 100644 --- a/matlab/non_linear_dsge_likelihood.m +++ b/matlab/non_linear_dsge_likelihood.m @@ -32,7 +32,7 @@ function [fval,info,exit_flag,DLIK,Hess,ys,trend_coeff,Model,DynareOptions,Bayes %! @item fval %! Double scalar, value of (minus) the likelihood. %! @item info -%! Double vector, second entry stores penalty, first entry the error code. +%! Double vector, fourth entry stores penalty, first entry the error code. %! @table @ @code %! @item info==0 %! No error. @@ -127,7 +127,6 @@ function [fval,info,exit_flag,DLIK,Hess,ys,trend_coeff,Model,DynareOptions,Bayes % AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT fr % frederic DOT karame AT univ DASH lemans DOT fr -global objective_function_penalty_base % Declaration of the penalty as a persistent variable.
persistent init_flag persistent restrict_variables_idx observed_variables_idx state_variables_idx mf0 mf1 @@ -153,18 +152,20 @@ end % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain. if (DynareOptions.mode_compute~=1) && any(xparam1BoundsInfo.ub) k = find(xparam1(:)>BoundsInfo.ub); - fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2); + fval = Inf; exit_flag = 0; - info = 42; + info(1) = 42; + info(4) = sum((xparam1(k)-BoundsInfo.ub(k)).^2); return end @@ -176,18 +177,20 @@ H = Model.H; if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances') [Q_is_positive_definite, penalty] = ispd(Q(EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness,EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness)); if ~Q_is_positive_definite - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 43; + info(1) = 43; + info(4) = penalty; return end if isfield(EstimatedParameters,'calibrated_covariances') correct_flag=check_consistency_covariances(Q); if ~correct_flag penalty = sum(Q(EstimatedParameters.calibrated_covariances.position).^2); - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 71; + info(1) = 71; + info(4) = penalty; return end end @@ -197,18 +200,20 @@ end if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME') [H_is_positive_definite, penalty] = ispd(H(EstimatedParameters.H_entries_to_check_for_positive_definiteness,EstimatedParameters.H_entries_to_check_for_positive_definiteness)); if ~H_is_positive_definite - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 44; + info(1) = 44; + info(4) = penalty; return end if isfield(EstimatedParameters,'calibrated_covariances_ME') correct_flag=check_consistency_covariances(H); if ~correct_flag penalty = 
sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2); - fval = objective_function_penalty_base+penalty; + fval = Inf; exit_flag = 0; - info = 72; + info(1) = 72; + info(4) = penalty; return end end @@ -222,14 +227,21 @@ end % Linearize the model around the deterministic sdteadystate and extract the matrices of the state equation (T and R). [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict'); -if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 25 || info(1) == 10 || info(1) == 7 || info(1) == 9 - fval = objective_function_penalty_base+1; - exit_flag = 0; - return -elseif info(1) == 3 || info(1) == 4 || info(1)==6 ||info(1) == 19 || info(1) == 20 || info(1) == 21 - fval = objective_function_penalty_base+info(2); - exit_flag = 0; - return +if info(1) + if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1)==6 ||info(1) == 19 || ... + info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ... + info(1) == 81 || info(1) == 84 || info(1) == 85 + %meaningful second entry of output that can be used + fval = Inf; + info(4) = info(2); + exit_flag = 0; + return + else + fval = Inf; + info(4) = 0.1; + exit_flag = 0; + return + end end % Define a vector of indices for the observed variables. Is this really usefull?...
@@ -321,13 +333,15 @@ DynareOptions.warning_for_steadystate = 0; LIK = feval(DynareOptions.particle.algorithm,ReducedForm,Y,start,DynareOptions.particle,DynareOptions.threads); set_dynare_random_generator_state(s1,s2); if imag(LIK) - info = 46; - likelihood = objective_function_penalty_base; - exit_flag = 0; + likelihood = Inf; + info(1) = 46; + info(4) = 0.1; + exit_flag = 0; elseif isnan(LIK) - info = 45; - likelihood = objective_function_penalty_base; - exit_flag = 0; + likelihood = Inf; + info(1) = 45; + info(4) = 0.1; + exit_flag = 0; else likelihood = LIK; end @@ -339,15 +353,17 @@ lnprior = priordens(xparam1(:),BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesI fval = (likelihood-lnprior); if isnan(fval) - info = 47; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 47; + info(4) = 0.1; exit_flag = 0; return end if imag(fval)~=0 - info = 48; - fval = objective_function_penalty_base + 100; + fval = Inf; + info(1) = 48; + info(4) = 0.1; exit_flag = 0; return end diff --git a/matlab/optimization/csminit1.m b/matlab/optimization/csminit1.m index 428c704c6..1a0795195 100644 --- a/matlab/optimization/csminit1.m +++ b/matlab/optimization/csminit1.m @@ -1,24 +1,28 @@ -function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,Verbose,varargin) -% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin) +function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,penalty,f0,g0,badg,H0,Verbose,varargin) +% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,penalty,f0,g0,badg,H0,Verbose,varargin) % % Inputs: -% fcn: [string] string naming the objective function to be minimized -% x0: [npar by 1] initial value of the parameter vector -% g0: [npar by 1] initial value of the gradient vector -% H0: [npar by npar] initial value for the inverse Hessian. Must be positive definite. -% varargin: Optional additional inputs that get handed off to fcn each -% time it is called. 
+% fcn: [string] string naming the objective function to be minimized +% x0: [npar by 1] initial value of the parameter vector +% penalty: [scalar] variable penalty in case of failure of objective function +% f0: [scalar] initial value of the fucntion +% g0: [npar by 1] initial value of the gradient vector +% badg [scalar] indicator for problem in gradient computation +% H0: [npar by npar] initial value for the inverse Hessian. Must be positive definite. +% Verbose: [scalar] indicator for verbose computations +% varargin: Optional additional inputs that get handed off to fcn each +% time it is called. % Outputs: -% fhat: [scalar] function value at minimum -% xhat: [npar by 1] parameter vector at minimum -% fcount [scalar] function iteration count upon termination -% retcode [scalar] 0: normal step -% 1: zero gradient. -% 5: largest step still improves too fast. -% 2,4: back and forth adjustment of stepsize didn't finish. -% 3: smallest stepsize still improves too slow -% 6: no improvement found +% fhat: [scalar] function value at minimum +% xhat: [npar by 1] parameter vector at minimum +% fcount [scalar] function iteration count upon termination +% retcode [scalar] 0: normal step +% 1: zero gradient. +% 5: largest step still improves too fast. +% 2,4: back and forth adjustment of stepsize didn't finish. +% 3: smallest stepsize still improves too slow +% 6: no improvement found %--------------------- % Modified 7/22/96 to omit variable-length P list, for efficiency and compilation. % Places where the number of P's need to be altered or the code could be returned to @@ -34,7 +38,7 @@ function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,Verbose,vara % http://sims.princeton.edu/yftp/optimize/mfiles/csminit.m % % Copyright (C) 1993-2007 Christopher Sims -% Copyright (C) 2008-2015 Dynare Team +% Copyright (C) 2008-2016 Dynare Team % % This file is part of Dynare. 
% @@ -137,7 +141,7 @@ else dxtest=x0+dx*lambda; end % home - f = feval(fcn,dxtest,varargin{:}); + f = penalty_objective_function(dxtest,fcn,penalty,varargin{:}); %ARGLIST %f = feval(fcn,dxtest,P1,P2,P3,P4,P5,P6,P7,P8,P9,P10,P11,P12,P13); % f = feval(fcn,x0+dx*lambda,P1,P2,P3,P4,P5,P6,P7,P8); diff --git a/matlab/optimization/csminwel1.m b/matlab/optimization/csminwel1.m index 5c641ba9e..4d197c028 100644 --- a/matlab/optimization/csminwel1.m +++ b/matlab/optimization/csminwel1.m @@ -59,8 +59,8 @@ function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,m % You should have received a copy of the GNU General Public License % along with Dynare. If not, see . -global objective_function_penalty_base - +% initialize variable penalty +penalty = 1e8; fh = []; xh = []; [nx,no]=size(x0); @@ -72,6 +72,14 @@ fcount=0; gh = []; H = []; retcodeh = []; + +% force fcn, grad to function handle +if ischar(fcn) + fcn = str2func(fcn); +end +if ischar(grad) + grad = str2func(grad); +end %tailstr = ')'; %stailstr = []; % Lines below make the number of Pi's optional. This is inefficient, though, and precludes @@ -83,7 +91,7 @@ retcodeh = []; % stailstr=[' P' num2str(i) stailstr]; %end -[f0,junk1,junk2,cost_flag] = feval(fcn,x0,varargin{:}); +[f0,cost_flag,arg1] = penalty_objective_function(x0,fcn,penalty,varargin{:}); if ~cost_flag disp_verbose('Bad initial parameter.',Verbose) @@ -91,11 +99,11 @@ if ~cost_flag end if NumGrad - [g, badg]=get_num_grad(method,fcn,f0,x0,epsilon,varargin{:}); + [g, badg]=get_num_grad(method,fcn,penalty,f0,x0,epsilon,varargin{:}); elseif ischar(grad) - [g,badg] = feval(grad,x0,varargin{:}); + [g,badg] = grad(x0,varargin{:}); else - g=junk1; + g=arg1; badg=0; end retcode3=101; @@ -105,7 +113,7 @@ H=H0; cliff=0; while ~done % penalty for dsge_likelihood and dsge_var_likelihood - objective_function_penalty_base = f; + penalty = f; g1=[]; g2=[]; g3=[]; %addition fj. 
7/6/94 for control @@ -115,7 +123,7 @@ while ~done % disp_verbose([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)]); %------------------------- itct=itct+1; - [f1, x1, fc, retcode1] = csminit1(fcn,x,f,g,badg,H,Verbose,varargin{:}); + [f1, x1, fc, retcode1] = csminit1(fcn,x,penalty,f,g,badg,H,Verbose,varargin{:}); fcount = fcount+fc; % erased on 8/4/94 % if (retcode == 1) || (abs(f1-f) < crit) @@ -130,17 +138,17 @@ while ~done wall1=1; badg1=1; else if NumGrad - [g1, badg1]=get_num_grad(method,fcn,f1,x1,epsilon,varargin{:}); + [g1, badg1]=get_num_grad(method,fcn,penalty,f1,x1,epsilon,varargin{:}); elseif ischar(grad), - [g1, badg1] = feval(grad,x1,varargin{:}); + [g1, badg1] = grad(x1,varargin{:}); else - [junk1,g1,junk2, cost_flag] = feval(fcn,x1,varargin{:}); + [junk1,cost_flag,g1] = penalty_objective_function(x1,fcn,penalty,varargin{:}); badg1 = ~cost_flag; end wall1=badg1; % g1 if Save_files - save g1.mat g1 x1 f1 varargin; + save('g1.mat','g1','x1','f1','varargin'); end end if wall1 % && (~done) by Jinill @@ -150,18 +158,18 @@ while ~done %fcliff=fh;xcliff=xh; Hcliff=H+diag(diag(H).*rand(nx,1)); disp_verbose('Cliff. 
Perturbing search direction.',Verbose) - [f2, x2, fc, retcode2] = csminit1(fcn,x,f,g,badg,Hcliff,Verbose,varargin{:}); + [f2, x2, fc, retcode2] = csminit1(fcn,x,penalty,f,g,badg,Hcliff,Verbose,varargin{:}); fcount = fcount+fc; % put by Jinill if f2 < f if retcode2==2 || retcode2==4 wall2=1; badg2=1; else if NumGrad - [g2, badg2]=get_num_grad(method,fcn,f2,x2,epsilon,varargin{:}); + [g2, badg2]=get_num_grad(method,fcn,penalty,f2,x2,epsilon,varargin{:}); elseif ischar(grad), - [g2, badg2] = feval(grad,x2,varargin{:}); + [g2, badg2] = grad(x2,varargin{:}); else - [junk1,g2,junk2, cost_flag] = feval(fcn,x1,varargin{:}); + [junk2,cost_flag,g2] = penalty_objective_function(x1,fcn,penalty,varargin{:}); badg2 = ~cost_flag; end wall2=badg2; @@ -170,7 +178,7 @@ while ~done badg2 end if Save_files - save g2.mat g2 x2 f2 varargin + save('g2.mat','g2','x2','f2','varargin'); end end if wall2 @@ -182,24 +190,24 @@ while ~done if(size(x0,2)>1) gcliff=gcliff'; end - [f3, x3, fc, retcode3] = csminit1(fcn,x,f,gcliff,0,eye(nx),Verbose,varargin{:}); + [f3, x3, fc, retcode3] = csminit1(fcn,x,penalty,f,gcliff,0,eye(nx),Verbose,varargin{:}); fcount = fcount+fc; % put by Jinill if retcode3==2 || retcode3==4 wall3=1; badg3=1; else if NumGrad - [g3, badg3]=get_num_grad(method,fcn,f3,x3,epsilon,varargin{:}); + [g3, badg3]=get_num_grad(method,fcn,penalty,f3,x3,epsilon,varargin{:}); elseif ischar(grad), - [g3, badg3] = feval(grad,x3,varargin{:}); + [g3, badg3] = grad(x3,varargin{:}); else - [junk1,g3,junk2, cost_flag] = feval(fcn,x1,varargin{:}); + [junk3,cost_flag,g3] = penalty_objective_function(x1,fcn,penalty,varargin{:}); badg3 = ~cost_flag; end wall3=badg3; % g3 if Save_files - save g3.mat g3 x3 f3 varargin; + save('g3.mat','g3','x3','f3','varargin'); end end end @@ -249,11 +257,11 @@ while ~done end if nogh if NumGrad - [gh, badgh]=get_num_grad(method,fcn,fh,xh,epsilon,varargin{:}); + [gh, badgh]=get_num_grad(method,fcn,penalty,fh,xh,epsilon,varargin{:}); elseif ischar(grad), - [gh, 
badgh] = feval(grad, xh,varargin{:}); + [gh, badgh] = grad(xh,varargin{:}); else - [junk1,gh,junk2, cost_flag] = feval(fcn,x1,varargin{:}); + [junkh,cost_flag,gh] = penalty_objective_function(x1,fcn,penalty,varargin{:}); badgh = ~cost_flag; end end @@ -303,18 +311,18 @@ end end -function [g, badg]=get_num_grad(method,fcn,f0,x0,epsilon,varargin) +function [g, badg]=get_num_grad(method,fcn,penalty,f0,x0,epsilon,varargin) switch method case 2 - [g,badg] = numgrad2(fcn, f0, x0, epsilon, varargin{:}); + [g,badg] = numgrad2(fcn, f0, x0, penalty, epsilon, varargin{:}); case 3 - [g,badg] = numgrad3(fcn, f0, x0, epsilon, varargin{:}); + [g,badg] = numgrad3(fcn, f0, x0, penalty, epsilon, varargin{:}); case 5 - [g,badg] = numgrad5(fcn, f0, x0, epsilon, varargin{:}); + [g,badg] = numgrad5(fcn, f0, x0, penalty, epsilon, varargin{:}); case 13 - [g,badg] = numgrad3_(fcn, f0, x0, epsilon, varargin{:}); + [g,badg] = numgrad3_(fcn, f0, x0, penalty, epsilon, varargin{:}); case 15 - [g,badg] = numgrad5_(fcn, f0, x0, epsilon, varargin{:}); + [g,badg] = numgrad5_(fcn, f0, x0, penalty, epsilon, varargin{:}); otherwise error('csminwel1: Unknown method for gradient evaluation!') end diff --git a/matlab/optimization/mr_gstep.m b/matlab/optimization/mr_gstep.m index a2335e1b8..605508d2c 100644 --- a/matlab/optimization/mr_gstep.m +++ b/matlab/optimization/mr_gstep.m @@ -1,5 +1,5 @@ -function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,htol0,Verbose,Save_files,varargin) -% function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin) +function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,varargin) +% [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,varargin) % % Gibbs type step in optimisation % @@ -11,7 +11,7 @@ function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,htol0,Verbose,Save_files,varar % varargin{6} --> BayesInfo % varargin{1} --> DynareResults -% Copyright (C) 2006-2014 Dynare Team +% Copyright (C) 2006-2016 Dynare Team % % This file is part
of Dynare. % @@ -42,7 +42,7 @@ end if length(htol)==1, htol=htol*ones(n,1); end -f0=feval(func0,x,varargin{:}); +f0=penalty_objective_function(x,func0,penalty,varargin{:}); xh1=x; f1=zeros(size(f0,1),n); @@ -56,10 +56,10 @@ while i htol(i) - [f0 x fc retcode] = csminit1(func0,x,f0,gg,0,diag(hh),Verbose,varargin{:}); + [f0 x fc retcode] = csminit1(func0,x,penalty,f0,gg,0,diag(hh),Verbose,varargin{:}); ig(i)=1; if Verbose fprintf(['Done for param %s = %8.4f\n'],varargin{6}.name{i},x(i)) @@ -82,11 +82,11 @@ while i DynareResults -% Copyright (C) 2004-2014 Dynare Team +% Copyright (C) 2004-2016 Dynare Team % % This file is part of Dynare. % @@ -47,8 +47,8 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_deriva % You should have received a copy of the GNU General Public License % along with Dynare. If not, see . -global objective_function_penalty_base - +% initialize variable penalty +penalty = 1e8; icount=0; nx=length(x); @@ -62,17 +62,22 @@ htol=htol_base; htol0=htol_base; gibbstol=length(varargin{6}.pshape)/50; %25; +% force fcn, grad to function handle +if ischar(func0) + func0 = str2func(func0); +end + % func0 = str2func([func2str(func0),'_hh']); % func0 = func0; -[fval0,gg,hh]=feval(func0,x,varargin{:}); +[fval0,exit_flag,gg,hh]=penalty_objective_function(x,func0,penalty,varargin{:}); fval=fval0; % initialize mr_gstep and mr_hessian outer_product_gradient=1; if isempty(hh) - mr_hessian(1,x,[],[],[],varargin{:}); - [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,x,func0,flagit,htol,varargin{:}); + mr_hessian(1,x,[],[],[],[],varargin{:}); + [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,x,func0,penalty,flagit,htol,varargin{:}); if isempty(dum), outer_product_gradient=0; igg = 1e-4*eye(nx); @@ -110,7 +115,7 @@ if max(eig(hh))<0 pause end if Save_files - save m1.mat x hh g hhg igg fval0 + save('m1.mat','x','hh','g','hhg','igg','fval0') end igrad=1; @@ -124,12 +129,12 @@ while norm(gg)>gtol && check==0 && jit1 disp_verbose('Gradient 
step!!',Verbose) else @@ -148,10 +153,10 @@ while norm(gg)>gtol && check==0 && jitgtol && check==0 && jit=ftol disp_verbose('Diagonal Hessian successful',Verbose) @@ -169,7 +174,7 @@ while norm(gg)>gtol && check==0 && jit=ftol disp_verbose('Gradient direction successful',Verbose) @@ -183,14 +188,14 @@ while norm(gg)>gtol && check==0 && jit0 - [dum, gg, htol0, igg, hhg,h1]=mr_hessian(0,xparam1,func0,flagg,ftol0,varargin{:}); + [dum, gg, htol0, igg, hhg,h1]=mr_hessian(0,xparam1,func0,penalty,flagg,ftol0,varargin{:}); if flagg==2 hh = reshape(dum,nx,nx); ee=eig(hh); @@ -223,14 +228,14 @@ while norm(gg)>gtol && check==0 && jit1.e-12 && analytic_derivation==0, try if Save_files - save m1.mat x fval0 nig -append + save('m1.mat','x','fval0','nig','-append') end catch if Save_files - save m1.mat x fval0 nig + save('m1.mat','x','fval0','nig') end end - [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func0,flagit,htol,varargin{:}); + [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func0,penalty,flagit,htol,varargin{:}); if isempty(dum), outer_product_gradient=0; end @@ -258,7 +263,7 @@ while norm(gg)>gtol && check==0 && jitgtol && check==0 && jitftol0 skipline() diff --git a/matlab/optimization/numgrad2.m b/matlab/optimization/numgrad2.m index f8057bb94..a0f2552aa 100644 --- a/matlab/optimization/numgrad2.m +++ b/matlab/optimization/numgrad2.m @@ -1,10 +1,10 @@ -function [g, badg] = numgrad2(fcn,f0,x,epsilon,varargin) +function [g, badg] = numgrad2(fcn,f0,x,penalty,epsilon,varargin) % Original file downloaded from: % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m % Copyright (C) 1993-2007 Christopher Sims -% Copyright (C) 2008-2014 Dynare Team +% Copyright (C) 2008-2016 Dynare Team % % This file is part of Dynare. 
% @@ -33,11 +33,11 @@ for i=1:n xiold = x(i); if right_derivative x(i) = x(i)+h; - fh = feval(fcn, x, varargin{:}); + fh = penalty_objective_function(x, fcn, penalty, varargin{:}); g0 = (fh-f0)/h; else x(i) = x(i)-h; - fh = feval(fcn, x, varargin{:}); + fh = penalty_objective_function(x, fcn, penalty, varargin{:}); g0 = (f0-fh)/h; end if abs(g0)< 1e15 diff --git a/matlab/optimization/numgrad3.m b/matlab/optimization/numgrad3.m index b11c1b039..2ff94bc07 100644 --- a/matlab/optimization/numgrad3.m +++ b/matlab/optimization/numgrad3.m @@ -1,4 +1,4 @@ -function [g, badg] = numgrad3(fcn,f0,x,epsilon,varargin) +function [g, badg] = numgrad3(fcn,f0,x,penalty,epsilon,varargin) % Computes the gradient of the objective function fcn using a three points % formula if possible. % @@ -11,7 +11,7 @@ function [g, badg] = numgrad3(fcn,f0,x,epsilon,varargin) % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m % Copyright (C) 1993-2007 Christopher Sims -% Copyright (C) 2008-2014 Dynare Team +% Copyright (C) 2008-2016 Dynare Team % % This file is part of Dynare. % @@ -38,9 +38,9 @@ badg=0; for i=1:n xiold = x(i); x(i) = xiold+h; - f1 = feval(fcn, x, varargin{:}); + f1 = penalty_objective_function(x, fcn, penalty, varargin{:}); x(i) = xiold-h; - f2 = feval(fcn, x, varargin{:}); + f2 = penalty_objective_function(x, fcn, penalty, varargin{:}); g0 = (f1-f2)/H; if abs(g0)< 1e15 g(i)=g0; diff --git a/matlab/optimization/numgrad3_.m b/matlab/optimization/numgrad3_.m index 61e8710c3..635813833 100644 --- a/matlab/optimization/numgrad3_.m +++ b/matlab/optimization/numgrad3_.m @@ -1,4 +1,4 @@ -function [g, badg] = numgrad3_(fcn,f0,x,epsilon,varargin) +function [g, badg] = numgrad3_(fcn,f0,x,penalty,epsilon,varargin) % Computes the gradient of the objective function fcn using a three points % formula if possible. 
% @@ -46,12 +46,12 @@ for i=1:n xiold = x(i); h = step_length_correction(xiold,scale,i)*delta; x(i) = xiold + h; - [f1,junk1,junk2,cost_flag1] = feval(fcn, x, varargin{:}); + [f1,junk1,cost_flag1] = penalty_objective_function(x, fcn, penalty, varargin{:}); if ~cost_flag1 fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the right!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h)) end x(i) = xiold - h; - [f2,junk2,junk2,cost_flag2] = feval(fcn, x, varargin{:}); + [f2,junk2,cost_flag2] = penalty_objective_function(x, fcn, penalty, varargin{:}); if ~cost_flag2 fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the left!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h)) end diff --git a/matlab/optimization/numgrad5.m b/matlab/optimization/numgrad5.m index c31936790..f8b8d33e0 100644 --- a/matlab/optimization/numgrad5.m +++ b/matlab/optimization/numgrad5.m @@ -1,4 +1,4 @@ -function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin) +function [g, badg] = numgrad5(fcn,f0,x,penalty,epsilon,varargin) % Computes the gradient of the objective function fcn using a five points % formula if possible. % @@ -13,7 +13,7 @@ function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin) % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m % Copyright (C) 1993-2007 Christopher Sims -% Copyright (C) 2008-2014 Dynare Team +% Copyright (C) 2008-2016 Dynare Team % This file is part of Dynare. 
% @@ -40,13 +40,13 @@ badg = 0; for i=1:n xiold = x(i); x(i) = xiold+h; - f1 = feval(fcn, x, varargin{:}); + f1 = penalty_objective_function(x, fcn, penalty, varargin{:}); x(i) = xiold-h; - f2 = feval(fcn, x, varargin{:}); + f2 = penalty_objective_function(x, fcn, penalty, varargin{:}); x(i) = xiold+2*h; - f3 = feval(fcn, x, varargin{:}); + f3 = penalty_objective_function(x, fcn, penalty, varargin{:}); x(i) = xiold-2*h; - f4 = feval(fcn, x, varargin{:}); + f4 = penalty_objective_function(x, fcn, penalty, varargin{:}); g0 = (8*(f1-f2)+f4-f3)/H; if abs(g0)< 1e15 g(i) = g0; diff --git a/matlab/optimization/numgrad5_.m b/matlab/optimization/numgrad5_.m index d8395092e..bbce6fd0e 100644 --- a/matlab/optimization/numgrad5_.m +++ b/matlab/optimization/numgrad5_.m @@ -1,4 +1,4 @@ -function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin) +function [g, badg] = numgrad5_(fcn,f0,x,penalty,epsilon,varargin) % Computes the gradient of the objective function fcn using a five points % formula if possible. % @@ -13,7 +13,7 @@ function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin) % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m % Copyright (C) 1993-2007 Christopher Sims -% Copyright (C) 2008-2014 Dynare Team +% Copyright (C) 2008-2016 Dynare Team % % This file is part of Dynare. 
% @@ -48,13 +48,13 @@ for i=1:n xiold = x(i); h = step_length_correction(xiold,scale,i)*delta; x(i) = xiold+h; - [f1,junk1,junk2,cost_flag1] = feval(fcn, x, varargin{:}); + [f1,junk1,cost_flag1] = penalty_objective_function(x, fcn, penalty, varargin{:}); x(i) = xiold-h; - [f2,junk1,junk2,cost_flag2] = feval(fcn, x, varargin{:}); + [f2,junk1,cost_flag2] = penalty_objective_function(x, fcn, penalty, varargin{:}); x(i) = xiold+2*h; - [f3,junk1,junk2,cost_flag3] = feval(fcn, x, varargin{:}); + [f3,junk1,cost_flag3] = penalty_objective_function(x, fcn, penalty, varargin{:}); x(i) = xiold-2*h; - [f4,junk1,junk2,cost_flag4] = feval(fcn, x, varargin{:}); + [f4,junk1,cost_flag4] = penalty_objective_function(x, fcn, penalty, varargin{:}); if f0. - -% In Metropolis, we set penalty to Inf so as to reject all parameter sets triggering an error during target density computation -global objective_function_penalty_base -objective_function_penalty_base = Inf; - vv = sampler_options.invhess; % Initialization of the sampler [ ix2, ilogpo2, ModelName, MetropolisFolder, fblck, fline, npar, nblck, nruns, NewFile, MAX_nruns, d ] = ...
diff --git a/matlab/posterior_sampler_iteration.m b/matlab/posterior_sampler_iteration.m index 5b09680ee..09e5d8851 100644 --- a/matlab/posterior_sampler_iteration.m +++ b/matlab/posterior_sampler_iteration.m @@ -68,7 +68,6 @@ switch posterior_sampling_method bayestopt_=varargin{6}; npar=length(last_draw); %% randomize indices for blocking in this iteration - global objective_function_penalty_base indices=randperm(npar)'; blocks=[1; (1+cumsum((rand(length(indices)-1,1)>(1-sampler_options.new_block_probability))))]; nblocks=blocks(end,1); %get number of blocks this iteration @@ -82,7 +81,6 @@ switch posterior_sampling_method [xopt_current_block, fval, exitflag, hess_mat_optimizer, options_, Scale] = dynare_minimize_objective(@TaRB_optimizer_wrapper,par_start_current_block,sampler_options.mode_compute,options_,[mh_bounds.lb(indices(blocks==block_iter,1),1) mh_bounds.ub(indices(blocks==block_iter,1),1)],bayestopt_.name,bayestopt_,[],... current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper varargin{:}); %inputs for objective - objective_function_penalty_base=Inf; %reset penalty that may have been changed by optimizer %% covariance for proposal density hessian_mat = reshape(hessian('TaRB_optimizer_wrapper',xopt_current_block, ... options_.gstep,...