diff --git a/matlab/optimization/dynare_minimize_objective.m b/matlab/optimization/dynare_minimize_objective.m index d8ffbb4b1..a7e6a83f5 100644 --- a/matlab/optimization/dynare_minimize_objective.m +++ b/matlab/optimization/dynare_minimize_objective.m @@ -51,7 +51,7 @@ if isempty(bounds) end if isempty(parameter_names) - parameter_names=[repmat('parameter ',n_params,1),num2str((1:n_params)')]; + parameter_names=cellstr([repmat('parameter ',n_params,1),num2str((1:n_params)')]); end %% initialize function outputs @@ -268,7 +268,7 @@ switch minimizer_algorithm hess_info.gstep=options_.gstep; hess_info.htol = 1.e-4; hess_info.h1=options_.gradient_epsilon*ones(n_params,1); - [opt_par_values,hessian_mat,gg,fval,invhess,new_rat_hess_info] = newrat(objective_function,start_par_value,bounds,analytic_grad,crit,nit,0,Verbose, Save_files,hess_info,prior_information.p2,varargin{:}); %hessian_mat is the plain outer product gradient Hessian + [opt_par_values,hessian_mat,gg,fval,invhess,new_rat_hess_info] = newrat(objective_function,start_par_value,bounds,analytic_grad,crit,nit,0,Verbose,Save_files,hess_info,prior_information.p2,options_.gradient_epsilon,parameter_names,varargin{:}); %hessian_mat is the plain outer product gradient Hessian case 6 if isempty(prior_information) %Inf will be reset prior_information.p2=Inf(n_params,1); diff --git a/matlab/optimization/mr_gstep.m b/matlab/optimization/mr_gstep.m index 0836a7a87..048601484 100644 --- a/matlab/optimization/mr_gstep.m +++ b/matlab/optimization/mr_gstep.m @@ -1,5 +1,5 @@ -function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,varargin) -% [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,varargin) +function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,gradient_epsilon,parameter_names,varargin) +% [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,gradient_epsilon,parameter_names,varargin) % % Gibbs type step in 
optimisation % @@ -11,7 +11,7 @@ function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_fil % varargin{6} --> BayesInfo % varargin{1} --> DynareResults -% Copyright (C) 2006-2017 Dynare Team +% Copyright (C) 2006-2020 Dynare Team % % This file is part of Dynare. % @@ -30,7 +30,7 @@ function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_fil n=size(x,1); if isempty(h1) - h1=varargin{3}.gradient_epsilon*ones(n,1); + h1=gradient_epsilon*ones(n,1); end @@ -72,10 +72,10 @@ while i htol(i) - [f0, x, fc, retcode] = csminit1(func0,x,penalty,f0,gg,0,diag(hh),Verbose,varargin{:}); + [f0, x, ~, ~] = csminit1(func0,x,penalty,f0,gg,0,diag(hh),Verbose,varargin{:}); ig(i)=1; if Verbose - fprintf(['Done for param %s = %8.4f\n'],varargin{6}.name{i},x(i)) + fprintf(['Done for param %s = %8.4f\n'],parameter_names{i},x(i)) end end xh1=x; diff --git a/matlab/optimization/newrat.m b/matlab/optimization/newrat.m index e1ec4c1ed..e94d56554 100644 --- a/matlab/optimization/newrat.m +++ b/matlab/optimization/newrat.m @@ -1,5 +1,5 @@ -function [xparam1, hh, gg, fval, igg, hess_info] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, hess_info, prior_std, varargin) -% [xparam1, hh, gg, fval, igg, hess_info] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, hess_info, varargin) +function [xparam1, hh, gg, fval, igg, hess_info] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, hess_info, prior_std, gradient_epsilon, parameter_names, varargin) +% [xparam1, hh, gg, fval, igg, hess_info] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, hess_info, prior_std, gradient_epsilon, parameter_names, varargin) % % Optimiser with outer product gradient and with sequences of univariate steps % uses Chris Sims subroutine for line search @@ -24,6 +24,8 @@ function [xparam1, hh, gg, fval, igg, hess_info] = newrat(func0, x, bounds, 
anal % computation of Hessian % - prior_std prior standard devation of parameters (can be NaN); % passed to mr_hessian +% - gradient_epsilon [double] step size in gradient +% - parameter_names [cell] names of parameters for error messages % - varargin other inputs % e.g. in dsge_likelihood and others: % varargin{1} --> DynareDataset @@ -167,7 +169,7 @@ while norm(gg)>gtol && check==0 && jit