Merge branch 'csminwel' of git.dynare.org:JohannesPfeifer/dynare

Ref. !2172
Sébastien Villemot 2023-09-13 11:31:56 +02:00
commit 43af789eda
6 changed files with 23 additions and 67 deletions


@@ -74,7 +74,7 @@ if strcmp(options_mom_.mom.mom_method,'GMM') || strcmp(options_mom_.mom.mom_meth
 df = nan(size(oo_.mom.data_moments,1),length(xparam));
 end
 else
-df = nan(1,length(xparam));
+df = nan(length(xparam),1);
 end
 end
 end
@@ -284,9 +284,9 @@ if strcmp(options_mom_.mom.mom_method,'GMM') || strcmp(options_mom_.mom.mom_meth
 df(:,jp) = dresiduals;
 end
 else
-df(:,jp) = dresiduals'*residuals + residuals'*dresiduals;
+df(jp,1) = dresiduals'*residuals + residuals'*dresiduals;
 if options_mom_.mom.penalized_estimator
-df(:,jp)=df(:,jp)+(dxparam1(:,jp))'/oo_.mom.prior.variance*(xparam-oo_.mom.prior.mean)+(xparam-oo_.mom.prior.mean)'/oo_.mom.prior.variance*(dxparam1(:,jp));
+df(jp,1)=df(jp,1)+(dxparam1(:,jp))'/oo_.mom.prior.variance*(xparam-oo_.mom.prior.mean)+(xparam-oo_.mom.prior.mean)'/oo_.mom.prior.variance*(dxparam1(:,jp));
 end
 end
 end
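The two hunks above fix the orientation of the gradient of the scalar GMM/SMM objective: it is now an npar-by-1 column (allocated as nan(length(xparam),1) and filled via df(jp,1)) instead of a 1-by-npar row. A minimal sketch of the convention, with hypothetical numbers rather than Dynare code:

% For Q(theta) = r(theta)'*r(theta), the jp-th gradient component is
% dr(:,jp)'*r + r'*dr(:,jp), collected into an npar-by-1 column, which is
% the orientation a gradient-based optimizer expects from an [f,g] objective.
r  = [0.5; -0.2];           % residuals (hypothetical)
dr = [1.0 0.3; 0.2 -0.4];   % Jacobian dr/dtheta', one column per parameter
npar = size(dr,2);
df = nan(npar,1);           % column gradient, as in the corrected code
for jp = 1:npar
    df(jp,1) = dr(:,jp)'*r + r'*dr(:,jp);   % equals 2*r'*dr(:,jp)
end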


@@ -38,7 +38,7 @@ function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,penalty,f0,g0,badg,H0,Verb
 % http://sims.princeton.edu/yftp/optimize/mfiles/csminit.m
 %
 % Copyright © 1993-2007 Christopher Sims
-% Copyright © 2008-2017 Dynare Team
+% Copyright © 2008-2023 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -55,24 +55,14 @@ function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,penalty,f0,g0,badg,H0,Verb
 % You should have received a copy of the GNU General Public License
 % along with Dynare. If not, see <https://www.gnu.org/licenses/>.
-%tailstr = ')';
-%for i=nargin-6:-1:1
-% tailstr=[ ',P' num2str(i) tailstr];
-%end
-%ANGLE = .03;
 ANGLE = .005;
-%THETA = .03;
 THETA = .3; %(0<THETA<.5) THETA near .5 makes long line searches, possibly fewer iterations.
 FCHANGE = 1000;
 MINLAMB = 1e-9;
-% fixed 7/15/94
-% MINDX = .0001;
-% MINDX = 1e-6;
 MINDFAC = .01;
 fcount=0;
 lambda=1;
 xhat=x0;
-f=f0;
 fhat=f0;
 g = g0;
 gnorm = norm(g);
@@ -84,34 +74,13 @@ if (gnorm < 1.e-12) && ~badg % put ~badg 8/4/94
 else
 % with badg true, we don't try to match rate of improvement to directional
 % derivative. We're satisfied just to get some improvement in f.
-%
-%if(badg)
-% dx = -g*FCHANGE/(gnorm*gnorm);
-% dxnorm = norm(dx);
-% if dxnorm > 1e12
-% disp('Bad, small gradient problem.')
-% dx = dx*FCHANGE/dxnorm;
-% end
-%else
-% Gauss-Newton step;
-%---------- Start of 7/19/93 mod ---------------
-%[v d] = eig(H0);
-%toc
-%d=max(1e-10,abs(diag(d)));
-%d=abs(diag(d));
-%dx = -(v.*(ones(size(v,1),1)*d'))*(v'*g);
-% toc
 dx = -H0*g;
-% toc
 dxnorm = norm(dx);
 if dxnorm > 1e12
 disp_verbose('Near-singular H problem.',Verbose)
 dx = dx*FCHANGE/dxnorm;
 end
 dfhat = dx'*g0;
-%end
-%
-%
 if ~badg
 % test for alignment of dx with gradient and fix if necessary
 a = -dfhat/(gnorm*dxnorm);
@@ -123,7 +92,6 @@ else
 end
 end
 disp_verbose(sprintf('Predicted improvement: %18.9f',-dfhat/2),Verbose)
-%
 % Have OK dx, now adjust length of step (lambda) until min and
 % max improvement rate criteria are met.
 done=0;
@@ -132,7 +100,6 @@ else
 lambdaMax=inf;
 lambdaPeak=0;
 fPeak=f0;
-lambdahat=0;
 while ~done
 if size(x0,2)>1
 dxtest=x0+dx'*lambda;
@@ -141,16 +108,10 @@ else
 end
 % home
 f = penalty_objective_function(dxtest,fcn,penalty,varargin{:});
-%ARGLIST
-%f = feval(fcn,dxtest,P1,P2,P3,P4,P5,P6,P7,P8,P9,P10,P11,P12,P13);
-% f = feval(fcn,x0+dx*lambda,P1,P2,P3,P4,P5,P6,P7,P8);
 disp_verbose(sprintf('lambda = %10.5g; f = %20.7f',lambda,f ),Verbose)
-%debug
-%disp(sprintf('Improvement too great? f0-f: %g, criterion: %g',f0-f,-(1-THETA)*dfhat*lambda))
 if f<fhat
 fhat=f;
 xhat=dxtest;
-lambdahat = lambda;
 end
 fcount=fcount+1;
 shrinkSignal = (~badg & (f0-f < max([-THETA*dfhat*lambda 0]))) | (badg & (f0-f) < 0) ;
@@ -162,7 +123,6 @@ else
 while lambda/factor <= lambdaPeak
 factor=factor^.6;
 end
-%if (abs(lambda)*(factor-1)*dxnorm < MINDX) || (abs(lambda)*(factor-1) < MINLAMB)
 if abs(factor-1)<MINDFAC
 if abs(lambda)<4
 retcode=2;
@@ -197,7 +157,6 @@ else
 if shrink
 shrink=0;
 factor = factor^.6;
-%if ( abs(lambda)*(factor-1)*dxnorm< MINDX ) || ( abs(lambda)*(factor-1)< MINLAMB)
 if abs(factor-1)<MINDFAC
 if abs(lambda)<4
 retcode=4;
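What survives the cleanup in csminit1 is the core line search: dfhat = dx'*g0 is the directional derivative along the proposed step, -dfhat/2 the improvement a quadratic model predicts, and THETA bounds the acceptable improvement rate. A minimal sketch of the shrink test, with hypothetical values rather than the Dynare implementation:

% A step of size lambda is shrunk when the realised decrease f0-f falls
% short of THETA times the linear prediction -dfhat*lambda (good-gradient
% case); with a bad gradient, any improvement at all is accepted.
THETA  = 0.3;         % 0 < THETA < 0.5, as documented in the file
dfhat  = -2.0;        % hypothetical directional derivative dx'*g0
lambda = 1.0;
f0 = 10.0; f = 9.5;   % hypothetical objective values before/after the step
shrinkSignal = (f0-f) < max(-THETA*dfhat*lambda, 0);   % true here: shrink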


@@ -4,9 +4,9 @@ function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,m
 % fcn: [string] string naming the objective function to be minimized
 % x0: [npar by 1] initial value of the parameter vector
 % H0: [npar by npar] initial value for the inverse Hessian. Must be positive definite.
-% grad: [string or empty matrix] Either a string naming a function that calculates the gradient, or the null matrix.
-% If it's null, the program calculates a numerical gradient. In this case fcn must
-% be written so that it can take a matrix argument and produce a row vector of values.
+% grad: [string or boolean] Either a string naming a function that calculates the gradient, or a boolean
+% indicating whether the function returns a gradient (column) vector. If false, the program
+% calculates a numerical gradient.
 % crit: [scalar] Convergence criterion. Iteration will cease when it proves impossible to improve the
 % function value by more than crit.
 % nit: [scalar] Maximum number of iterations.
@@ -42,7 +42,7 @@ function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,m
 % http://sims.princeton.edu/yftp/optimize/mfiles/csminwel.m
 %
 % Copyright © 1993-2007 Christopher Sims
-% Copyright © 2006-2021 Dynare Team
+% Copyright © 2006-2023 Dynare Team
 %
 % This file is part of Dynare.
 %
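The rewritten grad documentation changes the calling contract: instead of passing the empty matrix, callers pass a boolean saying whether fcn returns its own gradient. A hypothetical objective honouring that contract (the function name and the quadratic are illustrative, not from the diff):

% When grad is true, fcn must return the gradient as a second output,
% an npar-by-1 column vector, alongside the function value.
function [f, g] = myobj_sketch(x)
f = 0.5*(x'*x);   % simple quadratic objective
g = x;            % analytic gradient, returned as a column vector
end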


@@ -263,7 +263,7 @@ switch minimizer_algorithm
 Verbose = 0;
 end
 % Set flag for analytical gradient.
-if options_.analytic_derivation
+if options_.analytic_derivation || (isfield(options_,'mom') && options_.mom.analytic_jacobian==1)
 analytic_grad=1;
 else
 analytic_grad=[];
@@ -276,7 +276,7 @@ switch minimizer_algorithm
 if isempty(prior_information) %mr_hessian requires it, but can be NaN
 prior_information.p2=NaN(n_params,1);
 end
-if options_.analytic_derivation
+if options_.analytic_derivation
 old_analytic_derivation = options_.analytic_derivation;
 options_.analytic_derivation=-1; %force analytic outer product gradient hessian for each iteration
 analytic_grad=1;
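The first hunk widens the analytic-gradient switch to cover method-of-moments estimation. A self-contained sketch of the resulting logic (option field names as in the diff; the surrounding assignments are illustrative):

options_.analytic_derivation = 0;      % general switch off...
options_.mom.analytic_jacobian = 1;    % ...but moments Jacobian switched on
if options_.analytic_derivation || (isfield(options_,'mom') && options_.mom.analytic_jacobian==1)
    analytic_grad = 1;    % optimizer will request [f,g] from the objective
else
    analytic_grad = [];   % empty: fall back to a numerical gradient
end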


@@ -67,7 +67,7 @@ function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1, hess_info] = mr_hessian(x,f
 n=size(x,1);
-[f0,exit_flag, ff0]=penalty_objective_function(x,func,penalty,varargin{:});
+[f0,~, ff0]=penalty_objective_function(x,func,penalty,varargin{:});
 h2=bounds(:,2)-bounds(:,1);
 hmax=bounds(:,2)-x;
 hmax=min(hmax,x-bounds(:,1));
@@ -100,7 +100,7 @@ while i<n
 h10=hess_info.h1(i);
 hcheck=0;
 xh1(i)=x(i)+hess_info.h1(i);
-[fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+[fx,~,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
 it=1;
 dx=(fx-f0);
 ic=0;
@@ -117,18 +117,18 @@ while i<n
 hess_info.h1(i) = min(hess_info.h1(i),0.5*hmax(i));
 hess_info.h1(i) = max(hess_info.h1(i),1.e-10);
 xh1(i)=x(i)+hess_info.h1(i);
-[fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+[fx,~,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
 end
 if abs(dx(it))>(3*hess_info.htol)
 hess_info.h1(i)= hess_info.htol/abs(dx(it))*hess_info.h1(i);
 hess_info.h1(i) = max(hess_info.h1(i),1e-10);
 xh1(i)=x(i)+hess_info.h1(i);
-[fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+[fx,~,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
 iter=0;
 while (fx-f0)==0 && iter<50
 hess_info.h1(i)= hess_info.h1(i)*2;
 xh1(i)=x(i)+hess_info.h1(i);
-[fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+[fx,~,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
 ic=1;
 iter=iter+1;
 end
@@ -150,7 +150,7 @@ while i<n
 end
 end
 xh1(i)=x(i)-hess_info.h1(i);
-[fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+[fx,~,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
 f_1(:,i)=fx;
 if outer_product_gradient
 if any(isnan(ffx)) || isempty(ffx)
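Every change in this file is the same pattern: an output that was bound to a dead variable (exit_flag) is now discarded with MATLAB's ~ placeholder. A minimal self-contained sketch (the stand-in function is hypothetical, mimicking penalty_objective_function's output list):

fobj = @() deal(1.0, true, [0.1; 0.2]);   % returns f0, exit_flag, ffx
[f0, ~, ffx] = fobj();                    % keep f0 and ffx, drop the flag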


@@ -81,8 +81,6 @@ if ischar(func0)
 func0 = str2func(func0);
 end
-% func0 = str2func([func2str(func0),'_hh']);
-% func0 = func0;
 [fval0,exit_flag,gg,hh]=penalty_objective_function(x,func0,penalty,varargin{:});
 if ~exit_flag
 disp_verbose('Bad initial parameter.',Verbose)
@@ -137,7 +135,6 @@ if Save_files
 end
 igrad=1;
-igibbs=1;
 inx=eye(nx);
 jit=0;
 if Save_files
@@ -152,9 +149,9 @@ while norm(gg)>gtol && check==0 && jit<nit
 penalty = fval0(icount);
 disp_verbose([' '],Verbose)
 disp_verbose(['Iteration ',num2str(icount)],Verbose)
-[fval,x0,fc,retcode] = csminit1(func0,xparam1,penalty,fval0(icount),gg,0,H,Verbose,varargin{:});
+[fval,x0] = csminit1(func0,xparam1,penalty,fval0(icount),gg,0,H,Verbose,varargin{:});
 if igrad
-[fval1,x01,fc,retcode1] = csminit1(func0,x0,penalty,fval,gg,0,inx,Verbose,varargin{:});
+[fval1,x01] = csminit1(func0,x0,penalty,fval,gg,0,inx,Verbose,varargin{:});
 if (fval-fval1)>1
 disp_verbose('Gradient step!!',Verbose)
 else
@@ -174,7 +171,7 @@ while norm(gg)>gtol && check==0 && jit<nit
 end
 iggx=eye(length(gg));
 iggx(ig_pos,ig_pos) = inv( hhx(ig_pos,ig_pos) );
-[fvala,x0,fc,retcode] = csminit1(func0,x0,penalty,fval,ggx,0,iggx,Verbose,varargin{:});
+[~,x0] = csminit1(func0,x0,penalty,fval,ggx,0,iggx,Verbose,varargin{:});
 end
 x0 = check_bounds(x0,bounds);
 [fvala, x0, ig] = mr_gstep(h1,x0,bounds,func0,penalty,htol0,Verbose,Save_files,gradient_epsilon, parameter_names,varargin{:});
@@ -187,7 +184,7 @@ while norm(gg)>gtol && check==0 && jit<nit
 if (fval0(icount)-fval)<ftol && flagit==0
 disp_verbose('Try diagonal Hessian',Verbose)
 ihh=diag(1./(diag(hhg)));
-[fval2,x0,fc,retcode2] = csminit1(func0,x0,penalty,fval,gg,0,ihh,Verbose,varargin{:});
+[fval2,x0] = csminit1(func0,x0,penalty,fval,gg,0,ihh,Verbose,varargin{:});
 x0 = check_bounds(x0,bounds);
 if (fval-fval2)>=ftol
 disp_verbose('Diagonal Hessian successful',Verbose)
@@ -197,7 +194,7 @@ while norm(gg)>gtol && check==0 && jit<nit
 if (fval0(icount)-fval)<ftol && flagit==0
 disp_verbose('Try gradient direction',Verbose)
 ihh0=inx.*1.e-4;
-[fval3,x0,fc,retcode3] = csminit1(func0,x0,penalty,fval,gg,0,ihh0,Verbose,varargin{:});
+[fval3,x0] = csminit1(func0,x0,penalty,fval,gg,0,ihh0,Verbose,varargin{:});
 x0 = check_bounds(x0,bounds);
 if (fval-fval3)>=ftol
 disp_verbose('Gradient direction successful',Verbose)
@@ -211,7 +208,7 @@ while norm(gg)>gtol && check==0 && jit<nit
 disp_verbose('No further improvement is possible!',Verbose)
 check=1;
 if analytic_derivation
-[fvalx,exit_flag,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
+[~,~,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
 hhg=hh;
 H = inv(hh);
 else
@@ -285,7 +282,7 @@ while norm(gg)>gtol && check==0 && jit<nit
 H = igg;
 end
 elseif analytic_derivation
-[fvalx,exit_flag,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
+[~,~,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
 hhg=hh;
 H = inv(hh);
 end
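The call sites above also expose newrat's fallback cascade: when the step based on the full inverse Hessian stalls, it retries with a diagonal Hessian (ihh) and then with a small multiple of the identity (ihh0), i.e. a pure gradient direction. A schematic sketch of that cascade on a hypothetical objective, not newrat itself:

f  = @(x) 0.5*(x'*diag([1;100])*x);   % ill-conditioned quadratic
x0 = [1; 1]; ftol = 1e-6;
g  = diag([1;100])*x0;                % gradient at x0
H  = diag([1;100]);                   % stand-in for the estimated Hessian
candidates = {inv(H), diag(1./diag(H)), 1e-4*eye(2)};  % full, diagonal, gradient
for k = 1:numel(candidates)
    x1 = x0 - candidates{k}*g;        % Newton-type step in each direction
    if f(x0) - f(x1) >= ftol
        break                         % accept the first direction that helps
    end
end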