Add verbosity and SaveFiles options to some optimizers

Also moves additional optimizer-related files to the corresponding subfolder
Closes #894
Johannes Pfeifer 2015-05-13 20:58:47 +02:00
parent 8bc946c148
commit 0769fd1cce
10 changed files with 217 additions and 122 deletions
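The new defaults keep the previous behaviour (full console output, saving of intermediate files). A minimal sketch of switching both off for the affected mode-finders, assuming the global options_ structure has already been initialised as in the first hunk below:

% run csminwel (mode_compute=4) silently and without writing intermediate files
options_.csminwel.verbosity  = 0;
options_.csminwel.Save_files = 0;
% the same pair of switches exists for newrat (mode_compute=5)
options_.newrat.verbosity    = 0;
options_.newrat.Save_files   = 0;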

View File

@ -487,6 +487,9 @@ options_.homotopy_force_continue = 0;
%csminwel optimization routine
csminwel.tolerance.f=1e-7;
csminwel.maxiter=1000;
csminwel.verbosity=1;
csminwel.Save_files=1;
options_.csminwel=csminwel;
%newrat optimization routine
@ -494,6 +497,9 @@ newrat.hess=1; % dynare numerical hessian
newrat.tolerance.f=1e-5;
newrat.tolerance.f_analytic=1e-7;
newrat.maxiter=1000;
newrat.verbosity=1;
newrat.Save_files=1;
options_.newrat=newrat;
% Simplex optimization routine (variation on Nelder Mead algorithm).

View File

@ -1,13 +1,15 @@
function H = bfgsi1(H0,dg,dx)
% H = bfgsi1(H0,dg,dx)
function H = bfgsi1(H0,dg,dx,Verbose,Save_files)
% H = bfgsi1(H0,dg,dx,Verbose,Save_files)
% Update Inverse Hessian matrix
%
% Inputs:
% H0 [npar by npar] initial inverse Hessian matrix
% dg [npar by 1] previous change in gradient
% dx [npar by 1] previous change in x;
% Verbose [scalar] Indicator for silent mode
% Save_files [scalar] Indicator whether to save files
%
% 6/8/93 version that updates inverse hessian instead of hessian
% 6/8/93 version that updates inverse Hessian instead of Hessian
% itself.
%
% Original file downloaded from:
@ -42,10 +44,12 @@ dgdx = dg'*dx;
if (abs(dgdx) >1e-12)
H = H0 + (1+(dg'*Hdg)/dgdx)*(dx*dx')/dgdx - (dx*Hdg'+Hdg*dx')/dgdx;
else
disp('bfgs update failed.')
disp(['|dg| = ' num2str(sqrt(dg'*dg)) '|dx| = ' num2str(sqrt(dx'*dx))]);
disp(['dg''*dx = ' num2str(dgdx)])
disp(['|H*dg| = ' num2str(Hdg'*Hdg)])
disp_verbose('bfgs update failed.',Verbose)
disp_verbose(['|dg| = ' num2str(sqrt(dg'*dg)) '|dx| = ' num2str(sqrt(dx'*dx))],Verbose);
disp_verbose(['dg''*dx = ' num2str(dgdx)],Verbose)
disp_verbose(['|H*dg| = ' num2str(Hdg'*Hdg)],Verbose)
H=H0;
end
save('H.dat','H')
if Save_files
save('H.dat','H')
end
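With the extended signature the inverse-Hessian update can be run silently and without writing H.dat; a minimal sketch with hypothetical input values:

H0 = eye(2);                    % current inverse Hessian approximation
dg = [0.10; -0.20];             % change in the gradient
dx = [0.05;  0.03];             % change in x
H  = bfgsi1(H0, dg, dx, 0, 0);  % Verbose=0, Save_files=0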

View File

@ -1,4 +1,4 @@
function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin)
function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,Verbose,varargin)
% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin)
%
% Inputs:
@ -101,7 +101,7 @@ else
% toc
dxnorm = norm(dx);
if dxnorm > 1e12
disp('Near-singular H problem.')
disp_verbose('Near-singular H problem.',Verbose)
dx = dx*FCHANGE/dxnorm;
end
dfhat = dx'*g0;
@ -115,10 +115,10 @@ else
dx = dx - (ANGLE*dxnorm/gnorm+dfhat/(gnorm*gnorm))*g;
dfhat = dx'*g;
dxnorm = norm(dx);
disp(sprintf('Correct for low angle: %g',a))
disp_verbose(sprintf('Correct for low angle: %g',a),Verbose)
end
end
disp(sprintf('Predicted improvement: %18.9f',-dfhat/2))
disp_verbose(sprintf('Predicted improvement: %18.9f',-dfhat/2),Verbose)
%
% Have OK dx, now adjust length of step (lambda) until min and
% max improvement rate criteria are met.
@ -141,7 +141,7 @@ else
%ARGLIST
%f = feval(fcn,dxtest,P1,P2,P3,P4,P5,P6,P7,P8,P9,P10,P11,P12,P13);
% f = feval(fcn,x0+dx*lambda,P1,P2,P3,P4,P5,P6,P7,P8);
disp(sprintf('lambda = %10.5g; f = %20.7f',lambda,f ))
disp_verbose(sprintf('lambda = %10.5g; f = %20.7f',lambda,f ),Verbose)
%debug
%disp(sprintf('Improvement too great? f0-f: %g, criterion: %g',f0-f,-(1-THETA)*dfhat*lambda))
if f<fhat
@ -176,7 +176,11 @@ else
if abs(lambda) < MINLAMB
if (lambda > 0) && (f0 <= fhat)
% try going against gradient, which may be inaccurate
lambda = -lambda*factor^6
if Verbose
lambda = -lambda*factor^6
else
lambda = -lambda*factor^6;
end
else
if lambda < 0
retcode = 6;
@ -222,4 +226,5 @@ else
end
end
end
disp(sprintf('Norm of dx %10.5g', dxnorm))
disp_verbose(sprintf('Norm of dx %10.5g', dxnorm),Verbose)

View File

@ -1,4 +1,4 @@
function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,varargin)
function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,Verbose,Save_files,varargin)
%[fhat,xhat,ghat,Hhat,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,varargin)
% Inputs:
% fcn: [string] string naming the objective function to be minimized
@ -65,7 +65,6 @@ fh = [];
xh = [];
[nx,no]=size(x0);
nx=max(nx,no);
Verbose=1;
NumGrad= isempty(grad);
done=0;
itct=0;
@ -87,7 +86,7 @@ retcodeh = [];
[f0,junk1,junk2,cost_flag] = feval(fcn,x0,varargin{:});
if ~cost_flag
disp('Bad initial parameter.')
disp_verbose('Bad initial parameter.',Verbose)
return
end
@ -110,15 +109,13 @@ while ~done
g1=[]; g2=[]; g3=[];
%addition fj. 7/6/94 for control
if Verbose
disp('-----------------')
disp(sprintf('f at the beginning of new iteration, %20.10f',f))
end
disp_verbose('-----------------',Verbose)
disp_verbose(sprintf('f at the beginning of new iteration, %20.10f',f),Verbose)
%-----------Comment out this line if the x vector is long----------------
% disp([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)]);
% disp_verbose([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)]);
%-------------------------
itct=itct+1;
[f1, x1, fc, retcode1] = csminit1(fcn,x,f,g,badg,H,varargin{:});
[f1, x1, fc, retcode1] = csminit1(fcn,x,f,g,badg,H,Verbose,varargin{:});
fcount = fcount+fc;
% erased on 8/4/94
% if (retcode == 1) || (abs(f1-f) < crit)
@ -142,7 +139,9 @@ while ~done
end
wall1=badg1;
% g1
save g1.mat g1 x1 f1 varargin;
if Save_files
save g1.mat g1 x1 f1 varargin;
end
end
if wall1 % && (~done) by Jinill
% Bad gradient or back and forth on step length. Possibly at
@ -150,10 +149,8 @@ while ~done
%
%fcliff=fh;xcliff=xh;
Hcliff=H+diag(diag(H).*rand(nx,1));
if Verbose
disp('Cliff. Perturbing search direction.')
end
[f2, x2, fc, retcode2] = csminit1(fcn,x,f,g,badg,Hcliff,varargin{:});
disp_verbose('Cliff. Perturbing search direction.',Verbose)
[f2, x2, fc, retcode2] = csminit1(fcn,x,f,g,badg,Hcliff,Verbose,varargin{:});
fcount = fcount+fc; % put by Jinill
if f2 < f
if retcode2==2 || retcode2==4
@ -169,11 +166,15 @@ while ~done
end
wall2=badg2;
% g2
badg2
save g2.mat g2 x2 f2 varargin
if Verbose
badg2
end
if Save_files
save g2.mat g2 x2 f2 varargin
end
end
if wall2
disp('Cliff again. Try traversing')
disp_verbose('Cliff again. Try traversing',Verbose)
if norm(x2-x1) < 1e-13
f3=f; x3=x; badg3=1;retcode3=101;
else
@ -181,7 +182,7 @@ while ~done
if(size(x0,2)>1)
gcliff=gcliff';
end
[f3, x3, fc, retcode3] = csminit1(fcn,x,f,gcliff,0,eye(nx),varargin{:});
[f3, x3, fc, retcode3] = csminit1(fcn,x,f,gcliff,0,eye(nx),Verbose,varargin{:});
fcount = fcount+fc; % put by Jinill
if retcode3==2 || retcode3==4
wall3=1;
@ -197,7 +198,9 @@ while ~done
end
wall3=badg3;
% g3
save g3.mat g3 x3 f3 varargin;
if Save_files
save g3.mat g3 x3 f3 varargin;
end
end
end
else
@ -225,7 +228,7 @@ while ~done
fh=f1;xh=x1;gh=g1;badgh=badg1;retcodeh=retcode1;
else
[fh,ih] = min([f1,f2,f3]);
%disp(sprintf('ih = %d',ih))
%disp_verbose(sprintf('ih = %d',ih))
%eval(['xh=x' num2str(ih) ';'])
switch ih
case 1
@ -259,18 +262,16 @@ while ~done
%end of picking
stuck = (abs(fh-f) < crit);
if (~badg) && (~badgh) && (~stuck)
H = bfgsi1(H,gh-g,xh-x);
end
if Verbose
disp('----')
disp(sprintf('Improvement on iteration %d = %18.9f',itct,f-fh))
H = bfgsi1(H,gh-g,xh-x,Verbose,Save_files);
end
disp_verbose('----',Verbose)
disp_verbose(sprintf('Improvement on iteration %d = %18.9f',itct,f-fh),Verbose)
% if Verbose
if itct > nit
disp('iteration count termination')
disp_verbose('iteration count termination',Verbose)
done = 1;
elseif stuck
disp('improvement < crit termination')
disp_verbose('improvement < crit termination',Verbose)
done = 1;
end
rc=retcodeh;
@ -278,19 +279,19 @@ while ~done
if rc ==0
%do nothing, just a normal step
elseif rc == 1
disp('zero gradient')
disp_verbose('zero gradient',Verbose)
elseif rc == 6
disp('smallest step still improving too slow, reversed gradient')
disp_verbose('smallest step still improving too slow, reversed gradient',Verbose)
elseif rc == 5
disp('largest step still improving too fast')
disp_verbose('largest step still improving too fast',Verbose)
elseif (rc == 4) || (rc==2)
disp('back and forth on step length never finished')
disp_verbose('back and forth on step length never finished',Verbose)
elseif rc == 3
disp('smallest step still improving too slow')
disp_verbose('smallest step still improving too slow',Verbose)
elseif rc == 7
disp('warning: possible inaccuracy in H matrix')
disp_verbose('warning: possible inaccuracy in H matrix',Verbose)
else
error('Unaccounted Case, please contact the developers')
error('Unaccounted Case, please contact the developers',Verbose)
end
end

View File

@ -112,13 +112,15 @@ switch minimizer_algorithm
end
npar=length(start_par_value);
[LB, UB]=set_bounds_to_finite_values(bounds, options_.huge_number);
fprintf('\nNumber of parameters= %d, initial temperatur= %4.3f \n', npar,sa_options.initial_temperature);
fprintf('rt= %4.3f; TolFun= %4.3f; ns= %4.3f;\n',sa_options.rt,sa_options.TolFun,sa_options.ns);
fprintf('nt= %4.3f; neps= %4.3f; MaxIter= %d\n',sa_options.nt,sa_options.neps,sa_options.MaxIter);
fprintf('Initial step length(vm): %4.3f; step_length_c: %4.3f\n', sa_options.initial_step_length,sa_options.step_length_c);
fprintf('%-20s %-6s %-6s %-6s\n','Name:', 'LB;','Start;','UB;');
for pariter=1:npar
fprintf('%-20s %6.4f; %6.4f; %6.4f;\n',parameter_names{pariter}, LB(pariter),start_par_value(pariter),UB(pariter));
if sa_options.verbosity
fprintf('\nNumber of parameters= %d, initial temperatur= %4.3f \n', npar,sa_options.initial_temperature);
fprintf('rt= %4.3f; TolFun= %4.3f; ns= %4.3f;\n',sa_options.rt,sa_options.TolFun,sa_options.ns);
fprintf('nt= %4.3f; neps= %4.3f; MaxIter= %d\n',sa_options.nt,sa_options.neps,sa_options.MaxIter);
fprintf('Initial step length(vm): %4.3f; step_length_c: %4.3f\n', sa_options.initial_step_length,sa_options.step_length_c);
fprintf('%-20s %-6s %-6s %-6s\n','Name:', 'LB;','Start;','UB;');
for pariter=1:npar
fprintf('%-20s %6.4f; %6.4f; %6.4f;\n',parameter_names{pariter}, LB(pariter),start_par_value(pariter),UB(pariter));
end
end
sa_options.initial_step_length= sa_options.initial_step_length*ones(npar,1); %bring step length to correct vector size
sa_options.step_length_c= sa_options.step_length_c*ones(npar,1); %bring step_length_c to correct vector size
@ -152,6 +154,8 @@ switch minimizer_algorithm
nit = options_.csminwel.maxiter;
numgrad = options_.gradient_method;
epsilon = options_.gradient_epsilon;
Verbose = options_.csminwel.verbosity;
Save_files = options_.csminwel.Save_files;
% Change some options.
if ~isempty(options_.optim_opt)
options_list = read_key_value_string(options_.optim_opt);
@ -167,6 +171,10 @@ switch minimizer_algorithm
numgrad = options_list{i,2};
case 'NumgradEpsilon'
epsilon = options_list{i,2};
case 'verbosity'
Verbose = options_list{i,2};
case 'SaveFiles'
Save_files = options_list{i,2};
otherwise
warning(['csminwel: Unknown option (' options_list{i,1} ')!'])
end
@ -180,7 +188,7 @@ switch minimizer_algorithm
end
% Call csminwell.
[fval,opt_par_values,grad,inverse_hessian_mat,itct,fcount,exitflag] = ...
csminwel1(objective_function, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, varargin{:});
csminwel1(objective_function, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, Verbose, Save_files, varargin{:});
hessian_mat=inv(inverse_hessian_mat);
case 5
if options_.analytic_derivation==-1 %set outside as code for use of analytic derivation
@ -193,6 +201,8 @@ switch minimizer_algorithm
newratflag = options_.newrat.hess; %default
end
nit=options_.newrat.maxiter;
Verbose = options_.newrat.verbosity;
Save_files = options_.newrat.Save_files;
if ~isempty(options_.optim_opt)
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
@ -208,12 +218,16 @@ switch minimizer_algorithm
end
case 'TolFun'
crit = options_list{i,2};
case 'verbosity'
Verbose = options_list{i,2};
case 'SaveFiles'
Save_files = options_list{i,2};
otherwise
warning(['newrat: Unknown option (' options_list{i,1} ')!'])
end
end
end
[opt_par_values,hessian_mat,gg,fval,invhess] = newrat(objective_function,start_par_value,analytic_grad,crit,nit,0,varargin{:});
[opt_par_values,hessian_mat,gg,fval,invhess] = newrat(objective_function,start_par_value,analytic_grad,crit,nit,0,Verbose, Save_files,varargin{:});
%hessian_mat is the plain outer product gradient Hessian
case 6
[opt_par_values, hessian_mat, Scale, fval] = gmhmaxlik(objective_function, start_par_value, ...
@ -255,6 +269,8 @@ switch minimizer_algorithm
simplexOptions.maxfcallfactor = options_list{i,2};
case 'InitialSimplexSize'
simplexOptions.delta_factor = options_list{i,2};
case 'verbosity'
simplexOptions.verbose = options_list{i,2};
otherwise
warning(['simplex: Unknown option (' options_list{i,1} ')!'])
end
@ -278,6 +294,17 @@ switch minimizer_algorithm
cmaesOptions.TolX = options_list{i,2};
case 'MaxFunEvals'
cmaesOptions.MaxFunEvals = options_list{i,2};
case 'verbosity'
if options_list{i,2}==0
cmaesOptions.DispFinal = 'off'; % display messages like initial and final message';
cmaesOptions.DispModulo = '0'; % [0:Inf], disp messages after every i-th iteration';
end
case 'SaveFiles'
if options_list{i,2}==0
cmaesOptions.SaveVariables='off';
cmaesOptions.LogModulo = '0'; % [0:Inf] if >1 record data less frequently after gen=100';
cmaesOptions.LogTime = '0'; % [0:100] max. percentage of time for recording data';
end
otherwise
warning(['cmaes: Unknown option (' options_list{i,1} ')!'])
end
@ -309,6 +336,12 @@ switch minimizer_algorithm
simpsaOptions.TEMP_END = options_list{i,2};
case 'MaxFunEvals'
simpsaOptions.MAX_FUN_EVALS = options_list{i,2};
case 'verbosity'
if options_list{i,2} == 0
simpsaOptions.DISPLAY = 'none';
else
simpsaOptions.DISPLAY = 'iter';
end
otherwise
warning(['simpsa: Unknown option (' options_list{i,1} ')!'])
end
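Since the new keys are parsed from options_.optim_opt like the other optimizer options, a user can pass them from a .mod file through the estimation command's optim option. The sketch below shows the assumed MATLAB-level equivalent (the exact quoting of the option string is an assumption):

% equivalent of optim=('verbosity',0,'SaveFiles',0) in the .mod file
options_.optim_opt = '''verbosity'',0,''SaveFiles'',0';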

View File

@ -1,4 +1,4 @@
function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin)
function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,Verbose,Save_files,varargin)
% function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin)
%
% Gibbs type step in optimisation
@ -69,14 +69,19 @@ while i<n
gg(i)=(f1(i)'-f_1(i)')./(2.*h1(i));
hh(i) = 1/max(1.e-9,abs( (f1(i)+f_1(i)-2*f0)./(h1(i)*h1(i)) ));
if gg(i)*(hh(i)*gg(i))/2 > htol
[f0 x fc retcode] = csminit1(func0,x,f0,gg,0,diag(hh),varargin{:});
[f0 x fc retcode] = csminit1(func0,x,f0,gg,0,diag(hh),Verbose,varargin{:});
ig(i)=1;
fprintf(['Done for param %s = %8.4f\n'],varargin{6}.name{i},x(i))
if Verbose
fprintf(['Done for param %s = %8.4f\n'],varargin{6}.name{i},x(i))
end
end
xh1=x;
end
if Save_files
save gstep.mat x h1 f0
end
end
if Save_files
save gstep.mat x h1 f0
end
save gstep.mat x h1 f0

View File

@ -1,4 +1,4 @@
function [xparam1, hh, gg, fval, igg] = newrat(func0, x, analytic_derivation, ftol0, nit, flagg, varargin)
function [xparam1, hh, gg, fval, igg] = newrat(func0, x, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, varargin)
% [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit, flagg, varargin)
%
% Optimiser with outer product gradient and with sequences of univariate steps
@ -49,6 +49,7 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, analytic_derivation, ft
global objective_function_penalty_base
icount=0;
nx=length(x);
xparam1=x;
@ -95,14 +96,19 @@ else
h1=[];
end
H = igg;
disp(['Gradient norm ',num2str(norm(gg))])
disp_verbose(['Gradient norm ',num2str(norm(gg))],Verbose)
ee=eig(hh);
disp(['Minimum Hessian eigenvalue ',num2str(min(ee))])
disp(['Maximum Hessian eigenvalue ',num2str(max(ee))])
disp_verbose(['Minimum Hessian eigenvalue ',num2str(min(ee))],Verbose)
disp_verbose(['Maximum Hessian eigenvalue ',num2str(max(ee))],Verbose)
g=gg;
check=0;
if max(eig(hh))<0, disp('Negative definite Hessian! Local maximum!'), pause, end,
save m1.mat x hh g hhg igg fval0
if max(eig(hh))<0
disp_verbose('Negative definite Hessian! Local maximum!',Verbose)
pause
end
if Save_files
save m1.mat x hh g hhg igg fval0
end
igrad=1;
igibbs=1;
@ -116,13 +122,13 @@ while norm(gg)>gtol && check==0 && jit<nit
tic
icount=icount+1;
objective_function_penalty_base = fval0(icount);
disp([' '])
disp(['Iteration ',num2str(icount)])
[fval,x0,fc,retcode] = csminit1(func0,xparam1,fval0(icount),gg,0,H,varargin{:});
disp_verbose([' '],Verbose)
disp_verbose(['Iteration ',num2str(icount)],Verbose)
[fval,x0,fc,retcode] = csminit1(func0,xparam1,fval0(icount),gg,0,H,Verbose,varargin{:});
if igrad
[fval1,x01,fc,retcode1] = csminit1(func0,x0,fval,gg,0,inx,varargin{:});
[fval1,x01,fc,retcode1] = csminit1(func0,x0,fval,gg,0,inx,Verbose,varargin{:});
if (fval-fval1)>1
disp('Gradient step!!')
disp_verbose('Gradient step!!',Verbose)
else
igrad=0;
end
@ -139,27 +145,27 @@ while norm(gg)>gtol && check==0 && jit<nit
end
iggx=eye(length(gg));
iggx(find(ig),find(ig)) = inv( hhx(find(ig),find(ig)) );
[fvala,x0,fc,retcode] = csminit1(func0,x0,fval,ggx,0,iggx,varargin{:});
[fvala,x0,fc,retcode] = csminit1(func0,x0,fval,ggx,0,iggx,Verbose,varargin{:});
end
[fvala, x0, ig] = mr_gstep(h1,x0,func0,htol,varargin{:});
[fvala, x0, ig] = mr_gstep(h1,x0,func0,htol,Verbose,Save_files,varargin{:});
nig=[nig ig];
disp('Sequence of univariate steps!!')
disp_verbose('Sequence of univariate steps!!',Verbose)
fval=fvala;
if (fval0(icount)-fval)<ftol && flagit==0
disp('Try diagonal Hessian')
disp_verbose('Try diagonal Hessian',Verbose)
ihh=diag(1./(diag(hhg)));
[fval2,x0,fc,retcode2] = csminit1(func0,x0,fval,gg,0,ihh,varargin{:});
[fval2,x0,fc,retcode2] = csminit1(func0,x0,fval,gg,0,ihh,Verbose,varargin{:});
if (fval-fval2)>=ftol
disp('Diagonal Hessian successful')
disp_verbose('Diagonal Hessian successful',Verbose)
end
fval=fval2;
end
if (fval0(icount)-fval)<ftol && flagit==0
disp('Try gradient direction')
disp_verbose('Try gradient direction',Verbose)
ihh0=inx.*1.e-4;
[fval3,x0,fc,retcode3] = csminit1(func0,x0,fval,gg,0,ihh0,varargin{:});
[fval3,x0,fc,retcode3] = csminit1(func0,x0,fval,gg,0,ihh0,Verbose,varargin{:});
if (fval-fval3)>=ftol
disp('Gradient direction successful')
disp_verbose('Gradient direction successful',Verbose)
end
fval=fval3;
end
@ -167,7 +173,7 @@ while norm(gg)>gtol && check==0 && jit<nit
x(:,icount+1)=xparam1;
fval0(icount+1)=fval;
if (fval0(icount)-fval)<ftol
disp('No further improvement is possible!')
disp_verbose('No further improvement is possible!',Verbose)
check=1;
if analytic_derivation,
[fvalx,gg,hh]=feval(func0,xparam1,varargin{:});
@ -189,29 +195,33 @@ while norm(gg)>gtol && check==0 && jit<nit
end
end
end
disp(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))])
disp(['FVAL ',num2str(fval)])
disp(['Improvement ',num2str(fval0(icount)-fval)])
disp(['Ftol ',num2str(ftol)])
disp(['Htol ',num2str(htol0)])
disp(['Gradient norm ',num2str(norm(gg))])
disp_verbose(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))],Verbose)
disp_verbose(['FVAL ',num2str(fval)],Verbose)
disp_verbose(['Improvement ',num2str(fval0(icount)-fval)],Verbose)
disp_verbose(['Ftol ',num2str(ftol)],Verbose)
disp_verbose(['Htol ',num2str(htol0)],Verbose)
disp_verbose(['Gradient norm ',num2str(norm(gg))],Verbose)
ee=eig(hh);
disp(['Minimum Hessian eigenvalue ',num2str(min(ee))])
disp(['Maximum Hessian eigenvalue ',num2str(max(ee))])
disp_verbose(['Minimum Hessian eigenvalue ',num2str(min(ee))],Verbose)
disp_verbose(['Maximum Hessian eigenvalue ',num2str(max(ee))],Verbose)
g(:,icount+1)=gg;
else
df = fval0(icount)-fval;
disp(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))])
disp(['FVAL ',num2str(fval)])
disp(['Improvement ',num2str(df)])
disp(['Ftol ',num2str(ftol)])
disp(['Htol ',num2str(htol0)])
disp_verbose(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))],Verbose)
disp_verbose(['FVAL ',num2str(fval)],Verbose)
disp_verbose(['Improvement ',num2str(df)],Verbose)
disp_verbose(['Ftol ',num2str(ftol)],Verbose)
disp_verbose(['Htol ',num2str(htol0)],Verbose)
htol=htol_base;
if norm(x(:,icount)-xparam1)>1.e-12 && analytic_derivation==0,
try
save m1.mat x fval0 nig -append
if Save_files
save m1.mat x fval0 nig -append
end
catch
save m1.mat x fval0 nig
if Save_files
save m1.mat x fval0 nig
end
end
[dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func0,flagit,htol,varargin{:});
if isempty(dum),
@ -220,12 +230,12 @@ while norm(gg)>gtol && check==0 && jit<nit
if htol0>htol
htol=htol0;
skipline()
disp('Numerical noise in the likelihood')
disp('Tolerance has to be relaxed')
disp_verbose('Numerical noise in the likelihood',Verbose)
disp_verbose('Tolerance has to be relaxed',Verbose)
skipline()
end
if ~outer_product_gradient,
H = bfgsi1(H,gg-g(:,icount),xparam1-x(:,icount));
H = bfgsi1(H,gg-g(:,icount),xparam1-x(:,icount),Verbose,Save_files);
hh=inv(H);
hhg=hh;
else
@ -246,39 +256,44 @@ while norm(gg)>gtol && check==0 && jit<nit
hhg=hh;
H = inv(hh);
end
disp(['Gradient norm ',num2str(norm(gg))])
disp_verbose(['Gradient norm ',num2str(norm(gg))],Verbose)
ee=eig(hh);
disp(['Minimum Hessian eigenvalue ',num2str(min(ee))])
disp(['Maximum Hessian eigenvalue ',num2str(max(ee))])
if max(eig(hh))<0, disp('Negative definite Hessian! Local maximum!'), pause(1), end,
disp_verbose(['Minimum Hessian eigenvalue ',num2str(min(ee))],Verbose)
disp_verbose(['Maximum Hessian eigenvalue ',num2str(max(ee))],Verbose)
if max(eig(hh))<0
disp_verbose('Negative definite Hessian! Local maximum!',Verbose)
pause(1)
end
t=toc;
disp(['Elapsed time for iteration ',num2str(t),' s.'])
disp_verbose(['Elapsed time for iteration ',num2str(t),' s.'],Verbose)
g(:,icount+1)=gg;
save m1.mat x hh g hhg igg fval0 nig H
if Save_files
save m1.mat x hh g hhg igg fval0 nig H
end
end
end
save m1.mat x hh g hhg igg fval0 nig
if Save_files
save m1.mat x hh g hhg igg fval0 nig
end
if ftol>ftol0
skipline()
disp('Numerical noise in the likelihood')
disp('Tolerance had to be relaxed')
disp_verbose('Numerical noise in the likelihood',Verbose)
disp_verbose('Tolerance had to be relaxed',Verbose)
skipline()
end
if jit==nit
skipline()
disp('Maximum number of iterations reached')
disp_verbose('Maximum number of iterations reached',Verbose)
skipline()
end
if norm(gg)<=gtol
disp(['Estimation ended:'])
disp(['Gradient norm < ', num2str(gtol)])
disp_verbose(['Estimation ended:'],Verbose)
disp_verbose(['Gradient norm < ', num2str(gtol)],Verbose)
end
if check==1,
disp(['Estimation successful.'])
disp_verbose(['Estimation successful.'],Verbose)
end
return
return

View File

@ -507,12 +507,12 @@ fval = fv(1);
exitflag = 1;
if func_count>= max_func_calls
disp('Maximum number of objective function calls has been exceeded!')
disp_verbose('Maximum number of objective function calls has been exceeded!',verbose)
exitflag = 0;
end
if iter_count>= max_iterations
disp('Maximum number of iterations has been exceeded!')
disp_verbose('Maximum number of iterations has been exceeded!',verbose)
exitflag = 0;
end

View File

@ -0,0 +1,25 @@
function disp_verbose(input_string,Verbose)
% function disp_verbose(input_string,Verbose)
% Prints input_string unless Verbose=0 is requested
%
% Copyright (C) 2015 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
if Verbose
disp(input_string)
end
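Usage mirrors disp, with the extra flag deciding whether anything is printed; a minimal sketch:

disp_verbose('bfgs update failed.', 1)  % printed
disp_verbose('bfgs update failed.', 0)  % suppressed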

View File

@ -9,8 +9,9 @@ crit = 1e-7;
numgrad = 2;
epsilon = 1e-6;
analytic_grad=[];
Verbose=1;
Save_files=1;
%call optimizer
[fval,opt_par_values,grad,hessian_mat,itct,fcount,exitflag] = ...
csminwel1(objective_function_handle, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, varargin{:});
csminwel1(objective_function_handle, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, Verbose,Save_files, varargin{:});
end