From 405b1f7368882e1e4bf071c29b10bb07e2151fa1 Mon Sep 17 00:00:00 2001
From: Johannes Pfeifer
Date: Mon, 6 Apr 2015 11:05:12 +0200
Subject: [PATCH] Add solvopt.m to Dynare optimizers

Restores compatibility of mode_compute=101
---
 matlab/global_initialization.m               |  11 +
 matlab/optimization/apprgrdn.m               |  73 ++
 .../optimization/dynare_minimize_objective.m |  28 +-
 matlab/optimization/solvopt.m                | 991 ++++++++++++++++++
 4 files changed, 1098 insertions(+), 5 deletions(-)
 create mode 100644 matlab/optimization/apprgrdn.m
 create mode 100644 matlab/optimization/solvopt.m

diff --git a/matlab/global_initialization.m b/matlab/global_initialization.m
index f169bfe6a..bd6f7140b 100644
--- a/matlab/global_initialization.m
+++ b/matlab/global_initialization.m
@@ -525,6 +525,17 @@ simpsa.MAX_FUN_EVALS = 20000;
 simpsa.DISPLAY = 'iter';
 options_.simpsa = simpsa;
 
+%solveopt optimizer
+solveopt.minimizer_indicator=-1; %use minimizer
+solveopt.TolX=1e-6; %accuracy of argument
+solveopt.TolFun=1e-6; %accuracy of function
+solveopt.MaxIter=15000;
+solveopt.verbosity=1;
+solveopt.TolXConstraint=1.e-8;
+solveopt.SpaceDilation=2.5;
+solveopt.LBGradientStep=1.e-11;
+options_.solveopt=solveopt;
+
 % prior analysis
 options_.prior_mc = 20000;
 options_.prior_analysis_endo_var_list = [];
diff --git a/matlab/optimization/apprgrdn.m b/matlab/optimization/apprgrdn.m
new file mode 100644
index 000000000..9f16e4466
--- /dev/null
+++ b/matlab/optimization/apprgrdn.m
@@ -0,0 +1,73 @@
+function g = apprgrdn(x,f,fun,deltax,obj,varargin)
+% g = apprgrdn(x,f,fun,deltax,obj,varargin)
+% Performs the finite difference approximation of the gradient at a
+% point used in solveopt
+%
+% Inputs:
+% x:      point at which to evaluate the gradient
+% f:      calculated function value at the point x
+% fun:    name of the Matlab function calculating the function values
+% deltax: vector of the relative stepsizes
+% obj:    flag indicating whether the gradient of the objective
+%         function (1) or the constraint function (0) is to be calculated
+%
+% Modified by Giovanni Lombardo and Johannes Pfeifer to accommodate Dynare
+% structure
+%
+%
+% Copyright (C) 1997-2008, Alexei Kuntsevich and Franz Kappel
+% Copyright (C) 2008-2015 Giovanni Lombardo
+% Copyright (C) 2015 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
+n=max(size(x)); ee=ones(size(x));
+di=abs(x); idx=find(di<5e-15); di(idx)=5e-15*ee(idx);
+di=deltax.*di;
+if obj
+    idx=find(abs(di)<2e-10);
+    di(idx)=2e-10*sign(di(idx));
+else
+    idx=find(abs(di)<5e-15);
+    di(idx)=5e-15*sign(di(idx));
+end
+y=x;
+
+g=NaN(n,1);
+for i=1:n
+    y(i)=x(i)+di(i);
+    fi=feval(fun,y,varargin{:});
+    if obj
+        if fi==f,
+            for j=1:3
+                di(i)=di(i)*10; y(i)=x(i)+di(i);
+                fi=feval(fun,y,varargin{:});
+                if fi~=f
+                    break
+                end
+            end
+        end
+    end
+    g(i)=(fi-f)/di(i);
+    if obj
+        if ~isempty(idx) && any(idx==i)
+            y(i)=x(i)-di(i);
+            fi=feval(fun,y,varargin{:});
+            g(i)=.5*(g(i)+(f-fi)/di(i));
+        end
+    end
+    y(i)=x(i);
+end
diff --git a/matlab/optimization/dynare_minimize_objective.m b/matlab/optimization/dynare_minimize_objective.m
index 4113bf119..773b6a9d0 100644
--- a/matlab/optimization/dynare_minimize_objective.m
+++ b/matlab/optimization/dynare_minimize_objective.m
@@ -278,11 +278,29 @@ switch minimizer_algorithm
     options_.cova_compute = 0 ;
     [opt_par_values,stdh,lb_95,ub_95,med_param] = online_auxiliary_filter(start_par_value,varargin{:}) ;
   case 101
-    myoptions=soptions;
-    myoptions(2)=1e-6; %accuracy of argument
-    myoptions(3)=1e-6; %accuracy of function (see Solvopt p.29)
-    myoptions(5)= 1.0;
-    [opt_par_values,fval]=solvopt(start_par_value,objective_function,[],myoptions,varargin{:});
+    solveoptoptions = options_.solveopt;
+    if ~isempty(options_.optim_opt)
+        options_list = read_key_value_string(options_.optim_opt);
+        for i=1:rows(options_list)
+            switch options_list{i,1}
+              case 'TolX'
+                solveoptoptions.TolX = options_list{i,2};
+              case 'TolFun'
+                solveoptoptions.TolFun = options_list{i,2};
+              case 'MaxIter'
+                solveoptoptions.MaxIter = options_list{i,2};
+              case 'verbosity'
+                solveoptoptions.verbosity = options_list{i,2};
+              case 'SpaceDilation'
+                solveoptoptions.SpaceDilation = options_list{i,2};
+              case 'LBGradientStep'
+                solveoptoptions.LBGradientStep = options_list{i,2};
+              otherwise
+                warning(['solveopt: Unknown option (' options_list{i,1} ')!'])
+            end
+        end
+    end
+    [opt_par_values,fval]=solvopt(start_par_value,objective_function,[],[],[],solveoptoptions,varargin{:});
   case 102 %simulating annealing
     LB = start_par_value - 1;
diff --git a/matlab/optimization/solvopt.m b/matlab/optimization/solvopt.m
new file mode 100644
index 000000000..9e5aa6139
--- /dev/null
+++ b/matlab/optimization/solvopt.m
@@ -0,0 +1,991 @@
+function [x,f,exitflag,n_f_evals,n_grad_evals,n_constraint_evals,n_constraint_gradient_evals]=solvopt(x,fun,grad,func,gradc,optim,varargin)
+% [x,f,exitflag,n_f_evals,n_grad_evals,n_constraint_evals,n_constraint_gradient_evals]=solvopt(x,fun,grad,func,gradc,optim,varargin)
+%
+% The function SOLVOPT, developed by Alexei Kuntsevich and Franz Kappel,
+% performs a modified version of Shor's r-algorithm in
+% order to find a local minimum resp. maximum of a nonlinear function
+% defined on the n-dimensional Euclidean space, or
+% a solution of a nonlinear constrained problem:
+% min { f(x): g(x) (<)= 0, g(x) in R(m), x in R(n) }
+%
+% Inputs:
+% x       n-vector (row or column) of the coordinates of the starting
+%         point,
+% fun     name of an M-file (M-function) which computes the value
+%         of the objective function at a point x,
+%         synopsis: f=fun(x)
+% grad    name of an M-file (M-function) which computes the gradient
+%         vector of the function at a point x,
+%         synopsis: g=grad(x)
+% func    name of an M-file (M-function) which computes the MAXIMAL
+%         RESIDUAL(!)
for a set of constraints at a point x,
+%         synopsis: fc=func(x)
+% gradc   name of an M-file (M-function) which computes the gradient
+%         vector for the maximal residual constraint at a point x,
+%         synopsis: gc=gradc(x)
+% optim   Options structure with fields:
+%   optim.minimizer_indicator= H, where sign(H)=-1 resp. sign(H)=+1 means minimize
+%       resp. maximize (valid only for an unconstrained problem)
+%       and H itself is a factor for the initial trial step size
+%       (optim.minimizer_indicator=-1 by default),
+%   optim.TolX= relative error for the argument in terms of the
+%       infinity-norm (1.e-6 by default),
+%   optim.TolFun= relative error for the function value (1.e-6 by default),
+%   optim.MaxIter= limit for the number of iterations (15000 by default),
+%   optim.verbosity= control of the display of intermediate results and error
+%       resp. warning messages (default value is 1; at 0, no intermediate
+%       output but error and warning messages, see more in the manual),
+%   optim.TolXConstraint= admissible maximal residual for a set of constraints
+%       (optim.TolXConstraint=1e-8 by default, see more in the manual),
+%  *optim.SpaceDilation= the coefficient of space dilation (2.5 by default),
+%  *optim.LBGradientStep= lower bound for the stepsize used for the difference
+%       approximation of gradients (1e-11 by default, see more in the manual).
+%       (* ... changes should be done with care)
+%
+% Outputs:
+% x       optimal parameter vector (row resp. column),
+% f       optimal function value,
+% exitflag: the number of iterations, if positive,
+%         or an abnormal stop code, if negative (see more in the manual),
+% n_f_evals: number of objective function evaluations,
+% n_grad_evals: number of gradient evaluations,
+% n_constraint_evals: number of constraint function evaluations,
+% n_constraint_gradient_evals: number of constraint gradient evaluations.
+%
+%
+% Algorithm: Kuntsevich, A.V., Kappel, F., The solver for local nonlinear optimization problems
+% (version 1.1, Matlab, C, FORTRAN). University of Graz, Graz, 1997.
+%
+%
+% Copyright (C) 1997-2008, Alexei Kuntsevich and Franz Kappel
+% Copyright (C) 2008-2015 Giovanni Lombardo
+% Copyright (C) 2015 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
+
+
+% strings: ----{
+errmes='SolvOpt error:';
+wrnmes='SolvOpt warning:';
+error1='No function name and/or starting point passed to the function.';
+error2='Argument X has to be a row or column vector of dimension > 1.';
+error30='<fun> returns an empty string.';
+error31='Function value does not exist (NaN is returned).';
+error32='Function equals infinity at the point.';
+error40='<grad> returns an improper matrix.
Check the dimension.';
+error41='Gradient does not exist (NaN is returned by <grad>).';
+error42='Gradient equals infinity at the starting point.';
+error43='Gradient equals zero at the starting point.';
+error50='<func> returns an empty string.';
+error51='<func> returns NaN at the point.';
+error52='<func> returns infinite value at the point.';
+error60='<gradc> returns an improper vector. Check the dimension.';
+error61='<gradc> returns NaN at the point.';
+error62='<gradc> returns infinite vector at the point.';
+error63='<gradc> returns zero vector at an infeasible point.';
+error5='Function is unbounded.';
+error6='Choose another starting point.';
+warn1= 'Gradient is zero at the point, but stopping criteria are not fulfilled.';
+warn20='Normal re-setting of a transformation matrix.';
+warn21='Re-setting due to the use of a new penalty coefficient.';
+warn4= 'Iterations limit exceeded.';
+warn31='The function is flat in certain directions.';
+warn32='Trying to recover by shifting insensitive variables.';
+warn09='Re-run from recorded point.';
+warn08='Ravine with a flat bottom is detected.';
+termwarn0='SolvOpt: Normal termination.';
+termwarn1='SolvOpt: Termination warning:';
+appwarn='The above warning may be caused by an inaccurate gradient approximation.';
+endwarn=[...
+    'Premature stop is possible. Try to re-run the routine from the obtained point.               ';...
+    'Result may not provide the optimum. The function apparently has many extremum points.        ';...
+    'Result may be inaccurate in the coordinates. The function is flat at the optimum.            ';...
+    'Result may be inaccurate in a function value. The function is extremely steep at the optimum.'];
+% ----}
+
+% ARGUMENTS PASSED ----{
+if nargin<2            % Function and/or starting point are not specified
+    exitflag=-1;
+    disp(errmes);
+    disp(error1);
+    return
+end
+if nargin<3
+    app=1;             % No user-supplied gradients
+elseif isempty(grad)
+    app=1;
+else
+    app=0;             % Exact gradients are supplied
+end
+
+% OPTIONS ----{
+doptions.minimizer_indicator=1;
+doptions.TolX=1e-6;            %accuracy of argument
+doptions.TolFun=1e-6;          %accuracy of function (see Solvopt p.29)
+doptions.MaxIter=15000;
+doptions.verbosity=1;
+doptions.TolXConstraint=1.e-8;
+doptions.SpaceDilation=2.5;
+doptions.LBGradientStep=1.e-11;
+
+if nargin<4
+    optim=doptions;
+elseif isempty(optim)
+    optim=doptions;
+end
+% Check the values:
+optim.TolX=max(optim.TolX,1.e-12);
+optim.TolFun=max(optim.TolFun,1.e-12);
+optim.TolX=max(optim.LBGradientStep*1.e2,optim.TolX);
+optim.TolX=min(optim.TolX,1);
+optim.TolFun=min(optim.TolFun,1);
+optim.TolXConstraint=max(optim.TolXConstraint,1e-12);
+optim.SpaceDilation=max([optim.SpaceDilation,1.5]);
+optim.LBGradientStep=max(optim.LBGradientStep,1e-11);
+% ----}
+
+if isempty(func)
+    constr=0;
+else
+    constr=1;          % Constrained problem
+    if isempty(gradc),
+        appconstr=1;
+    else
+        appconstr=0;   % Exact gradients of constraints are supplied
+    end
+end
+% ----}
+
+% STARTING POINT ----{
+if max(size(x))<=1
+    disp(errmes);
+    disp(error2);
+    exitflag=-2;
+    return
+elseif size(x,2)==1
+    n=size(x,1);
+    x=x';
+    trx=1;
+elseif size(x,1)==1
+    n=size(x,2);
+    trx=0;
+else
+    disp(errmes);
+    disp(error2);
+    exitflag=-2;
+    return
+end
+% ----}
+
+% WORKING CONSTANTS AND COUNTERS ----{
+
+n_f_evals=0; n_grad_evals=0;   % function and gradient calculations
+if constr
+    n_constraint_evals=0;
+    n_constraint_gradient_evals=0;    % same for constraints
+end
+epsnorm=1.e-15;
+epsnorm2=1.e-30;               % epsilon & epsilon^2
+
+if constr, h1=-1;              % NLP: restricted to minimization
+    cnteps=optim.TolXConstraint;   % Max.
admissible residual +else + h1=sign(optim.minimizer_indicator); % Minimize resp. maximize a function +end + +k=0; % Iteration counter + +wdef=1/optim.SpaceDilation-1; % Default space transf. coeff. + +%Gamma control ---{ +ajb=1+.1/n^2; % Base I +ajp=20; +ajpp=ajp; % Start value for the power +ajs=1.15; % Base II +knorms=0; gnorms=zeros(1,10); % Gradient norms stored +%---} + +%Display control ---{ +if optim.verbosity<=0, dispdata=0; + if optim.verbosity==-1 + dispwarn=0; + else + dispwarn=1; + end +else + dispdata=round(optim.verbosity); + dispwarn=1; +end +ld=dispdata; +%---} + +%Stepsize control ---{ +dq=5.1; % Step divider (at f_{i+1}>gamma*f_{i}) +du20=2;du10=1.5;du03=1.05; % Step multipliers (at certain steps made) +kstore=3;nsteps=zeros(1,kstore); % Steps made at the last 'kstore' iterations +if app + des=6.3; % Desired number of steps per 1-D search +else + des=3.3; +end +mxtc=3; % Number of trial cycles (steep wall detect) +%---} +termx=0; limxterm=50; % Counter and limit for x-criterion + +ddx =max(1e-11,optim.LBGradientStep); % stepsize for gradient approximation + +low_bound=-1+1e-4; % Lower bound cosine used to detect a ravine + +ZeroGrad=n*1.e-16; % Lower bound for a gradient norm + +nzero=0; % Zero-gradient events counter +% Lower bound for values of variables taking into account +lowxbound=max([optim.TolX,1e-3]); +% Lower bound for function values to be considered as making difference +lowfbound=optim.TolFun^2; +krerun=0; % Re-run events counter +detfr=optim.TolFun*100; % relative error for f/f_{record} +detxr=optim.TolX*10; % relative error for norm(x)/norm(x_{record}) + +warnno=0; % the number of warn.mess. to end with + +kflat=0; % counter for points of flatness +stepvanish=0; % counter for vanished steps +stopf=0; +% ----} End of setting constants +% ----} End of the preamble + +% COMPUTE THE FUNCTION ( FIRST TIME ) ----{ +if trx + f=feval(fun,x',varargin{:}); +else + f=feval(fun,x,varargin{:}); +end +n_f_evals=n_f_evals+1; +if isempty(f), if dispwarn,disp(errmes);disp(error30);end + exitflag=-3; if trx, x=x';end, return +elseif isnan(f), if dispwarn,disp(errmes);disp(error31);disp(error6);end + exitflag=-3; if trx, x=x';end, return +elseif abs(f)==Inf, if dispwarn,disp(errmes);disp(error32);disp(error6);end + exitflag=-3; if trx, x=x';end, return +end +xrec=x; frec=f; % record point and function value +% Constrained problem +if constr, fp=f; kless=0; + if trx, + fc=feval(func,x'); + else + fc=feval(func,x); + end + if isempty(fc), + if dispwarn,disp(errmes);disp(error50);end + exitflag=-5; if trx, x=x';end, return + elseif isnan(fc), + if dispwarn,disp(errmes);disp(error51);disp(error6);end + exitflag=-5; if trx, x=x';end, return + elseif abs(fc)==Inf, + if dispwarn,disp(errmes);disp(error52);disp(error6);end + exitflag=-5; if trx, x=x';end, return + end + n_constraint_evals=n_constraint_evals+1; + PenCoef=1; % first rough approximation + if fc<=cnteps + FP=1; fc=0; % feasible point + else + FP=0; % infeasible point + end + f=f+PenCoef*fc; +end +% ----} +% COMPUTE THE GRADIENT ( FIRST TIME ) ----{ +if app + deltax=h1*ddx*ones(size(x)); + if constr + if trx + g=apprgrdn(x',fp,fun,deltax',1,varargin{:}); + else + g=apprgrdn(x ,fp,fun,deltax,1,varargin{:}); + end + else + if trx + g=apprgrdn(x',f,fun,deltax',1,varargin{:}); + else + g=apprgrdn(x ,f,fun,deltax,1,varargin{:}); + end + end + n_f_evals=n_f_evals+n; +else + if trx + g=feval(grad,x',varargin{:}); + else + g=feval(grad,x,varargin{:}); + end + n_grad_evals=n_grad_evals+1; +end +if size(g,2)==1, g=g'; end, 
ng=norm(g); +if size(g,2)~=n, if dispwarn,disp(errmes);disp(error40);end + exitflag=-4; if trx, x=x';end, return +elseif isnan(ng), if dispwarn,disp(errmes);disp(error41);disp(error6);end + exitflag=-4; if trx, x=x';end, return +elseif ng==Inf, if dispwarn,disp(errmes);disp(error42);disp(error6);end + exitflag=-4; if trx, x=x';end, return +elseif ng2*kd, kd=kd+1; warnno=1; + if any(abs(x-xx)epsnorm*norm(gt)) + z=z/nrmz; + g1=gt+w*(z*gt')*z; B=B+w*(B*z')*z; + else + z=zeros(1,n); + nrmz=0; + g1=gt; + end + d1=norm(g1); g0=(g1/d1)*B'; + % ----} + % RESETTING ----{ + if kcheck>1 + idx=find(abs(g)>ZeroGrad); numelem=size(idx,2); + if numelem>0, grbnd=epsnorm*numelem^2; + if all(abs(g1(idx))<=abs(g(idx))*grbnd) | nrmz==0 + if dispwarn, disp(wrnmes); disp(warn20); end + if abs(fst-f)1.2*PenCoef, + PenCoef=PenCoefNew; Reset=1; kless=0; f=f+PenCoef*fc; break + end + end + end + end + f=f+PenCoef*fc; + end + if abs(f)==Inf || isnan(f) + if dispwarn, disp(wrnmes); + if isnan(f) + disp(error31); + else + disp(error32); + end + end + if ksm || kc>=mxtc + exitflag=-3; + if trx + x=x'; + end + return + else + k2=k2+1; + k1=0; + hp=hp/dq; + x=x1; + f=f1; + knan=1; + if constr + FP=FP1; + fp=fp1; + end + end + % STEP SIZE IS ZERO TO THE EXTENT OF EPSNORM + elseif all(abs(x-x1)=5, + exitflag=-14; + if dispwarn, disp(termwarn1); + disp(endwarn(4,:)); end + if trx,x=x';end, return + else + x=x1; + f=f1; + hp=hp*10; + ksm=1; + if constr + FP=FP1; + fp=fp1; + end + end + % USE SMALLER STEP + elseif h1*f=mxtc, break, end + % 1-D OPTIMIZER IS LEFT BEHIND + else if h1*f<=h1*f1, break, end + % USE LARGER STEP + k1=k1+1; if k2>0, kc=kc+1; end, k2=0; + if k1>=20, hp=du20*hp; + elseif k1>=10, hp=du10*hp; + elseif k1>=3, hp=du03*hp; + end + end + end + % ----} End of 1-D search + % ADJUST THE TRIAL STEP SIZE ----{ + dx=norm(xopt-x); + if kg=2, nsteps(2:kg)=nsteps(1:kg-1); end + nsteps(1)=dx/(abs(h)*norm(g0)); + kk=sum(nsteps(1:kg).*[kg:-1:1])/sum([kg:-1:1]); + if kk>des + if kg==1 + h=h*(kk-des+1); + else + h=h*sqrt(kk-des+1); + end + elseif kk=20, PenCoef=PenCoef/10; Reset=1; kless=0; end + else + kless=0; + end + if appconstr, + deltax=sign(x); idx=find(deltax==0); + deltax(idx)=ones(size(idx)); deltax=ddx*deltax; + if trx + gc=apprgrdn(x',fc,func,deltax',0); + else + gc=apprgrdn(x ,fc,func,deltax ,0); + end + n_constraint_evals=n_constraint_evals+n; + else + if trx + gc=feval(gradc,x'); + else + gc=feval(gradc,x ); + end + n_constraint_gradient_evals=n_constraint_gradient_evals+1; + end + if size(gc,2)==1, gc=gc'; end, ngc=norm(gc); + if isnan(ngc), + if dispwarn,disp(errmes);disp(error61);end + exitflag=-6; if trx, x=x';end, return + elseif ngc==Inf, + if dispwarn,disp(errmes);disp(error62);end + exitflag=-6; if trx, x=x';end, return + elseif ngch1*frec, frec=f; xrec=x; grec=g; end + % ----} + if ng>ZeroGrad, + if knorms<10, knorms=knorms+1; end + if knorms>=2, gnorms(2:knorms)=gnorms(1:knorms-1); end + gnorms(1)=ng; + nng=(prod(gnorms(1:knorms)))^(1/knorms); + end + + % DISPLAY THE CURRENT VALUES ----{ + if k==ld + disp('Iter.# ..... Function ... Step Value ... 
Gradient Norm '); + disp(sprintf('%5i %13.5e %13.5e %13.5e',k,f,dx,ng)); + ld=k+dispdata; + end + %----} + % CHECK THE STOPPING CRITERIA ----{ + termflag=1; + if constr, if ~FP, termflag=0; end, end + if kcheck<=5, termflag=0; end + if knan, termflag=0; end + if kc>=mxtc, termflag=0; end + % ARGUMENT + if termflag + idx=find(abs(x)>=lowxbound); + if isempty(idx) || all(abs(xopt(idx)-x(idx))<=optim.TolX*abs(x(idx))) + termx=termx+1; + % FUNCTION + if abs(f-frec)> detfr * abs(f) && ... + abs(f-fopt)<=optim.TolFun*abs(f) && ... + krerun<=3 && ... + ~constr + if any(abs(xrec(idx)-x(idx))> detxr * abs(x(idx))) + if dispwarn + disp(wrnmes); + disp(warn09); + end + x=xrec; + f=frec; + g=grec; + ng=norm(g); + krerun=krerun+1; + h=h1*max([dx,detxr*norm(x)])/krerun; + warnno=2; + break + else + h=h*10; + end + elseif abs(f-frec)> optim.TolFun*abs(f) && ... + norm(x-xrec)=limxterm ) + if stopf + if dx<=laststep + if warnno==1 && ng=limxterm + exitflag=-14; + if dispwarn, disp(termwarn1); disp(endwarn(4,:)); + if app, disp(appwarn); end + end + x=xrec; f=frec; + if trx,x=x';end, return + else + stopf=0; + end + end + end + % ITERATIONS LIMIT + if(k==optim.MaxIter) + exitflag=-9; + if trx + x=x'; + end, + if dispwarn, disp(wrnmes); disp(warn4); end + return + end + % ----} + % ZERO GRADIENT ----{ + if constr + if ng<=ZeroGrad, + if dispwarn, disp(termwarn1); disp(warn1); end + exitflag=-8; + if trx + x=x'; + end + return + end + else + if ng<=ZeroGrad, nzero=nzero+1; + if dispwarn, disp(wrnmes); disp(warn1); end + if nzero>=3 + exitflag=-8; + if trx + x=x'; + end + return + end + g0=-h*g0/2; + for i=1:10, + x=x+g0; + if trx + f=feval(fun,x',varargin{:}); + else + f=feval(fun,x,varargin{:}); + end + n_f_evals=n_f_evals+1; + if abs(f)==Inf + if dispwarn + disp(errmes); + disp(error32); + end + exitflag=-3; + if trx + x=x'; + end + return + elseif isnan(f), + if dispwarn + disp(errmes); + disp(error32); + end + exitflag=-3; + if trx + x=x'; + end + return + end + if app, + deltax=sign(g0); + idx=find(deltax==0); + deltax(idx)=ones(size(idx)); + deltax=h1*ddx*deltax; + if trx + g=apprgrdn(x',f,fun,deltax',1,varargin{:}); + else + g=apprgrdn(x,f,fun,deltax,1,varargin{:}); + end + n_f_evals=n_f_evals+n; + else + if trx + g=feval(grad,x',varargin{:}); + else + g=feval(grad,x,varargin{:}); + end + n_grad_evals=n_grad_evals+1; + end + if size(g,2)==1 + g=g'; + end + ng=norm(g); + if ng==Inf + if dispwarn + disp(errmes); + disp(error42); + end + exitflag=-4; + if trx + x=x'; + end + return + elseif isnan(ng) + if dispwarn + disp(errmes); + disp(error41); + end + exitflag=-4; + if trx + x=x'; + end + return + end + if ng>ZeroGrad + break + end + end + if ng<=ZeroGrad, + if dispwarn + disp(termwarn1); + disp(warn1); + end + exitflag=-8; + if trx + x=x'; + end + return + end + h=h1*dx; + break + end + end + % ----} + % FUNCTION IS FLAT AT THE POINT ----{ + if ~constr && abs(f-fopt)5 && ng<1 + idx=find(abs(g)<=epsnorm2); + ni=size(idx,2); + if ni>=1 && ni<=n/2 && kflat<=3, kflat=kflat+1; + if dispwarn, disp(wrnmes); disp(warn31); end, warnno=1; + x1=x; fm=f; + for j=idx, y=x(j); f2=fm; + if y==0, x1(j)=1; elseif abs(y)<1, x1(j)=sign(y); else, x1(j)=y; end + for i=1:20, x1(j)=x1(j)/1.15; + if trx + f1=feval(fun,x1',varargin{:}); + else + f1=feval(fun,x1,varargin{:}); + end + n_f_evals=n_f_evals+1; + if abs(f1)~=Inf && ~isnan(f1), + if h1*f1>h1*fm + y=x1(j); + fm=f1; + elseif h1*f2>h1*f1 + break + elseif f2==f1 + x1(j)=x1(j)/1.5; + end + f2=f1; + end + end + x1(j)=y; + end + if h1*fm>h1*f + if app, + 
deltax=h1*ddx*ones(size(deltax)); + if trx + gt=apprgrdn(x1',fm,fun,deltax',1,varargin{:}); + else + gt=apprgrdn(x1 ,fm,fun,deltax ,1,varargin{:}); + end + n_f_evals=n_f_evals+n; + else + if trx + gt=feval(grad,x1',varargin{:}); + else + gt=feval(grad,x1,varargin{:}); + end + n_grad_evals=n_grad_evals+1; + end + if size(gt,2)==1 + gt=gt'; + end + ngt=norm(gt); + if ~isnan(ngt) && ngt>epsnorm2, + if dispwarn + disp(warn32); + end + optim.TolFun=optim.TolFun/5; + x=x1; + g=gt; + ng=ngt; + f=fm; + h=h1*dx/3; + break + end + end + end + end + % ----} + end % iterations +end % restart +% end of the function +% +end \ No newline at end of file
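
Usage note (illustrative, not part of the patch): with this change, the optimizer is selected via mode_compute=101 and tuned through the estimation command's optim option, whose keys are parsed by read_key_value_string and must match the names handled in dynare_minimize_objective.m above. A minimal sketch of a .mod-file call, assuming an otherwise complete estimation setup; the datafile name is a placeholder:

    estimation(datafile=mydata, mode_compute=101,
               optim=('TolFun',1e-7,'TolX',1e-7,'MaxIter',20000));

The solver can also be driven directly from MATLAB in the same way dynare_minimize_objective.m calls it; a sketch assuming x0 is a start vector and fun_handle maps a parameter vector to a scalar objective (both placeholders), with options_ initialized by global_initialization.m:

    opts = options_.solveopt;    % defaults defined in global_initialization.m
    opts.verbosity = 0;          % suppress intermediate output
    % empty grad/func/gradc arguments: the gradient is approximated by
    % apprgrdn.m and the problem is treated as unconstrained
    [xhat, fval, exitflag] = solvopt(x0, fun_handle, [], [], [], opts);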