Merge branch 'experimental-optimizers'

Stéphane Adjemian (Charybdis) 2015-02-24 20:44:39 +01:00
commit 0bb413e8fa
22 changed files with 536 additions and 434 deletions

View File

@@ -59,6 +59,7 @@ addpath([dynareroot '/particle/'])
addpath([dynareroot '/gsa/'])
addpath([dynareroot '/ep/'])
addpath([dynareroot '/lmmcp/'])
addpath([dynareroot '/optimization/'])
addpath([dynareroot '/modules/dates/src/'])
addpath([dynareroot '/modules/dseries/src/'])
addpath([dynareroot '/modules/dseries/src/read'])
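The optimization/ subdirectory added to the path above is where the routines introduced by this merge live (dynare_minimize_objective.m, gmhmaxlik.m, gmhmaxlik_core.m). A quick sanity check after the change, as a hedged one-liner assuming a standard Dynare layout:

which dynare_minimize_objective % should point into <dynareroot>/optimization/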

View File

@@ -231,414 +231,57 @@ end
% Estimation of the posterior mode or likelihood mode
if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
switch options_.mode_compute
case 1
if isoctave
error('Option mode_compute=1 is not available under Octave')
elseif ~user_has_matlab_license('optimization_toolbox')
error('Option mode_compute=1 requires the Optimization Toolbox')
end
% Set default optimization options for fmincon.
optim_options = optimset('display','iter', 'LargeScale','off', 'MaxFunEvals',100000, 'TolFun',1e-8, 'TolX',1e-6);
if isfield(options_,'optim_opt')
eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
end
if options_.analytic_derivation,
optim_options = optimset(optim_options,'GradObj','on','TolX',1e-7);
end
[xparam1,fval,exitflag,output,lambda,grad,hessian_fmincon] = ...
fmincon(objective_function,xparam1,[],[],[],[],bounds.lb,bounds.ub,[],optim_options,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
case 2
error('ESTIMATION: mode_compute=2 option (Lester Ingber''s Adaptive Simulated Annealing) is no longer available')
case 3
if isoctave && ~user_has_octave_forge_package('optim')
error('Option mode_compute=3 requires the optim package')
elseif ~isoctave && ~user_has_matlab_license('optimization_toolbox')
error('Option mode_compute=3 requires the Optimization Toolbox')
end
% Set default optimization options for fminunc.
optim_options = optimset('display','iter','MaxFunEvals',100000,'TolFun',1e-8,'TolX',1e-6);
if isfield(options_,'optim_opt')
eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
end
if options_.analytic_derivation,
optim_options = optimset(optim_options,'GradObj','on');
end
if ~isoctave
[xparam1,fval,exitflag] = fminunc(objective_function,xparam1,optim_options,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
else
% Under Octave, use a wrapper, since fminunc() does not have a 4th arg
func = @(x) objective_function(x, dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
[xparam1,fval,exitflag] = fminunc(func,xparam1,optim_options);
end
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
case 4
% Set default options.
H0 = 1e-4*eye(nx);
crit = 1e-7;
nit = 1000;
numgrad = options_.gradient_method;
epsilon = options_.gradient_epsilon;
% Change some options.
%prepare settings for newrat
if options_.mode_compute==5
%get whether analytical Hessian with non-analytical mode-finding is requested
newratflag=[];
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'MaxIter'
nit = options_list{i,2};
case 'InitialInverseHessian'
H0 = eval(options_list{i,2});
case 'TolFun'
crit = options_list{i,2};
case 'NumgradAlgorithm'
numgrad = options_list{i,2};
case 'NumgradEpsilon'
epsilon = options_list{i,2};
otherwise
warning(['csminwel: Unknown option (' options_list{i,1} ')!'])
if strcmp(options_list{i,1},'Hessian')
newratflag=options_list{i,2};
end
end
end
% Set flag for analytical gradient.
if options_.analytic_derivation
analytic_grad=1;
else
analytic_grad=[];
end
% Call csminwell.
[fval,xparam1,grad,hessian_csminwel,itct,fcount,retcodehat] = ...
csminwel1(objective_function, xparam1, H0, analytic_grad, crit, nit, numgrad, epsilon, dataset_, dataset_info, options_, M_, estim_params_, bayestopt_,bounds, oo_);
% Disp value at the mode.
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
case 5
if isfield(options_,'hess')
flag = options_.hess;
else
flag = 1;
end
if isfield(options_,'ftol')
crit = options_.ftol;
else
crit = 1.e-5;
end
if options_.analytic_derivation,
analytic_grad=1;
ana_deriv = options_.analytic_derivation;
options_analytic_derivation_old = options_.analytic_derivation;
options_.analytic_derivation = -1;
crit = 1.e-7;
flag = 0;
else
analytic_grad=0;
end
if isfield(options_,'nit')
nit = options_.nit;
else
nit=1000;
end
[xparam1,hh,gg,fval,invhess] = newrat(objective_function,xparam1,analytic_grad,crit,nit,0,dataset_, dataset_info, options_,M_,estim_params_,bayestopt_,bounds,oo_);
if options_.analytic_derivation,
options_.analytic_derivation = ana_deriv;
else
if flag ==0,
options_.cova_compute = 1;
kalman_algo0 = options_.kalman_algo;
if ~((options_.kalman_algo == 2) || (options_.kalman_algo == 4)),
options_.kalman_algo=2;
if options_.lik_init == 3,
options_.kalman_algo=4;
end
end
hh = reshape(mr_hessian(0,xparam1,objective_function,1,crit,dataset_, dataset_info, options_,M_,estim_params_,bayestopt_,bounds,oo_), nx, nx);
options_.kalman_algo = kalman_algo0;
if ~isempty(newratflag) && newratflag~=0 %numerical Hessian explicitly requested
error('newrat: analytic_derivation is incompatible with numerical Hessian.')
else %use default
newratflag=0; %use analytical gradient
end
end
parameter_names = bayestopt_.name;
save([M_.fname '_mode.mat'],'xparam1','hh','parameter_names');
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
case 6
% Set default options
gmhmaxlikOptions = options_.gmhmaxlik;
if ~isempty(hh);
gmhmaxlikOptions.varinit = 'previous';
else
gmhmaxlikOptions.varinit = 'prior';
end
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'NumberOfMh'
gmhmaxlikOptions.iterations = options_list{i,2};
case 'ncov-mh'
gmhmaxlikOptions.number = options_list{i,2};
case 'nscale'
gmhmaxlikOptions.nscale = options_list{i,2};
case 'nclimb'
gmhmaxlikOptions.nclimb = options_list{i,2};
case 'InitialCovarianceMatrix'
switch options_list{i,2}
case 'previous'
if isempty(hh)
error('gmhmaxlik: No previous estimate of the Hessian matrix available!')
else
gmhmaxlikOptions.varinit = 'previous'
end
case {'prior', 'identity'}
gmhmaxlikOptions.varinit = options_list{i,2};
otherwise
error('gmhmaxlik: Unknown value for option ''InitialCovarianceMatrix''!')
end
case 'AcceptanceRateTarget'
gmhmaxlikOptions.target = options_list{i,2};
if gmhmaxlikOptions.target>1 || gmhmaxlikOptions.target<eps
error('gmhmaxlik: The value of option AcceptanceRateTarget should be a double between 0 and 1!')
end
otherwise
warning(['gmhmaxlik: Unknown option (' options_list{i,1} ')!'])
elseif ~options_.analytic_derivation
if isempty(newratflag)
newratflag=options_.newrat.hess; %use default gradient
end
if newratflag==0 %Analytic Hessian wanted, but not automatically computed by newrat itself
if ~((options_.kalman_algo == 2) || (options_.kalman_algo == 4)) %kalman_algo not compatible
error('Analytical Hessian with non-analytical mode-finding requires kalman_algo=2 or 4.')
end
end
end
% Evaluate the objective function.
fval = feval(objective_function,xparam1,dataset_, dataset_info, options_,M_,estim_params_,bayestopt_,bounds,oo_);
OldMode = fval;
if ~exist('MeanPar','var')
MeanPar = xparam1;
end
switch gmhmaxlikOptions.varinit
case 'previous'
CovJump = inv(hh);
case 'prior'
% The covariance matrix is initialized with the prior
% covariance (a diagonal matrix) %%Except for infinite variances ;-)
stdev = bayestopt_.p2;
indx = find(isinf(stdev));
stdev(indx) = ones(length(indx),1)*sqrt(10);
vars = stdev.^2;
CovJump = diag(vars);
case 'identity'
vars = ones(length(bayestopt_.p2),1)*0.1;
CovJump = diag(vars);
otherwise
error('gmhmaxlik: This is a bug! Please contact the developers.')
end
OldPostVar = CovJump;
Scale = options_.mh_jscale;
for i=1:gmhmaxlikOptions.iterations
if i == 1
if gmhmaxlikOptions.iterations>1
flag = '';
else
flag = 'LastCall';
end
[xparam1,PostVar,Scale,PostMean] = ...
gmhmaxlik(objective_function,xparam1,[bounds.lb bounds.ub],gmhmaxlikOptions,Scale,flag,MeanPar,CovJump,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fval = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
options_.mh_jscale = Scale;
mouvement = max(max(abs(PostVar-OldPostVar)));
skipline()
disp('========================================================== ')
disp([' Change in the covariance matrix = ' num2str(mouvement) '.'])
disp([' Mode improvement = ' num2str(abs(OldMode-fval))])
disp([' New value of jscale = ' num2str(Scale)])
disp('========================================================== ')
OldMode = fval;
else
OldPostVar = PostVar;
if i<gmhmaxlikOptions.iterations
flag = '';
else
flag = 'LastCall';
end
[xparam1,PostVar,Scale,PostMean] = ...
gmhmaxlik(objective_function,xparam1,[bounds.lb bounds.ub],...
gmhmaxlikOptions,Scale,flag,PostMean,PostVar,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fval = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
options_.mh_jscale = Scale;
mouvement = max(max(abs(PostVar-OldPostVar)));
fval = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
skipline()
disp('========================================================== ')
disp([' Change in the covariance matrix = ' num2str(mouvement) '.'])
disp([' Mode improvement = ' num2str(abs(OldMode-fval))])
disp([' New value of jscale = ' num2str(Scale)])
disp('========================================================== ')
OldMode = fval;
end
hh = inv(PostVar);
parameter_names = bayestopt_.name;
save([M_.fname '_mode.mat'],'xparam1','hh','parameter_names');
save([M_.fname '_optimal_mh_scale_parameter.mat'],'Scale');
bayestopt_.jscale = ones(length(xparam1),1)*Scale;
end
skipline()
disp(['Optimal value of the scale parameter = ' num2str(Scale)])
skipline()
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
skipline()
parameter_names = bayestopt_.name;
save([M_.fname '_mode.mat'],'xparam1','hh','parameter_names');
case 7
% Matlab's simplex (Optimization toolbox needed).
if isoctave && ~user_has_octave_forge_package('optim')
error('Option mode_compute=7 requires the optim package')
elseif ~isoctave && ~user_has_matlab_license('optimization_toolbox')
error('Option mode_compute=7 requires the Optimization Toolbox')
end
optim_options = optimset('display','iter','MaxFunEvals',1000000,'MaxIter',6000,'TolFun',1e-8,'TolX',1e-6);
if isfield(options_,'optim_opt')
eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
end
[xparam1,fval,exitflag] = fminsearch(objective_function,xparam1,optim_options,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
case 8
% Dynare implementation of the simplex algorithm.
simplexOptions = options_.simplex;
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'MaxIter'
simplexOptions.maxiter = options_list{i,2};
case 'TolFun'
simplexOptions.tolerance.f = options_list{i,2};
case 'TolX'
simplexOptions.tolerance.x = options_list{i,2};
case 'MaxFunEvals'
simplexOptions.maxfcall = options_list{i,2};
case 'MaxFunEvalFactor'
simplexOptions.maxfcallfactor = options_list{i,2};
case 'InitialSimplexSize'
simplexOptions.delta_factor = options_list{i,2};
otherwise
warning(['simplex: Unknown option (' options_list{i,1} ')!'])
end
end
end
[xparam1,fval,exitflag] = simplex_optimization_routine(objective_function,xparam1,simplexOptions,bayestopt_.name,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
case 9
% Set defaults
H0 = 1e-4*ones(nx,1);
cmaesOptions = options_.cmaes;
% Modify defaults
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'MaxIter'
cmaesOptions.MaxIter = options_list{i,2};
case 'TolFun'
cmaesOptions.TolFun = options_list{i,2};
case 'TolX'
cmaesOptions.TolX = options_list{i,2};
case 'MaxFunEvals'
cmaesOptions.MaxFunEvals = options_list{i,2};
otherwise
warning(['cmaes: Unknown option (' options_list{i,1} ')!'])
end
end
end
warning('off','CMAES:NonfinitenessRange');
warning('off','CMAES:InitialSigma');
[x, fval, COUNTEVAL, STOPFLAG, OUT, BESTEVER] = cmaes(func2str(objective_function),xparam1,H0,cmaesOptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
xparam1=BESTEVER.x;
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', BESTEVER.f);
case 10
simpsaOptions = options_.simpsa;
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'MaxIter'
simpsaOptions.MAX_ITER_TOTAL = options_list{i,2};
case 'TolFun'
simpsaOptions.TOLFUN = options_list{i,2};
case 'TolX'
tolx = options_list{i,2};
if tolx<0
simpsaOptions = rmfield(simpsaOptions,'TOLX'); % Let simpsa choose the default.
else
simpsaOptions.TOLX = tolx;
end
case 'EndTemparature'
simpsaOptions.TEMP_END = options_list{i,2};
case 'MaxFunEvals'
simpsaOptions.MAX_FUN_EVALS = options_list{i,2};
otherwise
warning(['simpsa: Unknown option (' options_list{i,1} ')!'])
end
end
end
simpsaOptionsList = options2cell(simpsaOptions);
simpsaOptions = simpsaset(simpsaOptionsList{:});
[xparam1, fval, exitflag] = simpsa(func2str(objective_function),xparam1,bounds.lb,bounds.ub,simpsaOptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
case 11
options_.cova_compute = 0 ;
[xparam1,stdh,lb_95,ub_95,med_param] = online_auxiliary_filter(xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_) ;
case 101
myoptions=soptions;
myoptions(2)=1e-6; %accuracy of argument
myoptions(3)=1e-6; %accuracy of function (see Solvopt p.29)
myoptions(5)= 1.0;
[xparam1,fval]=solvopt(xparam1,objective_function,[],myoptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
case 102
%simulated annealing
% LB=zeros(size(xparam1))-20;
% UB=zeros(size(xparam1))+20;
LB = xparam1 - 1;
UB = xparam1 + 1;
neps=10;
% Set input parameters.
maxy=0;
epsilon=1.0e-9;
rt_=.10;
t=15.0;
ns=10;
nt=10;
maxevl=100000000;
idisp =1;
npar=length(xparam1);
disp(['size of param',num2str(length(xparam1))])
c=.1*ones(npar,1);
%* Set input values of the input/output parameters.*
vm=1*ones(npar,1);
disp(['number of parameters= ' num2str(npar) 'max= ' num2str(maxy) 't= ' num2str(t)]);
disp(['rt_= ' num2str(rt_) 'eps= ' num2str(epsilon) 'ns= ' num2str(ns)]);
disp(['nt= ' num2str(nt) 'neps= ' num2str(neps) 'maxevl= ' num2str(maxevl)]);
% disp(['iprint= ' num2str(iprint) 'seed= ' num2str(seed)]);
disp ' ';
disp ' ';
disp(['starting values(x) ' num2str(xparam1')]);
disp(['initial step length(vm) ' num2str(vm')]);
disp(['lower bound(lb)', 'initial conditions', 'upper bound(ub)' ]);
disp([LB xparam1 UB]);
disp(['c vector ' num2str(c')]);
[xparam1, fval, nacc, nfcnev, nobds, ier, t, vm] = sa(objective_function,xparam1,maxy,rt_,epsilon,ns,nt ...
,neps,maxevl,LB,UB,c,idisp ,t,vm,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
otherwise
if ischar(options_.mode_compute)
[xparam1, fval, retcode ] = feval(options_.mode_compute,objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
else
error(['dynare_estimation:: mode_compute = ' int2str(options_.mode_compute) ' option is unknown!'])
end
end
[xparam1, fval, exitflag, hh, options_, Scale] = dynare_minimize_objective(objective_function,xparam1,options_.mode_compute,options_,[bounds.lb bounds.ub],bayestopt_.name,bayestopt_,hh,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
fprintf('\nFinal value of minus the log posterior (or likelihood):%f \n', fval);
if options_.mode_compute==5 && options_.analytic_derivation==-1 %reset options changed by newrat
options_.analytic_derivation = options_analytic_derivation_old; %reset
elseif options_.mode_compute==6 %save scaling factor
save([M_.fname '_optimal_mh_scale_parameter.mat'],'Scale');
options_.mh_jscale = Scale;
bayestopt_.jscale = ones(length(xparam1),1)*Scale;
end
if ~isequal(options_.mode_compute,6) %mode_compute=6 already computes the covariance matrix itself
if options_.cova_compute == 1 %unless the user requested that the covariance not be computed
if options_.analytic_derivation && strcmp(func2str(objective_function),'dsge_likelihood'),
ana_deriv = options_.analytic_derivation;
ana_deriv_old = options_.analytic_derivation;
options_.analytic_derivation = 2;
[junk1, junk2, hh] = feval(objective_function,xparam1, ...
dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
options_.analytic_derivation = ana_deriv;
elseif ~(isequal(options_.mode_compute,5) && flag==0),
options_.analytic_derivation = ana_deriv_old;
elseif ~(isequal(options_.mode_compute,5) && newratflag==0),
% with flag==0, we force the use of the Hessian from the outer
% product of gradients of optimizer 5
hh = reshape(hessian(objective_function,xparam1, ...
@@ -647,17 +290,13 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
end
end
parameter_names = bayestopt_.name;
if options_.cova_compute
if options_.cova_compute || options_.mode_compute==5 || options_.mode_compute==6
save([M_.fname '_mode.mat'],'xparam1','hh','parameter_names');
else
save([M_.fname '_mode.mat'],'xparam1','parameter_names');
end
end
if options_.cova_compute == 0
hh = [];%NaN(length(xparam1),length(xparam1));
end
switch options_.MCMC_jumping_covariance
case 'hessian' %Baseline
%do nothing and use hessian from mode_compute
@@ -721,10 +360,10 @@ if ~options_.mh_posterior_mode_estimation && options_.cova_compute
end
if options_.mode_check.status == 1 && ~options_.mh_posterior_mode_estimation
ana_deriv = options_.analytic_derivation;
ana_deriv_old = options_.analytic_derivation;
options_.analytic_derivation = 0;
mode_check(objective_function,xparam1,hh,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
options_.analytic_derivation = ana_deriv;
options_.analytic_derivation = ana_deriv_old;
end
oo_.posterior.optimization.mode = xparam1;
@@ -767,8 +406,6 @@ elseif ~any(bayestopt_.pshape > 0) && ~options_.mh_posterior_mode_estimation
oo_=display_estimation_results_table(xparam1,stdh,M_,options_,estim_params_,bayestopt_,oo_,pnames,'Maximum Likelihood','mle');
end
OutputDirectoryName = CheckPath('Output',M_.dname);
if np > 0
pindx = estim_params_.param_vals(:,1);
save([M_.fname '_params.mat'],'pindx');
@@ -797,14 +434,14 @@ if (any(bayestopt_.pshape >0 ) && options_.mh_replic) || ...
if options_.load_mh_file && options_.use_mh_covariance_matrix
invhess = compute_mh_covariance_matrix;
end
ana_deriv = options_.analytic_derivation;
ana_deriv_old = options_.analytic_derivation;
options_.analytic_derivation = 0;
if options_.cova_compute
feval(options_.posterior_sampling_method,objective_function,options_.proposal_distribution,xparam1,invhess,bounds,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
else
error('Cannot start the MCMC because the Hessian of the posterior kernel at the mode was not computed.')
end
options_.analytic_derivation = ana_deriv;
options_.analytic_derivation = ana_deriv_old;
end
if options_.mh_posterior_mode_estimation
CutSample(M_, options_, estim_params_);
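The net effect in this file: the per-optimizer switch is replaced by the single dynare_minimize_objective call above, while the otherwise/ischar branch still lets mode_compute name a user-supplied minimizer. A minimal sketch of such a minimizer (my_minimizer is a hypothetical name; saved as my_minimizer.m on the path and selected with mode_compute='my_minimizer'), wrapping MATLAB's fminsearch:

function [xopt, fval, exitflag] = my_minimizer(objective, x0, varargin)
% bundle the trailing Dynare structures into the objective and minimize
fun = @(x) objective(x, varargin{:});
[xopt, fval, exitflag] = fminsearch(fun, x0);
end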

View File

@@ -53,6 +53,7 @@ order = options_.order;
options_.order = 1;
[xparam1,estim_params_,bayestopt_,lb,ub,M_] = set_prior(estim_params_,M_,options_);
if plt_flag
plot_priors(bayestopt_,M_,estim_params_,options_)
end
@@ -100,37 +101,37 @@ if size(M_.param_names,1)==size(M_.param_names_tex,1)% All the parameters have a
case { 1 , 5 }
LowerBound = bayestopt_.p3(i);
UpperBound = bayestopt_.p4(i);
if ~isinf(bayestopt_.lb(i))
LowerBound=max(LowerBound,bayestopt_.lb(i));
if ~isinf(lb(i))
LowerBound=max(LowerBound,lb(i));
end
if ~isinf(bayestopt_.ub(i))
UpperBound=min(UpperBound,bayestopt_.ub(i));
if ~isinf(ub(i))
UpperBound=min(UpperBound,ub(i));
end
case { 2 , 4 , 6 }
LowerBound = bayestopt_.p3(i);
if ~isinf(bayestopt_.lb(i))
LowerBound=max(LowerBound,bayestopt_.lb(i));
if ~isinf(lb(i))
LowerBound=max(LowerBound,lb(i));
end
if ~isinf(bayestopt_.ub(i))
UpperBound=bayestopt_.ub(i);
if ~isinf(ub(i))
UpperBound=ub(i);
else
UpperBound = '$\infty$';
end
case 3
if isinf(bayestopt_.p3(i)) && isinf(bayestopt_.lb(i))
if isinf(bayestopt_.p3(i)) && isinf(lb(i))
LowerBound = '$-\infty$';
else
LowerBound = bayestopt_.p3(i);
if ~isinf(bayestopt_.lb(i))
LowerBound=max(LowerBound,bayestopt_.lb(i));
if ~isinf(lb(i))
LowerBound=max(LowerBound,lb(i));
end
end
if isinf(bayestopt_.p4(i)) && isinf(bayestopt_.ub(i))
if isinf(bayestopt_.p4(i)) && isinf(ub(i))
UpperBound = '$\infty$';
else
UpperBound = bayestopt_.p4(i);
if ~isinf(bayestopt_.ub(i))
UpperBound=min(UpperBound,bayestopt_.ub(i));
if ~isinf(ub(i))
UpperBound=min(UpperBound,ub(i));
end
end
otherwise
@@ -206,7 +207,7 @@ if info==2% Prior optimization.
bayestopt_.p6, ...
bayestopt_.p7, ...
bayestopt_.p3, ...
bayestopt_.p4,options_,M_,estim_params_,oo_);
bayestopt_.p4,options_,M_,bayestopt_,estim_params_,oo_);
% Display the results.
skipline(2)
disp('------------------')

View File

@@ -103,6 +103,10 @@ options_.bvar_prior_omega = 1;
options_.bvar_prior_flat = 0;
options_.bvar_prior_train = 0;
% Initialize the field that will contain the optimization algorithm's options declared in the
% estimation command (if any).
options_.optim_opt = [];
% Optimization algorithm [6] gmhmaxlik
gmhmaxlik.iterations = 3;
gmhmaxlik.number = 20000;
@@ -422,8 +426,8 @@ options_.student_degrees_of_freedom = 3;
options_.posterior_max_subsample_draws = 1200;
options_.sub_draws = [];
options_.use_mh_covariance_matrix = 0;
options_.gradient_method = 2;
options_.gradient_epsilon = 1e-6;
options_.gradient_method = 2; %used by csminwel and newrat
options_.gradient_epsilon = 1e-6; %used by csminwel and newrat
options_.posterior_sampling_method = 'random_walk_metropolis_hastings';
options_.proposal_distribution = 'rand_multivariate_normal';
options_.student_degrees_of_freedom = 3;
@@ -469,6 +473,18 @@ options_.homotopy_mode = 0;
options_.homotopy_steps = 1;
options_.homotopy_force_continue = 0;
%csminwel optimization routine
csminwel.tolerance.f=1e-7;
csminwel.maxiter=1000;
options_.csminwel=csminwel;
%newrat optimization routine
newrat.hess=1; %analytic hessian
newrat.tolerance.f=1e-5;
newrat.tolerance.f_analytic=1e-7;
newrat.maxiter=1000;
options_.newrat=newrat;
% Simplex optimization routine (variation on Nelder Mead algorithm).
simplex.tolerance.x = 1e-4;
simplex.tolerance.f = 1e-4;
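These defaults can be overridden at run time: the estimation command's optim option arrives as the string options_.optim_opt (initialized empty above) and each minimizer parses it with read_key_value_string. A short sketch, assuming a Dynare session so that read_key_value_string is on the path:

% override the csminwel/newrat defaults declared above
options_.optim_opt = '''MaxIter'',500,''TolFun'',1e-6';
options_list = read_key_value_string(options_.optim_opt);
% options_list{1,1} is 'MaxIter' and options_list{1,2} is 500; the
% minimizers scan this cell array in their option-reading loops.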

View File

@@ -1,5 +1,5 @@ function [xparams,lpd,hessian] = ...
function [xparams,lpd,hessian] = ...
maximize_prior_density(iparams, prior_shape, prior_hyperparameter_1, prior_hyperparameter_2, prior_inf_bound, prior_sup_bound,DynareOptions,DynareModel,EstimatedParams,DynareResults)
maximize_prior_density(iparams, prior_shape, prior_hyperparameter_1, prior_hyperparameter_2, prior_inf_bound, prior_sup_bound,DynareOptions,DynareModel,BayesInfo,EstimatedParams,DynareResults)
% Maximizes the logged prior density using Chris Sims' optimization routine.
%
% INPUTS
@@ -15,7 +15,7 @@ function [xparams,lpd,hessian] = ...
% lpd [double] scalar, value of the logged prior density at the mode.
% hessian [double] matrix, Hessian matrix at the prior mode.
% Copyright (C) 2009-2012 Dynare Team
% Copyright (C) 2009-2015 Dynare Team
%
% This file is part of Dynare.
%
@@ -32,15 +32,10 @@ function [xparams,lpd,hessian] = ...
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
number_of_estimated_parameters = length(iparams);
H0 = 1e-4*eye(number_of_estimated_parameters);
crit = 1e-7;
nit = 1000;
gradient_method = 2;
gradient_epsilon = 1e-6;
[lpd,xparams,grad,hessian,itct,fcount,retcodehat] = ...
csminwel1('minus_logged_prior_density',iparams,H0,[],crit,nit,gradient_method, gradient_epsilon, ...
prior_shape, prior_hyperparameter_1, prior_hyperparameter_2, prior_inf_bound, prior_sup_bound,DynareOptions,DynareModel,EstimatedParams,DynareResults);
[xparams, lpd, exitflag, hessian]=dynare_minimize_objective('minus_logged_prior_density', ...
iparams, DynareOptions.mode_compute, DynareOptions, [prior_inf_bound, prior_sup_bound], ...
BayesInfo.name, BayesInfo, [], ...
prior_shape, prior_hyperparameter_1, prior_hyperparameter_2, prior_inf_bound, prior_sup_bound, ...
DynareOptions,DynareModel,EstimatedParams,DynareResults);
lpd = -lpd;

View File

@@ -0,0 +1,335 @@
function [opt_par_values,fval,exitflag,hessian_mat,options_,Scale]=dynare_minimize_objective(objective_function,start_par_value,minimizer_algorithm,options_,bounds,parameter_names,prior_information,Initial_Hessian,varargin)
% Calls a minimizer
%
% INPUTS
% objective_function [function handle] handle to the objective function
% start_par_value [n_params by 1] vector of doubles starting values for the parameters
% minimizer_algorithm [scalar double] code of the optimizer algorithm
% options_ [matlab structure] Dynare options structure
% bounds [n_params by 2] matrix of doubles lower and upper bounds of the parameters (first and second column, respectively)
% parameter_names [n_params by 1] cell array of strings with the parameter names
% prior_information [matlab structure] Dynare prior information structure (bayestopt_) provided for algorithm 6
% Initial_Hessian [n_params by n_params] matrix initial hessian matrix provided for algorithm 6
% varargin [cell array] Input arguments for objective function
%
% OUTPUTS
% opt_par_values [n_params by 1] vector of doubles optimal parameter values minimizing the objective
% fval [scalar double] value of the objective function at the minimum
% exitflag [scalar double] return code of the respective optimizer
% hessian_mat [n_params by n_params] matrix hessian matrix at the mode returned by optimizer
% options_ [matlab structure] Dynare options structure (to return options set by algorithm 5)
% Scale [scalar double] scaling parameter returned by algorithm 6
%
% SPECIAL REQUIREMENTS
% none.
%
%
% Copyright (C) 2014-2015 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
%% set bounds and parameter names if not already set
n_params=size(start_par_value,1);
if isempty(bounds)
if minimizer_algorithm==10
error('Algorithm 10 (simpsa) requires upper and lower bounds')
else
bounds=[-Inf(n_params,1) Inf(n_params,1)];
end
end
if minimizer_algorithm==10 && any(any(isinf(bounds)))
error('Algorithm 10 (simpsa) requires finite upper and lower bounds')
end
if isempty(parameter_names)
parameter_names=[repmat('parameter ',n_params,1),num2str((1:n_params)')];
end
%% initialize function outputs
hessian_mat=[];
Scale=[];
exitflag=1;
fval=NaN;
opt_par_values=NaN(size(start_par_value));
switch minimizer_algorithm
case 1
if isoctave
error('Optimization algorithm 1 is not available under Octave')
elseif ~user_has_matlab_license('optimization_toolbox')
error('Optimization algorithm 1 requires the Optimization Toolbox')
end
% Set default optimization options for fmincon.
optim_options = optimset('display','iter', 'LargeScale','off', 'MaxFunEvals',100000, 'TolFun',1e-8, 'TolX',1e-6);
if isfield(options_,'optim_opt')
eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
end
if options_.analytic_derivation,
optim_options = optimset(optim_options,'GradObj','on','TolX',1e-7);
end
[opt_par_values,fval,exitflag,output,lambda,grad,hessian_mat] = ...
fmincon(objective_function,start_par_value,[],[],[],[],bounds(:,1),bounds(:,2),[],optim_options,varargin{:});
case 2
error('Optimization algorithm 2 (Lester Ingber''s Adaptive Simulated Annealing) is no longer available')
case 3
if isoctave && ~user_has_octave_forge_package('optim')
error('Optimization algorithm 3 requires the optim package')
elseif ~isoctave && ~user_has_matlab_license('optimization_toolbox')
error('Optimization algorithm 3 requires the Optimization Toolbox')
end
% Set default optimization options for fminunc.
optim_options = optimset('display','iter','MaxFunEvals',100000,'TolFun',1e-8,'TolX',1e-6);
if isfield(options_,'optim_opt')
eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
end
if options_.analytic_derivation,
optim_options = optimset(optim_options,'GradObj','on');
end
if ~isoctave
[opt_par_values,fval,exitflag] = fminunc(objective_function,start_par_value,optim_options,varargin{:});
else
% Under Octave, use a wrapper, since fminunc() does not have a 4th arg
func = @(x) objective_function(x,varargin{:});
[opt_par_values,fval,exitflag] = fminunc(func,start_par_value,optim_options);
end
case 4
% Set default options.
H0 = 1e-4*eye(n_params);
crit = options_.csminwel.tolerance.f;
nit = options_.csminwel.maxiter;
numgrad = options_.gradient_method;
epsilon = options_.gradient_epsilon;
% Change some options.
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'MaxIter'
nit = options_list{i,2};
case 'InitialInverseHessian'
H0 = eval(options_list{i,2});
case 'TolFun'
crit = options_list{i,2};
case 'NumgradAlgorithm'
numgrad = options_list{i,2};
case 'NumgradEpsilon'
epsilon = options_list{i,2};
otherwise
warning(['csminwel: Unknown option (' options_list{i,1} ')!'])
end
end
end
% Set flag for analytical gradient.
if options_.analytic_derivation
analytic_grad=1;
else
analytic_grad=[];
end
% Call csminwell.
[fval,opt_par_values,grad,hessian_mat,itct,fcount,exitflag] = ...
csminwel1(objective_function, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, varargin{:});
case 5
if options_.analytic_derivation==-1 %value -1 is set by the caller to signal the use of analytic derivatives
analytic_grad=1;
crit = options_.newrat.tolerance.f_analytic;
newratflag = 0; %analytical Hessian
else
analytic_grad=0;
crit=options_.newrat.tolerance.f;
newratflag = options_.newrat.hess; %default
end
nit=options_.newrat.maxiter;
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'MaxIter'
nit = options_list{i,2};
case 'Hessian'
flag=options_list{i,2};
if options_.analytic_derivation && flag~=0
error('newrat: analytic_derivation is incompatible with a numerical Hessian')
else
newratflag=flag;
end
case 'TolFun'
crit = options_list{i,2};
otherwise
warning(['newrat: Unknown option (' options_list{i,1} ')!'])
end
end
end
[opt_par_values,hessian_mat,gg,fval,invhess] = newrat(objective_function,start_par_value,analytic_grad,crit,nit,0,varargin{:});
% When analytic derivatives are used, the Hessian returned by newrat is
% already the analytic one; the caller is in charge of restoring
% options_.analytic_derivation afterwards.
if ~options_.analytic_derivation && newratflag==0 %Analytic Hessian wanted, but not computed yet
hessian_mat = reshape(mr_hessian(0,opt_par_values,objective_function,1,crit,varargin{:}), n_params, n_params);
end
case 6
[opt_par_values, hessian_mat, Scale, fval] = gmhmaxlik(objective_function, start_par_value, ...
Initial_Hessian, options_.mh_jscale, bounds, prior_information.p2, options_.gmhmaxlik, options_.optim_opt, varargin{:});
case 7
% Matlab's simplex (Optimization toolbox needed).
if isoctave && ~user_has_octave_forge_package('optim')
error('Option mode_compute=7 requires the optim package')
elseif ~isoctave && ~user_has_matlab_license('optimization_toolbox')
error('Option mode_compute=7 requires the Optimization Toolbox')
end
optim_options = optimset('display','iter','MaxFunEvals',1000000,'MaxIter',6000,'TolFun',1e-8,'TolX',1e-6);
if isfield(options_,'optim_opt')
eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
end
[opt_par_values,fval,exitflag] = fminsearch(objective_function,start_par_value,optim_options,varargin{:});
case 8
% Dynare implementation of the simplex algorithm.
simplexOptions = options_.simplex;
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'MaxIter'
simplexOptions.maxiter = options_list{i,2};
case 'TolFun'
simplexOptions.tolerance.f = options_list{i,2};
case 'TolX'
simplexOptions.tolerance.x = options_list{i,2};
case 'MaxFunEvals'
simplexOptions.maxfcall = options_list{i,2};
case 'MaxFunEvalFactor'
simplexOptions.maxfcallfactor = options_list{i,2};
case 'InitialSimplexSize'
simplexOptions.delta_factor = options_list{i,2};
otherwise
warning(['simplex: Unknown option (' options_list{i,1} ')!'])
end
end
end
[opt_par_values,fval,exitflag] = simplex_optimization_routine(objective_function,start_par_value,simplexOptions,parameter_names,varargin{:});
case 9
% Set defaults
H0 = 1e-4*ones(n_params,1);
cmaesOptions = options_.cmaes;
% Modify defaults
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'MaxIter'
cmaesOptions.MaxIter = options_list{i,2};
case 'TolFun'
cmaesOptions.TolFun = options_list{i,2};
case 'TolX'
cmaesOptions.TolX = options_list{i,2};
case 'MaxFunEvals'
cmaesOptions.MaxFunEvals = options_list{i,2};
otherwise
warning(['cmaes: Unknown option (' options_list{i,1} ')!'])
end
end
end
warning('off','CMAES:NonfinitenessRange');
warning('off','CMAES:InitialSigma');
[x, fval, COUNTEVAL, STOPFLAG, OUT, BESTEVER] = cmaes(func2str(objective_function),start_par_value,H0,cmaesOptions,varargin{:});
opt_par_values=BESTEVER.x;
fval=BESTEVER.f;
case 10
simpsaOptions = options_.simpsa;
if isfield(options_,'optim_opt')
options_list = read_key_value_string(options_.optim_opt);
for i=1:rows(options_list)
switch options_list{i,1}
case 'MaxIter'
simpsaOptions.MAX_ITER_TOTAL = options_list{i,2};
case 'TolFun'
simpsaOptions.TOLFUN = options_list{i,2};
case 'TolX'
tolx = options_list{i,2};
if tolx<0
simpsaOptions = rmfield(simpsaOptions,'TOLX'); % Let simpsa choose the default.
else
simpsaOptions.TOLX = tolx;
end
case 'EndTemparature'
simpsaOptions.TEMP_END = options_list{i,2};
case 'MaxFunEvals'
simpsaOptions.MAX_FUN_EVALS = options_list{i,2};
otherwise
warning(['simpsa: Unknown option (' options_list{i,1} ')!'])
end
end
end
simpsaOptionsList = options2cell(simpsaOptions);
simpsaOptions = simpsaset(simpsaOptionsList{:});
[opt_par_values, fval, exitflag] = simpsa(func2str(objective_function),start_par_value,bounds(:,1),bounds(:,2),simpsaOptions,varargin{:});
case 11
options_.cova_compute = 0 ;
[opt_par_values,stdh,lb_95,ub_95,med_param] = online_auxiliary_filter(start_par_value,varargin{:}) ;
case 101
myoptions=soptions;
myoptions(2)=1e-6; %accuracy of argument
myoptions(3)=1e-6; %accuracy of function (see Solvopt p.29)
myoptions(5)= 1.0;
[opt_par_values,fval]=solvopt(start_par_value,objective_function,[],myoptions,varargin{:});
case 102
%simulated annealing
LB = start_par_value - 1;
UB = start_par_value + 1;
neps=10;
% Set input parameters.
maxy=0;
epsilon=1.0e-9;
rt_=.10;
t=15.0;
ns=10;
nt=10;
maxevl=100000000;
idisp =1;
npar=length(start_par_value);
disp(['size of param ',num2str(length(start_par_value))])
c=.1*ones(npar,1);
%* Set input values of the input/output parameters.*
vm=1*ones(npar,1);
disp(['number of parameters= ' num2str(npar) 'max= ' num2str(maxy) 't= ' num2str(t)]);
disp(['rt_= ' num2str(rt_) 'eps= ' num2str(epsilon) 'ns= ' num2str(ns)]);
disp(['nt= ' num2str(nt) 'neps= ' num2str(neps) 'maxevl= ' num2str(maxevl)]);
disp ' ';
disp ' ';
disp(['starting values(x) ' num2str(start_par_value')]);
disp(['initial step length(vm) ' num2str(vm')]);
disp(['lower bound(lb)', 'initial conditions', 'upper bound(ub)' ]);
disp([LB start_par_value UB]);
disp(['c vector ' num2str(c')]);
[opt_par_values, fval, nacc, nfcnev, nobds, ier, t, vm] = sa(objective_function,start_par_value,maxy,rt_,epsilon,ns,nt ...
,neps,maxevl,LB,UB,c,idisp ,t,vm,varargin{:});
otherwise
if ischar(minimizer_algorithm)
if exist(minimizer_algorithm,'file')
[opt_par_values, fval, exitflag] = feval(minimizer_algorithm,objective_function,start_par_value,varargin{:});
else
error('No minimizer with the provided name detected.')
end
else
error(['Optimization algorithm ' int2str(minimizer_algorithm) ' is unknown!'])
end
end
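A hedged usage sketch for the new entry point (not part of the commit): minimizing a toy quadratic with the Dynare simplex (algorithm 8), assuming a Dynare session where global_initialization has populated options_ (in particular options_.simplex):

toy_objective = @(x, varargin) sum((x - 1).^2); % trailing Dynare arguments ignored
x0 = zeros(3,1);
toy_bounds = [-10*ones(3,1) 10*ones(3,1)];
[xopt, fval, exitflag] = dynare_minimize_objective(toy_objective, x0, 8, options_, toy_bounds, [], [], []);
% xopt should be close to ones(3,1) and fval close to zero.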

View File

@@ -0,0 +1,120 @@
function [PostMode, HessianMatrix, Scale, ModeValue] = gmhmaxlik(fun, xinit, Hinit, iscale, bounds, priorstd, gmhmaxlikOptions, OptimizationOptions, varargin)
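% gmhmaxlik: Monte-Carlo based maximization of a likelihood or posterior kernel.
% Wraps gmhmaxlik_core: parses the optimizer options passed through the
% estimation command, initializes the jumping covariance matrix (from a
% previous Hessian estimate, the prior variances, or the identity matrix),
% iterates Metropolis-Hastings based optimization runs while tuning the scale
% parameter, and returns the mode (PostMode), the inverse of the final
% covariance matrix (HessianMatrix), the tuned scale (Scale) and the value of
% the objective at the mode (ModeValue).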
% Copyright (C) 2006-2015 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
% Set default options
if ~isempty(Hinit)
gmhmaxlikOptions.varinit = 'previous';
else
gmhmaxlikOptions.varinit = 'prior';
end
if ~isempty(OptimizationOptions)
DynareOptionslist = read_key_value_string(OptimizationOptions);
for i=1:rows(DynareOptionslist)
switch DynareOptionslist{i,1}
case 'NumberOfMh'
gmhmaxlikOptions.iterations = DynareOptionslist{i,2};
case 'ncov-mh'
gmhmaxlikOptions.number = DynareOptionslist{i,2};
case 'nscale'
gmhmaxlikOptions.nscale = DynareOptionslist{i,2};
case 'nclimb'
gmhmaxlikOptions.nclimb = DynareOptionslist{i,2};
case 'InitialCovarianceMatrix'
switch DynareOptionslist{i,2}
case 'previous'
if isempty(Hinit)
error('gmhmaxlik: No previous estimate of the Hessian matrix available! You cannot use the InitialCovarianceMatrix option!')
else
gmhmaxlikOptions.varinit = 'previous';
end
case {'prior', 'identity'}
gmhmaxlikOptions.varinit = DynareOptionslist{i,2};
otherwise
error('gmhmaxlik: Unknown value for option ''InitialCovarianceMatrix''!')
end
case 'AcceptanceRateTarget'
gmhmaxlikOptions.target = DynareOptionslist{i,2};
if gmhmaxlikOptions.target>1 || gmhmaxlikOptions.target<eps
error('gmhmaxlik: The value of option AcceptanceRateTarget should be a double between 0 and 1!')
end
otherwise
warning(['gmhmaxlik: Unknown option (' DynareOptionslist{i,1} ')!'])
end
end
end
% Evaluate the objective function.
OldModeValue = feval(fun,xinit,varargin{:});
if ~exist('MeanPar','var')
MeanPar = xinit;
end
switch gmhmaxlikOptions.varinit
case 'previous'
CovJump = inv(Hinit);
case 'prior'
% The covariance matrix is initialized with the prior
% covariance (a diagonal matrix) %%Except for infinite variances ;-)
stdev = priorstd;
indx = find(isinf(stdev));
stdev(indx) = ones(length(indx),1)*sqrt(10);
vars = stdev.^2;
CovJump = diag(vars);
case 'identity'
vars = ones(length(priorstd),1)*0.1;
CovJump = diag(vars);
otherwise
error('gmhmaxlik: This is a bug! Please contact the developers.')
end
OldPostVariance = CovJump;
OldPostMean = xinit;
OldPostMode = xinit;
Scale = iscale;
for i=1:gmhmaxlikOptions.iterations
if i<gmhmaxlikOptions.iterations
flag = '';
else
flag = 'LastCall';
end
[PostMode, PostVariance, Scale, PostMean] = gmhmaxlik_core(fun, OldPostMode, bounds, gmhmaxlikOptions, Scale, flag, OldPostMean, OldPostVariance, varargin{:});
ModeValue = feval(fun, PostMode, varargin{:});
dVariance = max(max(abs(PostVariance-OldPostVariance)));
dMean = max(abs(PostMean-OldPostMean));
skipline()
printline(58,'=')
disp([' Change in the posterior covariance matrix = ' num2str(dVariance) '.'])
disp([' Change in the posterior mean = ' num2str(dMean) '.'])
disp([' Mode improvement = ' num2str(abs(OldModeValue-ModeValue))])
disp([' New value of jscale = ' num2str(Scale)])
printline(58,'=')
OldModeValue = ModeValue;
OldPostMean = PostMean;
OldPostVariance = PostVariance;
end
HessianMatrix = inv(PostVariance);
skipline()
disp(['Optimal value of the scale parameter = ' num2str(Scale)])
skipline()
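For reference, a hedged illustration of how this wrapper is reached (it mirrors case 6 of dynare_minimize_objective above; with an empty initial Hessian the jumping covariance is initialized from the prior variances in bayestopt_.p2, and all trailing arguments are forwarded to the objective):

[xopt, hess, Scale, fval] = gmhmaxlik(objective_function, xparam1, [], options_.mh_jscale, [bounds.lb bounds.ub], bayestopt_.p2, options_.gmhmaxlik, options_.optim_opt, dataset_, dataset_info, options_, M_, estim_params_, bayestopt_, bounds, oo_);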

View File

@@ -1,8 +1,5 @@ function [PostMod,PostVar,Scale,PostMean] = ...
function [PostMod,PostVar,Scale,PostMean] = ...
gmhmaxlik(ObjFun,xparam1,mh_bounds,options,iScale,info,MeanPar,VarCov,varargin)
function [PostMod,PostVar,Scale,PostMean] = gmhmaxlik_core(ObjFun,xparam1,mh_bounds,options,iScale,info,MeanPar,VarCov,varargin)
%function [PostMod,PostVar,Scale,PostMean] = ...
%gmhmaxlik(ObjFun,xparam1,mh_bounds,num,iScale,info,MeanPar,VarCov,varargin)
% (Dirty) Global minimization routine of (minus) a likelihood (or posterior density) function.
%
% INPUTS
@@ -59,7 +56,7 @@ function [PostMod,PostVar,Scale,PostMean] = ...
% SPECIAL REQUIREMENTS
% None.
% Copyright (C) 2006-2012 Dynare Team
% Copyright (C) 2006-2015 Dynare Team
%
% This file is part of Dynare.
%