Added the possibility to use Gauss-Newton in pac/nls.

Also added the computation of the covariance matrix of the NLS estimator (using
the White and Domowitz approach) and an integration test.
time-shift
Stéphane Adjemian (Scylla) 2018-11-26 09:53:18 +01:00
parent 139c58dd76
commit e6c716ae9b
Signed by untrusted user who does not match committer: stepan
GPG Key ID: A6D44CB9C64CE77B
3 changed files with 241 additions and 28 deletions

View File

@ -61,6 +61,13 @@ function nls(eqname, params, data, range, optimizer, varargin)
global M_ oo_ options_
is_gauss_newton = false;
objective = 'ssr_';
if nargin>4 && isequal(optimizer, 'GaussNewton')
is_gauss_newton = true;
objective = 'r_';
end
[pacmodl, lhs, rhs, pnames, enames, xnames, pid, eid, xid, ~, ipnames_, params, data, islaggedvariables] = ...
pac.estimate.init(M_, oo_, eqname, params, data, range);
@ -113,10 +120,27 @@ for i=1:length(objNames)
end
end
% Create a routine for evaluating the sum of squared residuals
ssrfun = ['ssr_' eqname];
fid = fopen([ssrfun '.m'], 'w');
fprintf(fid, 'function [s, fake1, fake2, fake3, fake4] = %s(params, data, DynareModel, DynareOutput)\n', ssrfun);
% Create a routine for evaluating the residuals of the nonlinear model
fun = ['r_' eqname];
fid = fopen(['+' M_.fname filesep() fun '.m'], 'w');
fprintf(fid, 'function r = %s(params, data, DynareModel, DynareOutput)\n', fun);
fprintf(fid, '\n');
fprintf(fid, '%% Evaluates the residuals for equation %s.\n', eqname);
fprintf(fid, '%% File created by Dynare (%s).\n', datestr(datetime));
fprintf(fid, '\n');
for i=1:length(ipnames_)
fprintf(fid, 'DynareModel.params(%u) = params(%u);\n', ipnames_(i), i);
end
fprintf(fid, '\n');
fprintf(fid, 'DynareModel = pac.update.parameters(''%s'', DynareModel, DynareOutput);\n', pacmodl);
fprintf(fid, '\n');
fprintf(fid, 'r = %s-(%s);\n', lhs, rhs);
fclose(fid);
% Create a routine for evaluating the sum of squared residuals of the nonlinear model
fun = ['ssr_' eqname];
fid = fopen(['+' M_.fname filesep() fun '.m'], 'w');
fprintf(fid, 'function [s, fake1, fake2, fake3, fake4] = %s(params, data, DynareModel, DynareOutput)\n', fun);
fprintf(fid, '\n');
fprintf(fid, '%% Evaluates the sum of square residuals for equation %s.\n', eqname);
fprintf(fid, '%% File created by Dynare (%s).\n', datestr(datetime));
@ -136,10 +160,14 @@ fprintf(fid, 'r = %s-(%s);\n', lhs, rhs);
fprintf(fid, 's = r''*r;\n');
fclose(fid);
% Create a function handle returning the sum of square residuals for a given
% vector of parameters.
% Copy (sub)sample data in a matrix.
DATA = data([range(1)-1, range]).data;
ssr = @(p) feval(['ssr_' eqname], p, DATA, M_, oo_);
% Create a function handle returning the sum of square residuals for a given vector of parameters.
ssrfun = @(p) feval([M_.fname '.ssr_' eqname], p, DATA, M_, oo_);
% Create a function handle returning the sum of square residuals for a given vector of parameters.
resfun = @(p) feval([M_.fname '.r_' eqname], p, DATA, M_, oo_);
% Set initial condition.
params0 = cell2mat(struct2cell(params));
@ -154,6 +182,8 @@ if nargin<5 || isempty(optimizer)
minalgo = 4;
else
switch optimizer
case 'GaussNewton'
% Nothing to do here.
case 'fmincon'
if isoctave
error('Optimization algorithm ''fmincon'' is not available under Octave')
@ -205,6 +235,7 @@ else
msg = sprintf('%s - %s\n', msg, 'fminsearch');
msg = sprintf('%s - %s\n', msg, 'simplex');
msg = sprintf('%s - %s\n', msg, 'annealing');
msg = sprintf('%s - %s\n', msg, 'GaussNewton');
error(msg)
end
end
@ -219,22 +250,14 @@ if nargin>5
opt = '';
while i<nargin-5
if i==1
opt = sprintf('''%s'',', varargin{i});
opt = sprintf('''%s''', varargin{i});
else
opt = sprintf('%s,''%s'',', opt, varargin{i});
opt = sprintf('%s,''%s''', opt, varargin{i});
end
if isnumeric(varargin{i+1})
if (i+1)==(nargin-5)
opt = sprintf('%s%s', opt, varargin{i+1});
else
opt = sprintf('%s%s,', opt, varargin{i+1});
end
opt = sprintf('%s,%s', opt, num2str(varargin{i+1}));
else
if (i+1)==(nargin-5)
opt = sprintf('%s''%s''', opt, varargin{i+1});
else
opt = sprintf('%s''%s'',', opt, varargin{i+1});
end
opt = sprintf('%s,''%s''', opt, varargin{i+1});
end
i = i+2;
end
@ -248,21 +271,43 @@ if nargin<5
options_.optim_opt = '''verbosity'',0';
end
% Estimate the parameters by minimizing the sum of squared residuals.
[pparams1, SSR, exitflag] = dynare_minimize_objective(ssr, params0, ...
if is_gauss_newton
[params1, SSR, exitflag] = gauss_newton(resfun, params0);
else
% Estimate the parameters by minimizing the sum of squared residuals.
[params1, SSR, exitflag] = dynare_minimize_objective(ssrfun, params0, ...
minalgo, ...
options_, ...
bounds, ...
parameter_names, ...
[], ...
[]);
options_.optim_opt = oldopt;
% Update M_.params
for i=1:length(pparams1)
M_.params(ipnames_(i)) = pparams1(i);
end
% Revert local modifications to the options.
options_.optim_opt = oldopt;
% Compute an estimator of the covariance matrix (see White and
% Domowitz [Econometrica, 1984], theorem 3.2).
[r, J] = jacobian(resfun, params1, 1e-6);
T = length(r);
A = 2.0*(J'*J)/T;
J = bsxfun(@times, J, r);
B = J'*J;
l = round(T^.25);
for tau=1:l
B = B + (1-tau/(l+1))*(J(tau+1:end,:)'*J(1:end-tau,:)+J(1:end-tau,:)'*J(tau+1:end,:));
end
B = (4.0/T)*B;
C = inv(A)*B*inv(A); % C is the asymptotic covariance of sqrt(T) times the vector of estimated parameters.
C = C/T;
% Save results
oo_.pac.(pacmodl).ssr = SSR;
oo_.pac.(pacmodl).estimator = params1;
oo_.pac.(pacmodl).covariance = C;
oo_.pac.(pacmodl).student = params1./(sqrt(diag(C)));
% Also save estimated parameters in M_
M_.params(ipnames_) = params1;
M_ = pac.update.parameters(pacmodl, M_, oo_);

View File

@ -0,0 +1,8 @@
#!/bin/sh
# Remove every artefact produced by the Dynare run: the preprocessor output
# directories, the log file, and the generated data/routine files.
for dir in example +example; do
    rm -rf "$dir"
done
rm -f example.log
for pattern in '*.mat' '*.m' '*.dat'; do
    rm -f $pattern
done

View File

@ -0,0 +1,160 @@
// --+ options: json=compute, stochastic +--

// Integration test for pac.estimate.nls: the parameters of the PAC equation
// are estimated twice (with csminwel, then with Gauss-Newton) and the two
// sets of estimates are required to be consistent.

var x1 x2 x1bar x2bar z y x;

varexo ex1 ex2 ex1bar ex2bar ez ey ex;

parameters
       rho_1 rho_2 rho_3 rho_4
       a_x1_0 a_x1_1 a_x1_2 a_x1_x2_1 a_x1_x2_2
       a_x2_0 a_x2_1 a_x2_2 a_x2_x1_1 a_x2_x1_2
       e_c_m c_z_1 c_z_2 beta
       lambda;

// Calibration used to simulate the data.
rho_1 = .9;
rho_2 = -.2;
rho_3 = .4;
rho_4 = -.3;

a_x1_0 = -.9;
a_x1_1 = .4;
a_x1_2 = .3;
a_x1_x2_1 = .1;
a_x1_x2_2 = .2;

a_x2_0 = -.9;
a_x2_1 = .2;
a_x2_2 = -.1;
a_x2_x1_1 = -.1;
a_x2_x1_2 = .2;

beta = .2;
e_c_m = .5;
c_z_1 = .2;
c_z_2 = -.1;

lambda = 0.5; // Share of optimizing agents.

trend_component_model(model_name=toto, eqtags=['eq:x1', 'eq:x2', 'eq:x1bar', 'eq:x2bar'], targets=['eq:x1bar', 'eq:x2bar']);

pac_model(auxiliary_model_name=toto, discount=beta, model_name=pacman);

model;

[name='eq:y']
y = rho_1*y(-1) + rho_2*y(-2) + ey;

[name='eq:x']
x = rho_3*x(-1) + rho_4*x(-2) + ex;

[name='eq:x1']
diff(x1) = a_x1_0*(x1(-1)-x1bar(-1)) + a_x1_1*diff(x1(-1)) + a_x1_2*diff(x1(-2)) + a_x1_x2_1*diff(x2(-1)) + a_x1_x2_2*diff(x2(-2)) + ex1;

[name='eq:x2']
diff(x2) = a_x2_0*(x2(-1)-x2bar(-1)) + a_x2_1*diff(x1(-1)) + a_x2_2*diff(x1(-2)) + a_x2_x1_1*diff(x2(-1)) + a_x2_x1_2*diff(x2(-2)) + ex2;

[name='eq:x1bar']
x1bar = x1bar(-1) + ex1bar;

[name='eq:x2bar']
x2bar = x2bar(-1) + ex2bar;

[name='zpac']
diff(z) = lambda*(e_c_m*(x1(-1)-z(-1)) + c_z_1*diff(z(-1)) + c_z_2*diff(z(-2)) + pac_expectation(pacman)) + (1-lambda)*( y + x) + ez;

end;

shocks;
    var ex1 = 1.0;
    var ex2 = 1.0;
    var ex1bar = 1.0;
    var ex2bar = 1.0;
    var ez = 1.0;
    var ey = 0.1;
    var ex = 0.1;
end;

// Initialize the PAC model (build the Companion VAR representation for the auxiliary model).
pac.initialize('pacman');

// Update the parameters of the PAC expectation model (h0 and h1 vectors).
pac.update.expectation('pacman');

// Set initial conditions to zero. Please use more sensible values if any...
initialconditions = dseries(zeros(10, M_.endo_nbr+M_.exo_nbr), 2000Q1, vertcat(M_.endo_names,M_.exo_names));

// Simulate the model over 300 periods.
TrueData = simul_backward_model(initialconditions, 300);

// Define a structure describing the parameters to be estimated (with initial conditions).
clear eparams
eparams.e_c_m = .9;
eparams.c_z_1 = .5;
eparams.c_z_2 = .2;
eparams.lambda = .7;

// Define the dataset used for estimation (the residual ez is unobserved).
edata = TrueData;
edata.ez = dseries(NaN(TrueData.nobs, 1), 2000Q1, 'ez');

// First estimation: minimize the sum of squared residuals with csminwel.
tic
pac.estimate.nls('zpac', eparams, edata, 2005Q1:2005Q1+200, 'csminwel', 'verbosity', 0);
toc

skipline(1)

e_c_m_nls = M_.params(strmatch('e_c_m', M_.param_names, 'exact'));
c_z_1_nls = M_.params(strmatch('c_z_1', M_.param_names, 'exact'));
c_z_2_nls = M_.params(strmatch('c_z_2', M_.param_names, 'exact'));
lambda_nls = M_.params(strmatch('lambda', M_.param_names, 'exact'));

disp(sprintf('Estimate of e_c_m: %f', e_c_m_nls))
disp(sprintf('Estimate of c_z_1: %f', c_z_1_nls))
disp(sprintf('Estimate of c_z_2: %f', c_z_2_nls))
disp(sprintf('Estimate of lambda: %f', lambda_nls))

skipline(2)

// Redefine the structure describing the parameters to be estimated (with new initial conditions).
clear eparams
eparams.e_c_m = .9;
eparams.c_z_1 = .5;
eparams.c_z_2 = .2;
eparams.lambda = .0;

// Define the dataset used for estimation (the residual ez is unobserved).
edata = TrueData;
edata.ez = dseries(NaN(TrueData.nobs, 1), 2000Q1, 'ez');

// Second estimation: same equation and sample, but with the Gauss-Newton algorithm.
tic
pac.estimate.nls('zpac', eparams, edata, 2005Q1:2005Q1+200, 'GaussNewton');
toc

skipline(1)

e_c_m_gauss_newton = M_.params(strmatch('e_c_m', M_.param_names, 'exact'));
c_z_1_gauss_newton = M_.params(strmatch('c_z_1', M_.param_names, 'exact'));
c_z_2_gauss_newton = M_.params(strmatch('c_z_2', M_.param_names, 'exact'));
lambda_gauss_newton = M_.params(strmatch('lambda', M_.param_names, 'exact'));

disp(sprintf('Estimate of e_c_m: %f', e_c_m_gauss_newton))
disp(sprintf('Estimate of c_z_1: %f', c_z_1_gauss_newton))
disp(sprintf('Estimate of c_z_2: %f', c_z_2_gauss_newton))
disp(sprintf('Estimate of lambda: %f', lambda_gauss_newton))

// The two optimizers must deliver the same estimates (up to a loose tolerance).
if abs(e_c_m_nls-e_c_m_gauss_newton)>.01
   error('Gauss Newton and direct SSR minimization do not provide consistent estimates (e_c_m)')
end

if abs(c_z_1_nls-c_z_1_gauss_newton)>.01
   error('Gauss Newton and direct SSR minimization do not provide consistent estimates (c_z_1)')
end

if abs(c_z_2_nls-c_z_2_gauss_newton)>.01
   error('Gauss Newton and direct SSR minimization do not provide consistent estimates (c_z_2)')
end

if abs(lambda_nls-lambda_gauss_newton)>.01
   error('Gauss Newton and direct SSR minimization do not provide consistent estimates (lambda)')
end