Putting dynare++ under main dynare SVN repository
git-svn-id: https://www.dynare.org/svn/dynare/trunk@2905 ac1d8469-bf42-47a9-8791-bf33cf982152time-shift
parent
ee49993202
commit
b92b48c58e
|
@ -0,0 +1,27 @@
|
|||
# $Id: Makefile 843 2006-07-28 08:54:19Z tamas $

# Copyright 2008, Ondra Kamenik

# Compiler used for all C++ translation units (historically named CC).
CC = g++

# Reference BLAS/LAPACK link line (ATLAS); kept for systems without MKL.
#LD_LIBS := -llapack -lcblas -lf77blas -latlas -lg2c
# Intel MKL link line (em64t).  NOTE: hard-coded MKL 11.0/074 install path;
# adjust to your local installation.
LD_LIBS := -L /opt/intel/Compiler/11.0/074/mkl/lib/em64t -lmkl_intel_thread -lmkl_lapack -lmkl -lmkl_em64t -L /opt/intel/Compiler/11.0/074/lib/intel64 -lguide -lstdc++

CC_FLAGS := -Wall

# DEBUG=yes builds with symbols and tensor-library self-checks (TL_DEBUG),
# otherwise an optimized build; both use POSIX threads.
ifeq ($(DEBUG),yes)
CC_FLAGS := $(CC_FLAGS) -g -DPOSIX_THREADS -DTL_DEBUG=2
else
CC_FLAGS := $(CC_FLAGS) -O2 -DPOSIX_THREADS
endif

# On Windows (MinGW/Cygwin) use the pthreads-win32 library; elsewhere
# build position-independent code and link the system pthread library.
ifeq ($(OS),Windows_NT)
CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
LD_LIBS := -mno-cygwin -mthreads $(LD_LIBS) -lpthreadGC1
else
CC_FLAGS := -fPIC $(CC_FLAGS)
LD_LIBS := $(LD_LIBS) -lpthread
endif
|
@ -0,0 +1,280 @@
|
|||
<HTML>
|
||||
<TITLE>
|
||||
Dynare++ Change Log
|
||||
</TITLE>
|
||||
<!-- $Header$ -->
|
||||
<BODY>
|
||||
<TABLE CELLSPACING=2 ALIGN="CENTER" BORDER=1>
|
||||
<TR>
|
||||
<TD BGCOLOR="#d0d0d0" WIDTH="85"> <b>Revision</b> </TD>
|
||||
<TD BGCOLOR="#d0d0d0" WIDTH="85"> <b>Version</b></TD>
|
||||
<TD BGCOLOR="#d0d0d0" WIDTH="80"> <b>Date</b> </TD>
|
||||
<TD BGCOLOR="#d0d0d0" WIDTH="600"> <b>Description of changes</b></TD>
|
||||
</TR>
|
||||
|
||||
<TR>
|
||||
<TD>
|
||||
<TD>1.3.7
|
||||
<TD>2008/01/15
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD> Corrected a serious bug in centralizing a
|
||||
decision rule. This bug implies that all results based on simulations
|
||||
of the decision rule were wrong. However results based on stochastic
|
||||
fix points were correct. Thanks to Wouter J. den Haan and Joris de Wind!
|
||||
|
||||
<TR><TD><TD><TD> <TD> Added options --centralize and --no-centralize.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Corrected an error of a wrong
|
||||
variance-covariance matrix in real-time simulations (thanks to Pawel
|
||||
Zabzcyk).
|
||||
|
||||
<TR><TD><TD><TD> <TD> Corrected a bug of integer overflow in refined
|
||||
faa Di Bruno formula if one of refinements is empty. This bug appeared
|
||||
when solving models without forward looking variables.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Corrected a bug in the Sylvester equation
|
||||
formerly working only for models with forward looking variables.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Corrected a bug in global check printout.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Added generating a dump file.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Fixed a bug of forgetting repeated assignments
|
||||
(for example in parameter settings and initval).
|
||||
|
||||
<TR><TD><TD><TD> <TD> Added a diff operator to the parser.
|
||||
|
||||
<TR>
|
||||
<TD>1539
|
||||
<TD>1.3.6
|
||||
<TD>2008/01/03
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD> Corrected a bug of segmentation faults for long
|
||||
names and path names.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Changed a way how random numbers are
|
||||
generated. Dynare++ uses a separate instance of Mersenne twister for
|
||||
each simulation, this corrects a flaw of additional randomness caused
|
||||
by operating system scheduler. This also corrects a strange behaviour
|
||||
of random generator on Windows, where each simulation was getting the
|
||||
same sequence of random numbers.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Added calculation of conditional distributions
|
||||
controlled by --condper and --condsim.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Dropped creating unfolded version of decision
|
||||
rule at the end. This might consume a lot of memory. However,
|
||||
simulations might be slower for some models.
|
||||
|
||||
<TR>
|
||||
<TD>1368
|
||||
<TD>1.3.5
|
||||
<TD>2007/07/11
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD> Corrected a bug of useless storing all derivative
|
||||
indices in a parser. This consumed a lot of memory for large models.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Added an option <tt>--ss-tol</tt> controlling a
|
||||
tolerance used for convergence of a non-linear solver.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Corrected buggy interaction of optimal policy
|
||||
and forward looking variables with more than one period.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Variance matrices can be positive
|
||||
semidefinite. This corrects a bug of throwing an error if estimating
|
||||
approximation errors on ellipse of the state space with a
|
||||
deterministic variable.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Implemented simulations with statistics
|
||||
calculated in real-time. Options <tt>--rtsim</tt> and <tt>--rtper</tt>.
|
||||
|
||||
<TR>
|
||||
<TD>1282
|
||||
<TD>1.3.4
|
||||
<TD>2007/05/15
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected a bug of wrong representation of NaN in generated M-files.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected a bug of occasionally wrong evaluation of higher order derivatives of integer powers.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Implemented automatic handling of terms involving multiple leads.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected a bug in the numerical integration, i.e. checking of the precision of the solution.
|
||||
|
||||
<TR>
|
||||
<TD>1090
|
||||
<TD>1.3.3
|
||||
<TD>2006/11/20
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected a bug of non-registering an auxiliary variable in initval assignments.
|
||||
|
||||
<TR>
|
||||
<TD>988
|
||||
<TD>1.3.2
|
||||
<TD>2006/10/11
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected a few not-serious bugs: segfault on
|
||||
some exception, error in parsing large files, error in parsing
|
||||
matrices with comments, a bug in dynare_simul.m
|
||||
|
||||
<TR><TD><TD><TD> <TD>Added possibility to specify a list of shocks for
|
||||
which IRFs are calculated
|
||||
|
||||
<TR><TD><TD><TD> <TD>Added --order command line switch
|
||||
|
||||
<TR><TD><TD><TD> <TD>Added writing two Matlab files for steady state
|
||||
calcs
|
||||
|
||||
<TR><TD><TD><TD> <TD>Implemented optimal policy using keyword
|
||||
planner_objective and planner_discount
|
||||
|
||||
<TR><TD><TD><TD> <TD>Implemented an R interface to Dynare++ algorithms
|
||||
(Tamas Papp)
|
||||
|
||||
<TR><TD><TD><TD> <TD>Highlevel code reengineered to allow for
|
||||
different model inputs
|
||||
|
||||
<TR>
|
||||
<TD>799
|
||||
<TD>1.3.1
|
||||
<TD>2006/06/13
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected few bugs: in error functions, in linear algebra module.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Updated dynare_simul.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Updated the tutorial.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected an error in summing up tensors where
|
||||
setting up the decision rule derivatives. Thanks to Michel
|
||||
Juillard. The previous version was making deterministic effects of
|
||||
future volatility smaller than they should be.
|
||||
|
||||
<TR>
|
||||
<TD>766
|
||||
<TD>1.3.0
|
||||
<TD>2006/05/22
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD>The non-linear solver replaced with a new one.
|
||||
|
||||
<TR><TD><TD><TD> <TD>The parser and derivator replaced with a new
|
||||
code. Now it is possible to put expressions in parameters and initval
|
||||
sections.
|
||||
|
||||
<TR>
|
||||
<TD>752
|
||||
<TD>1.2.2
|
||||
<TD>2006/05/22
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD>Added an option triggering/suppressing IRF calcs..
|
||||
|
||||
<TR><TD><TD><TD> <TD>Newton algorithm is now used for fix-point calculations.
|
||||
|
||||
<TR><TD><TD><TD> <TD> Vertical narrowing of tensors in Faa Di Bruno
|
||||
formula to avoid multiplication with zeros..
|
||||
|
||||
<TR>
|
||||
<TD>436
|
||||
<TD>1.2.1
|
||||
<TD>2005/08/17
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD>Faa Di Bruno for sparse matrices optimized. The
|
||||
implementation now accommodates vertical refinement of function stack
|
||||
in order to fit a corresponding slice to available memory. In
|
||||
addition, zero slices are identified. For some problems, this implies
|
||||
significant speedup.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Analytic derivator speedup.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected a bug in the threading code. The bug
|
||||
stayed concealed in Linux 2.4.* kernels, and exhibited in Linux 2.6.*,
|
||||
which has a different scheduling. This correction also allows using
|
||||
detached threads on Windows.
|
||||
|
||||
<TR>
|
||||
<TD>410
|
||||
<TD>1.2
|
||||
<TD>2005/07/29
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD> <TD>Added Dynare++ tutorial.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Changed and enriched contents of MAT-4 output
|
||||
file.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected a bug of wrong variable indexation
|
||||
resulting in an exception. The error occurred if a variable appeared
|
||||
at time t-1 or t+1 and not at t.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Added Matlab interface, which allows simulation
|
||||
of a decision rule in Matlab.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Got rid of Matrix Template Library.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Added checking of model residuals by the
|
||||
numerical integration. Three methods: checking along simulation path,
|
||||
checking along shocks, and on ellipse of states.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected a bug in calculation of higher moments
|
||||
of Normal dist.
|
||||
|
||||
<TR><TD><TD><TD> <TD>Corrected a bug of wrong drawing from Normal dist
|
||||
with non-zero covariances.
|
||||
|
||||
<TR><TD><TD><TD>
|
||||
<TD>Added numerical integration module. Product and Smolyak
|
||||
quadratures over Gauss-Hermite and Gauss-Legendre, and quasi Monte
|
||||
Carlo.
|
||||
|
||||
<TR>
|
||||
<TD>152
|
||||
<TD>1.1
|
||||
<TD>2005/04/22
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD>
|
||||
<TD>Added a calculation of approximation at a stochastic steady state
|
||||
(still experimental).
|
||||
|
||||
<TR><TD><TD><TD>
|
||||
<TD>Corrected a bug in Cholesky decomposition of variance-covariance
|
||||
matrix with off-diagonal elements.
|
||||
|
||||
<TR>
|
||||
<TD>89
|
||||
<TD>1.01
|
||||
<TD>2005/02/23
|
||||
<TD>
|
||||
|
||||
<TR><TD><TD><TD>
|
||||
<TD>Added version printout.
|
||||
|
||||
<TR><TD><TD><TD>
|
||||
<TD>Corrected the bug of multithreading support for P4 HT processors running on Win32.
|
||||
|
||||
<TR><TD><TD><TD>
|
||||
<TD>Enhanced Kronecker product code resulting in approx. 20% speedup.
|
||||
|
||||
<TR><TD><TD><TD>
|
||||
<TD>Implemented vertical stack container refinement, and another
|
||||
method for sparse folded Faa Di Bruno (both not used yet).
|
||||
|
||||
<TR>
|
||||
<TD>5
|
||||
<TD>1.0
|
||||
<TD>2005/02/23
|
||||
<TD>The first released version.
|
||||
|
||||
</TABLE>
|
||||
</BODY>
|
||||
</HTML>
|
|
@ -0,0 +1,23 @@
|
|||
It is suggested that you compile Dynare++ with gcc version 3.4. If you have
|
||||
other versions of gcc installed on your system, you need to select version
|
||||
3.4, for example,
|
||||
|
||||
$ make "CC=gcc-3.4"
|
||||
|
||||
For linking, you need to compile the required linear algebra libraries
|
||||
(blas, atlas, etc). Alternatively, you can install precompiled versions of
|
||||
these -- make sure that you select the version that matches your CPU.
|
||||
|
||||
For example, if you have an SSE2 capable CPU, then on Debian GNU/Linux
|
||||
(etch) you need to install the following packages to get the precompiled
|
||||
linear algebra libraries:
|
||||
|
||||
lapack3-dev
|
||||
atlas3-sse2-dev
|
||||
|
||||
Then set the environment variable LD_LIBRARY_PATH, eg
|
||||
|
||||
$ export LD_LIBRARY_PATH=/usr/lib/sse2
|
||||
|
||||
before calling make. This will include the shared libraries in the search
|
||||
path for ld.
|
|
@ -0,0 +1,157 @@
|
|||
\documentclass[10pt]{article}
|
||||
\usepackage{array,natbib,times}
|
||||
\usepackage{amsmath, amsthm, amssymb}
|
||||
|
||||
%\usepackage[pdftex,colorlinks]{hyperref}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\title{Implementation of Ramsey Optimal Policy in Dynare++, Timeless Perspective}
|
||||
|
||||
\author{Ondra Kamen\'\i k}
|
||||
|
||||
\date{June 2006}
|
||||
\maketitle
|
||||
|
||||
\textbf{Abstract:} This document provides a derivation of Ramsey
|
||||
optimal policy from timeless perspective and describes its
|
||||
implementation in Dynare++.
|
||||
|
||||
\section{Derivation of the First Order Conditions}
|
||||
|
||||
Let us start with an economy populated by agents who take a number of
|
||||
variables exogenously, or given. These may include taxes or interest
|
||||
rates for example. These variables can be understood as decision (or control)
|
||||
variables of the timeless Ramsey policy (or social planner). The agent's
|
||||
information set at time $t$ includes mass-point distributions of these
|
||||
variables for all times after $t$. If $i_t$ denotes an interest rate
|
||||
for example, then the information set $I_t$ includes
|
||||
$i_{t|t},i_{t+1|t},\ldots,i_{t+k|t},\ldots$ as numbers. In addition
|
||||
the information set includes all realizations of past exogenous
|
||||
innovations $u_\tau$ for $\tau=t,t-1,\ldots$ and distributions
|
||||
$u_\tau\sim N(0,\Sigma)$ for $\tau=t+1,\ldots$. These information sets will be denoted $I_t$.
|
||||
|
||||
An information set including only the information on past realizations
|
||||
of $u_\tau$ and future distributions of $u_\tau\sim N(0,\Sigma)$ will
|
||||
be denoted $J_t$. We will use the following notation for expectations
|
||||
through these sets:
|
||||
\begin{eqnarray*}
|
||||
E^I_t[X] &=& E(X|I_t)\\
|
||||
E^J_t[X] &=& E(X|J_t)
|
||||
\end{eqnarray*}
|
||||
|
||||
The agents optimize taking the decision variables of the social
|
||||
planner at $t$ and future as given. This means that all expectations
|
||||
they form are conditioned on the set $I_t$. Let $y_t$ denote a vector
|
||||
of all endogenous variables including the planner's decision
|
||||
variables. Let the number of endogenous variables be $n$. The economy
|
||||
can be described by $m$ equations including the first order conditions
|
||||
and transition equations:
|
||||
\begin{equation}\label{constr}
|
||||
E_t^I\left[f(y_{t-1},y_t,y_{t+1},u_t)\right] = 0.
|
||||
\end{equation}
|
||||
This leaves $n-m$ degrees of freedom, corresponding to
the planner's control variables. The solution of this problem is a
|
||||
decision rule of the form:
|
||||
\begin{equation}\label{agent_dr}
|
||||
y_t=g(y_{t-1},u_t,c_{t|t},c_{t+1|t},\ldots,c_{t+k|t},\ldots),
|
||||
\end{equation}
|
||||
where $c$ is a vector of planner's control variables.
|
||||
|
||||
Each period the social planner chooses the vector $c_t$ to maximize
|
||||
his objective such that \eqref{agent_dr} holds for all times following
|
||||
$t$. This would lead to $n-m$ first order conditions with respect to
|
||||
$c_t$. These first order conditions would contain unknown derivatives
|
||||
of endogenous variables with respect to $c$, which would have to be
|
||||
retrieved from the implicit constraints \eqref{constr} since the
|
||||
explicit form \eqref{agent_dr} is not known.
|
||||
|
||||
The other way to proceed is to assume that the planner is so dumb that
|
||||
he is not sure what are his control variables. So he optimizes with
|
||||
respect to all $y_t$ given the constraints \eqref{constr}. If the
|
||||
planner's objective is $b(y_{t-1},y_t,y_{t+1},u_t)$ with a discount rate
|
||||
$\beta$, then the optimization problem looks as follows:
|
||||
\begin{align}
|
||||
\max_{\left\{y_\tau\right\}^\infty_t}&E_t^J
|
||||
\left[\sum_{\tau=t}^\infty\beta^{\tau-t}b(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right]\notag\\
|
||||
&\rm{s.t.}\label{planner_optim}\\
|
||||
&\hskip1cm E^I_\tau\left[f(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right]=0\quad\rm{for\ }
|
||||
\tau=\ldots,t-1,t,t+1,\ldots\notag
|
||||
\end{align}
|
||||
Note two things: First, each constraint \eqref{constr} in
|
||||
\eqref{planner_optim} is conditioned on $I_\tau$ not $I_t$. This is
|
||||
very important, since the behaviour of agents at period $\tau=t+k$ is
|
||||
governed by the constraint using expectations conditioned on $t+k$,
|
||||
not $t$. The social planner knows that at $t+k$ the agents will use
|
||||
all information available at $t+k$. Second, the constraints for the
|
||||
planner's decision made at $t$ include also constraints for agent's
|
||||
behaviour prior to $t$. This is because the agent's decision rules are
|
||||
given in the implicit form \eqref{constr} and not in the explicit form
|
||||
\eqref{agent_dr}.
|
||||
|
||||
Using Lagrange multipliers, this can be rewritten as
|
||||
\begin{align}
|
||||
\max_{y_t}E_t^J&\left[\sum_{\tau=t}^\infty\beta^{\tau-t}b(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right.\notag\\
|
||||
&\left.+\sum_{\tau=-\infty}^{\infty}\beta^{\tau-t}\lambda^T_\tau E_\tau^I\left[f(y_{\tau-1},y_\tau,y_{\tau+1},u_\tau)\right]\right],
|
||||
\label{planner_optim_l}
|
||||
\end{align}
|
||||
where $\lambda_t$ is a vector of Lagrange multipliers corresponding to
|
||||
constraints \eqref{constr}. Note that the multipliers are multiplied
|
||||
by powers of $\beta$ in order to make them stationary. Taking a
|
||||
derivative wrt $y_t$ and putting it to zero yields the first order
|
||||
conditions of the planner's problem:
|
||||
\begin{align}
|
||||
E^J_t\left[\vphantom{\frac{\int^(_)}{\int^(\_)}}\right.&\frac{\partial}{\partial y_t}b(y_{t-1},y_t,y_{t+1},u_t)+
|
||||
\beta L^{+1}\frac{\partial}{\partial y_{t-1}}b(y_{t-1},y_t,y_{t+1},u_t)\notag\\
|
||||
&+\beta^{-1}\lambda_{t-1}^TE^I_{t-1}\left[L^{-1}\frac{\partial}{\partial y_{t+1}}f(y_{t-1},y_t,y_{t+1},u_t)\right]\notag\\
|
||||
&+\lambda_t^TE^I_t\left[\frac{\partial}{\partial y_{t}}f(y_{t-1},y_t,y_{t+1},u_t)\right]\notag\\
|
||||
&+\beta\lambda_{t+1}^TE^I_{t+1}\left[L^{+1}\frac{\partial}{\partial y_{t-1}}f(y_{t-1},y_t,y_{t+1},u_t)\right]
|
||||
\left.\vphantom{\frac{\int^(_)}{\int^(\_)}}\right]
|
||||
= 0,\label{planner_optim_foc}
|
||||
\end{align}
|
||||
where $L^{+1}$ and $L^{-1}$ are one period lead and lag operators respectively.
|
||||
|
||||
Now we have to make a few assertions concerning expectations
|
||||
conditioned on the different information sets to simplify
|
||||
\eqref{planner_optim_foc}. Recall the formula for integration through
|
||||
information on which another expectation is conditioned, this is:
|
||||
$$E\left[E\left[u|v\right]\right] = E[u],$$
|
||||
where the outer expectation integrates through $v$. Since $J_t\subset
|
||||
I_t$, by easy application of the above formula we obtain
|
||||
\begin{eqnarray}
|
||||
E^J_t\left[E^I_t\left[X\right]\right] &=& E^J_t\left[X\right]\quad\rm{and}\notag\\
|
||||
E^J_t\left[E^I_{t-1}\left[X\right]\right] &=& E^J_t\left[X\right]\label{e_iden}\\
|
||||
E^J_t\left[E^I_{t+1}\left[X\right]\right] &=& E^J_{t+1}\left[X\right]\notag
|
||||
\end{eqnarray}
|
||||
Now, the last term of \eqref{planner_optim_foc} needs a special
|
||||
attention. It is equal to
|
||||
$E^J_t\left[\beta\lambda^T_{t+1}E^I_{t+1}[X]\right]$. If we assume
|
||||
that the problem \eqref{planner_optim} has a solution, then there is a
|
||||
deterministic function from $J_{t+1}$ to $\lambda_{t+1}$ and so
|
||||
$\lambda_{t+1}\in J_{t+1}\subset I_{t+1}$. And the last term is equal
|
||||
to $E^J_{t}\left[E^I_{t+1}[\beta\lambda^T_{t+1}X]\right]$, which is
|
||||
$E^J_{t+1}\left[\beta\lambda^T_{t+1}X\right]$. This term can be
|
||||
equivalently written as
|
||||
$E^J_{t}\left[\beta\lambda^T_{t+1}E^J_{t+1}[X]\right]$. The reason why
|
||||
we write the term in this way will be clear later. All in all, we have
|
||||
\begin{align}
|
||||
E^J_t\left[\vphantom{\frac{\int^(_)}{\int^(\_)}}\right.&\frac{\partial}{\partial y_t}b(y_{t-1},y_t,y_{t+1},u_t)+
|
||||
\beta L^{+1}\frac{\partial}{\partial y_{t-1}}b(y_{t-1},y_t,y_{t+1},u_t)\notag\\
|
||||
&+\beta^{-1}\lambda_{t-1}^TL^{-1}\frac{\partial}{\partial y_{t+1}}f(y_{t-1},y_t,y_{t+1},u_t)\notag\\
|
||||
&+\lambda_t^T\frac{\partial}{\partial y_{t}}f(y_{t-1},y_t,y_{t+1},u_t)\notag\\
|
||||
&+\beta\lambda_{t+1}^TE^J_{t+1}\left[L^{+1}\frac{\partial}{\partial y_{t-1}}f(y_{t-1},y_t,y_{t+1},u_t)\right]
|
||||
\left.\vphantom{\frac{\int^(_)}{\int^(\_)}}\right]
|
||||
= 0.\label{planner_optim_foc2}
|
||||
\end{align}
|
||||
Note that we have not proved that \eqref{planner_optim_foc} and
|
||||
\eqref{planner_optim_foc2} are equivalent. We proved only that if
|
||||
\eqref{planner_optim_foc} has a solution, then
|
||||
\eqref{planner_optim_foc2} is equivalent (and has the same solution).
|
||||
|
||||
\section{Implementation}
|
||||
|
||||
The user inputs $b(y_{t-1},y_t,y_{t+1},u_t)$, $\beta$, and agent's
|
||||
first order conditions \eqref{constr}. The algorithm has to produce
|
||||
\eqref{planner_optim_foc2}.
|
||||
|
||||
\end{document}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,54 @@
|
|||
# Location of R's C headers (Rinternals.h etc.); override on the command
# line if R is installed elsewhere.
RINTERNALS=/usr/share/R/include/

sylvcppsource := $(wildcard ../../sylv/cc/*.cpp)
sylvhsource := $(wildcard ../../sylv/cc/*.h)
sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource))

tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))

kordcwebsource := $(wildcard ../../kord/*.cweb)
kordcppsource := $(patsubst %.cweb,%.cpp,$(kordcwebsource))
kordhwebsource := $(wildcard ../../kord/*.hweb)
kordhsource := $(patsubst %.hweb,%.h,$(kordhwebsource))
kordobjects := $(patsubst %.cweb,%.o,$(kordcwebsource))

integcwebsource := $(wildcard ../../integ/cc/*.cweb)
integcppsource := $(patsubst %.cweb,%.cpp,$(integcwebsource))
integhwebsource := $(wildcard ../../integ/cc/*.hweb)
integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
integobjects := $(patsubst %.cweb,%.o,$(integcwebsource))

parserhsource := $(wildcard ../../parser/cc/*.h)
# FIX: was $(wildcard ../parser/cc/*.cpp) — one directory level short,
# inconsistent with parserhsource above, so the glob matched nothing.
parsercppsource := $(wildcard ../../parser/cc/*.cpp)

utilshsource := $(wildcard ../../utils/cc/*.h)
# FIX: was $(wildcard ../utils/cc/*.cpp) — same missing "../" as above.
utilscppsource := $(wildcard ../../utils/cc/*.cpp)

srccpp := dynare3.cpp dynare_model.cpp planner_builder.cpp dynare_atoms.cpp dynare_params.cpp nlsolve.cpp
objects := $(patsubst %.cpp,../../src/%.o,$(srccpp)) \
	$(patsubst %.y,%_ll.o,$(wildcard ../../src/*.y)) \
	$(patsubst %.lex,%_tab.o,$(wildcard ../../src/*.lex))

PKG_CPPFLAGS= -I../../tl/cc -I../../sylv/cc -I../../kord -I../../src -I../.. -I$(RINTERNALS)
PKG_LIBS= ${LAPACK_LIBS} ${BLAS_LIBS} $(objects) $(kordobjects) $(integobjects) $(tlobjects) ../../parser/cc/parser.a ../../utils/cc/utils.a $(sylvobjects) -lpthread -llapack -lcblas -lf77blas -latlas -lg2c -lstdc++

ifneq ($(LD_LIBRARY_PATH),) # use LD_LIBRARY_PATH from environment
PKG_LIBS := -Wl,--library-path $(LD_LIBRARY_PATH) $(PKG_LIBS)
endif

dynareR.so: dynareR.o
	g++ -shared -o dynareR.so dynareR.o -L/usr/lib/R/lib -lR $(PKG_LIBS)

# FIX: was "g++ -I/usr/share/R/include -I/usr/share/R/include ..." —
# the include path was hard-coded twice and $(RINTERNALS) was ignored.
dynareR.o: dynareR.cpp
	g++ -I$(RINTERNALS) $(PKG_CPPFLAGS) \
		-fpic -g -O2 -c dynareR.cpp -o dynareR.o -DDEBUG

test: test.cpp dynareR.cpp
	g++ -O0 -g -o test test.cpp -DDEBUG $(PKG_LIBS) $(PKG_CPPFLAGS)

test-debug:
	valgrind --leak-check=yes ./test
|
|
@ -0,0 +1,17 @@
|
|||
COMPILING
|
||||
|
||||
The makefile for this interface is still preliminary, I will write a decent
|
||||
one when I have the time. It needs all the compiled files from dynare++,
|
||||
but doesn't know how to make them. So first you need to run make in the
|
||||
src/ directory, then run make in extern/R.
|
||||
|
||||
You need Rinternals.h to make this file. If you are not using prepackaged R
|
||||
on Unix/Linux, you need to modify the variable RINCLUDE in the Makefile
|
||||
accordingly.
|
||||
|
||||
To compile dynare++, read doc/compiling-notes.txt.
|
||||
|
||||
INSTALLATION
|
||||
|
||||
Copy the dynareR.r and dynareR.so files to your working directory so that R
|
||||
can find them.
|
|
@ -0,0 +1,249 @@
|
|||
// $Id: dynareR.cpp 862 2006-08-04 17:34:56Z tamas $
|
||||
|
||||
// Copyright 2006, Tamas K Papp
|
||||
|
||||
#include "dynare3.h" // Dynare class
|
||||
#include "approximation.h" // Approximation class
|
||||
|
||||
// exceptions
|
||||
#include "dynare_exception.h"
|
||||
#include "parser/cc/parser_exception.h"
|
||||
#include "utils/cc/exception.h"
|
||||
#include "SylvException.h"
|
||||
#include "tl_exception.h"
|
||||
#include "kord_exception.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#ifdef DEBUG
|
||||
#include <stdio.h>
|
||||
#endif
|
||||
|
||||
#include <R_ext/Memory.h>
|
||||
|
||||
/** This file contains the C glue functions for an R interface to
|
||||
* Dynare++. Although written in standard C (except for the use of
|
||||
* R_alloc), the indexing, calling and memory management conventions
|
||||
* of the functions in this file were tailored for R.
|
||||
*
|
||||
* It is not recommended that you use this interface for anything else
|
||||
* but R.
|
||||
*/
|
||||
|
||||
/** Error codes: these error codes correspond to possible
|
||||
* exceptions. */
|
||||
#define DYNARER_SYLVEXCEPTION 1
|
||||
#define DYNARER_DYNAREEXCEPTION 2
|
||||
#define DYNARER_OGUEXCEPTION 3
|
||||
#define DYNARER_TLEXCEPTION 4
|
||||
#define DYNARER_KORDEXCEPTION 5
|
||||
#define DYNARER_NAMESMATCHINGERROR 6
|
||||
|
||||
/** Copies the message into a buffer. The buffer is allocated and
|
||||
* managed by R, ie it will be garbage collected after the .C call
|
||||
* returns and the contents are duplicated.
|
||||
*/
|
||||
char *passmessage(const char *errormessage) {
|
||||
long l = strlen(errormessage);
|
||||
char *em = R_alloc(l, 1);
|
||||
return strcpy(em, errormessage);
|
||||
}
|
||||
|
||||
/** This function puts the mapping between the newtotal items after
|
||||
* nl[offset] and the items in orig into the buffer perm, which has to
|
||||
* be at least as long as newtotal. The function uses R indexing,
|
||||
* that is to say, the first index is 1.
|
||||
*/
|
||||
int matchnames(const char **orig, int origtotal,
|
||||
const NameList &nl, int offset, int newtotal,
|
||||
int *perm) {
|
||||
#ifdef DEBUG
|
||||
printf("matching names (R indexing):\n");
|
||||
#endif
|
||||
for (int i=0; i < newtotal; i++) {
|
||||
int j;
|
||||
for (j=0; j < origtotal; j++)
|
||||
if (strcmp(nl.getName(offset+i), *(orig+j))==0) {
|
||||
*(perm+i) = j+1;
|
||||
#ifdef DEBUG
|
||||
printf("%d -> %d\n",i+1,j+1);
|
||||
#endif
|
||||
break;
|
||||
}
|
||||
if (j==origtotal)
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** dynareR is the interface function. The user provides:
|
||||
* - a list of endogenous and exogenous variables, a list of
|
||||
* parameters (and the length of each list)
|
||||
* - the model equations (modeleq, pointer to a 0-terminated string)
|
||||
* - the order of expansion (ord)
|
||||
* - journal file name (jnlfile, can be "/dev/null" for no journal)
|
||||
* - values for the parametes (parval)
|
||||
* - variance-covariance matrix (vcov, stacked by columns, R does
|
||||
* this)
|
||||
* - initial values for finding the steady state (initval)
|
||||
* - and the number of steps for the approximation algorithm
|
||||
* (numsteps)
|
||||
*
|
||||
* If successful, the interface will write the results to these
|
||||
* buffers:
|
||||
* - tensorbuffer for the steady state and the flattened tensors
|
||||
* - num_state for the number of endogenous variables that ended up in
|
||||
* the state
|
||||
* - mappings to variable names (ordering_state, ordering_endo,
|
||||
* ordering_exo), indices start from 1
|
||||
* - the deterministic steady state (newinitval)
|
||||
*
|
||||
* If dynare throws an exception, the interface tries to catch it and
|
||||
* return an error code (error), and error message (errormessage), and
|
||||
* if applicable, information on the stability of the model
|
||||
* (kordcode). errormessage is allocated into R's memory, and will be
|
||||
* collected after duplication.
|
||||
*/
|
||||
extern "C" {
|
||||
void dynareR(const char** endo, const int* num_endo,
|
||||
const char** exo, const int* num_exo,
|
||||
const char** par, const int* num_par,
|
||||
const char** equations, const int* ord, const char* jnlfile,
|
||||
const double *parval, const double *vcov,
|
||||
const double *initval,
|
||||
const int *num_steps,
|
||||
double* tensorbuffer,
|
||||
int *num_state, int *ordering_state,
|
||||
int *ordering_endo, int *ordering_exo,
|
||||
double *newinitval,
|
||||
int* error, char **errormessage, int *kordcode) {
|
||||
// construct the model here
|
||||
try {
|
||||
#ifdef DEBUG // will print only first var names etc.
|
||||
printf("eq: %s\nendo: %d %s\nexo: %d %s\npar: %d %s\nord: %d\n",
|
||||
*equations,*num_endo,*endo,*num_exo,*exo,*num_par,*par,*ord);
|
||||
#endif
|
||||
// create journal
|
||||
Journal journal(jnlfile);
|
||||
// create Dynare object
|
||||
Dynare dynare(endo, *num_endo, exo, *num_exo,
|
||||
par, *num_par, *equations, strlen(*equations),
|
||||
*ord, journal);
|
||||
// set Vcov and parameter values
|
||||
copy(parval,parval+(*num_par),dynare.getParams().base());
|
||||
#ifdef DEBUG
|
||||
printf("parameter values (%d):\n",dynare.getParams().length());
|
||||
dynare.getParams().print();
|
||||
#endif
|
||||
copy(vcov,vcov+(*num_exo)*(*num_exo),dynare.getVcov().base());
|
||||
#ifdef DEBUG
|
||||
printf("vcov matrix:\n");
|
||||
dynare.getVcov().print();
|
||||
#endif
|
||||
// set initial values
|
||||
Vector iv(initval,*num_endo);
|
||||
#ifdef DEBUG
|
||||
printf("initial values:\n");
|
||||
iv.print();
|
||||
#endif
|
||||
dynare.setInitOuter(iv);
|
||||
// construct approximation
|
||||
tls.init(dynare.order(),
|
||||
dynare.nstat()+2*dynare.npred()+3*dynare.nboth()+
|
||||
2*dynare.nforw()+dynare.nexog());
|
||||
Approximation approximation(dynare,journal,*num_steps);
|
||||
approximation.walkStochSteady();
|
||||
// write the steady state into the buffer
|
||||
int ny = dynare.ny();
|
||||
const Vector ss(dynare.getSteady());
|
||||
// ss = ConstVector(approximation.getSS(), 0); // FIXME allow
|
||||
// // for nonzero
|
||||
int s = dynare.getStateNames().getNum();
|
||||
int sm = s;
|
||||
tensorbuffer = copy(ss.base(),ss.base()+ny,tensorbuffer);
|
||||
// write the tensors into buffer
|
||||
const UnfoldDecisionRule& udr =
|
||||
approximation.getUnfoldDecisionRule();
|
||||
for (int i=1; i <= *ord; i++) {
|
||||
const UFSTensor* t = udr.get(Symmetry(i));
|
||||
#ifdef DEBUG
|
||||
printf("tensor %d:\n", i);
|
||||
t->print();
|
||||
#endif
|
||||
tensorbuffer = copy(t->base(), t->base()+ny*sm, tensorbuffer);
|
||||
sm *= s;
|
||||
}
|
||||
// save number of endogenous states
|
||||
*num_state = s-(*num_exo);
|
||||
// ordering
|
||||
#ifdef DEBUG
|
||||
printf("all endo names:\n");
|
||||
dynare.getAllEndoNames().print();
|
||||
printf("all state names:\n");
|
||||
dynare.getStateNames().print();
|
||||
#endif
|
||||
if (matchnames(endo, *num_endo, dynare.getAllEndoNames(),
|
||||
0, *num_endo, ordering_endo) ||
|
||||
matchnames(endo, *num_endo, dynare.getStateNames(),
|
||||
0, *num_state, ordering_state) ||
|
||||
matchnames(exo, *num_exo, dynare.getStateNames(),
|
||||
*num_state, *num_exo, ordering_exo)) {
|
||||
*error = DYNARER_NAMESMATCHINGERROR;
|
||||
*errormessage = "There was a problem when matching names. This is weird and should not happen.";
|
||||
return;
|
||||
}
|
||||
// return new init values (first column of SS matrix)
|
||||
ConstVector newinit((const GeneralMatrix&) approximation.getSS(), 0);
|
||||
#ifdef DEBUG
|
||||
printf("new initial values:\n");
|
||||
newinit.print();
|
||||
#endif
|
||||
copy(newinit.base(),newinit.base()+(*num_endo),newinitval);
|
||||
} catch (const SylvException &e) {
|
||||
*error = DYNARER_SYLVEXCEPTION;
|
||||
char errorbuffer[501];
|
||||
e.printMessage(errorbuffer, 500);
|
||||
*errormessage = passmessage(errorbuffer);
|
||||
#ifdef DEBUG
|
||||
printf("Caught Sylv exception: ");
|
||||
e.printMessage();
|
||||
#endif
|
||||
return;
|
||||
} catch (const DynareException &e) {
|
||||
*error = DYNARER_DYNAREEXCEPTION;
|
||||
*errormessage = passmessage(e.message());
|
||||
#ifdef DEBUG
|
||||
printf("Caught Dynare exception: %s\n", e.message());
|
||||
#endif
|
||||
return;
|
||||
} catch (const ogu::Exception &e) {
|
||||
*error = DYNARER_OGUEXCEPTION;
|
||||
*errormessage = passmessage(e.message());
|
||||
#ifdef DEBUG
|
||||
printf("Caught ogu::Exception: ");
|
||||
e.print();
|
||||
#endif
|
||||
return;
|
||||
} catch (const TLException &e) {
|
||||
*error = DYNARER_TLEXCEPTION;
|
||||
*errormessage = passmessage(e.getmessage());
|
||||
#ifdef DEBUG
|
||||
printf("Caugth TL exception: ");
|
||||
e.print();
|
||||
#endif
|
||||
return;
|
||||
} catch (const KordException &e) {
|
||||
*error = DYNARER_KORDEXCEPTION;
|
||||
*errormessage = passmessage(e.getmessage());
|
||||
*kordcode = e.code(); // Kord error code
|
||||
#ifdef DEBUG
|
||||
printf("Caugth Kord exception: ");
|
||||
e.print();
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
*error = 0;
|
||||
return;}
|
||||
}
|
|
@ -0,0 +1,103 @@
|
|||
## $Id: dynareR.r 862 2006-08-04 17:34:56Z tamas $
|
||||
|
||||
## Copyright 2006, Tamas K Papp
|
||||
|
||||
dyn.load("dynareR.so") # FIXME: make it platform-independent
|
||||
|
||||
## FIXME hide auxiliary functions in a namespace
|
||||
|
||||
dynareR.indextensor <- function(ord, nume, nums) {
|
||||
nume*((nums^ord-1)/(nums-1))
|
||||
}
|
||||
|
||||
dynareR.extracttensor <- function(tensor, ord, nume, nums) {
|
||||
aperm(array(tensor[dynareR.indextensor(ord,nume,nums)+(1:(nume*nums^ord))],
|
||||
c(nume,rep(nums,ord))),(ord+1):1)
|
||||
}
|
||||
|
||||
dynareR.errormessages <- c("Sylvester exception",
|
||||
"Dynare exception",
|
||||
"OGU exception",
|
||||
"Tensor library exception",
|
||||
"K-order expansion library exception",
|
||||
"Error matching names")
|
||||
|
||||
calldynare <- function(modeleq, endo, exo, parameters, expandorder,
|
||||
parval, vcovmatrix, initval=rep(1,length(endo)),
|
||||
numsteps=0, jnlfile="/dev/null") {
|
||||
## check type of parameters
|
||||
local({
|
||||
is.charvector <- function(cv) { is.character(cv) && is.vector(cv) }
|
||||
stopifnot(is.charvector(modeleq) && is.charvector(endo) &&
|
||||
is.charvector(exo) && is.charvector(parameters) &&
|
||||
is.charvector(jnlfile))
|
||||
})
|
||||
stopifnot(is.numeric(expandorder) && is.vector(expandorder) &&
|
||||
(length(expandorder) == 1) && (expandorder >= 0))
|
||||
stopifnot(length(jnlfile) == 1)
|
||||
local({ # variable names
|
||||
checkvarname <- function(v) {
|
||||
stopifnot(length(grep("[^a-zA-Z].*",v)) == 0) # look for strange chars
|
||||
}
|
||||
checkvarname(endo)
|
||||
checkvarname(exo)
|
||||
checkvarname(parameters)
|
||||
})
|
||||
stopifnot(is.vector(parval) && is.numeric(parval))
|
||||
stopifnot(is.vector(initval) && is.numeric(initval))
|
||||
stopifnot(is.matrix(vcovmatrix) && is.numeric(vcovmatrix))
|
||||
stopifnot(is.numeric(numsteps) && is.vector(numsteps) &&
|
||||
(length(numsteps)==1))
|
||||
## append semicolons to model equations if necessary
|
||||
modeleq <- sapply(modeleq, function(s) {
|
||||
if (length(grep("^.*; *$",s))==1)
|
||||
s
|
||||
else
|
||||
sprintf("%s;",s)
|
||||
})
|
||||
## then concatenate into a single string
|
||||
modeleq <- paste(modeleq, collapse=" ")
|
||||
## call dynareR
|
||||
nume <- length(endo)
|
||||
maxs <- length(endo)+length(exo)
|
||||
dr <- .C("dynareR",
|
||||
endo,as.integer(nume),
|
||||
exo,as.integer(length(exo)),
|
||||
parameters,as.integer(length(parameters)),
|
||||
modeleq,as.integer(expandorder),jnlfile,
|
||||
as.double(parval),as.double(vcovmatrix),
|
||||
as.double(initval),
|
||||
as.integer(numsteps),
|
||||
tensorbuffer=double(dynareR.indextensor(expandorder+1,nume,maxs)),
|
||||
numstate=integer(1), orderstate=integer(maxs),
|
||||
orderendo=integer(nume),
|
||||
orderexo=integer(length(exo)),
|
||||
newinitval=double(nume),
|
||||
error=integer(1),
|
||||
errormessage=character(1),
|
||||
kordcode=integer(1))
|
||||
## check for errors
|
||||
kordcode <- 0
|
||||
if (dr$error == 0) {
|
||||
if (dr$error == 5) {
|
||||
list(kordcode=dr$kordcode - 251) # magic dynare++ constant
|
||||
} else {
|
||||
## return result
|
||||
with(dr, {
|
||||
nums <- numstate+length(exo)
|
||||
list(ss=dynareR.extracttensor(dr$tensorbuffer,0,nume,nums), # ss
|
||||
rule=sapply(1:expandorder,function (o) { # decision rule
|
||||
dynareR.extracttensor(dr$tensorbuffer,o,nume,nums)
|
||||
}),
|
||||
orderstate=orderstate[1:numstate], # state ordering
|
||||
orderendo=orderendo, # endog. ordering
|
||||
orderexo=orderexo, # exog. ordering
|
||||
newinitval=newinitval, # new init values
|
||||
kordcode=0)
|
||||
})
|
||||
}
|
||||
} else {
|
||||
stop(sprintf("%s (\"%s\")",dynareR.errormessages[dr$error],
|
||||
dr$errormessage))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,201 @@
|
|||
%% $Id: dynareR.tex 863 2006-08-04 17:35:21Z tamas $
|
||||
%% Copyright Tamas K Papp, 2006
|
||||
%% should compile with any reasonable TeX distribution, I am using tetex
|
||||
\documentclass[12pt,a4paper]{article}
|
||||
|
||||
\usepackage{amsmath}
|
||||
\usepackage{amsfonts}
|
||||
%\usepackage[letterpaper,vmargin=1.7in]{geometry}
|
||||
%\usepackage[letterpaper,left=2cm,right=8cm,bottom=3cm,top=3cm,marginparwidth=4cm]{geometry}
|
||||
%\usepackage{natbib}
|
||||
\usepackage{graphicx}
|
||||
\usepackage{url}
|
||||
\usepackage{natbib}
|
||||
\usepackage{color}
|
||||
\usepackage{paralist} % compactitem
|
||||
\DeclareMathOperator{\Var}{Var}
|
||||
\DeclareMathOperator{\Cov}{Cov}
|
||||
\DeclareMathOperator{\argmin}{argmin}
|
||||
\DeclareMathOperator{\argmax}{argmax}
|
||||
\DeclareMathSymbol{\ueps}{\mathord}{letters}{"0F} % ugly epsilon
|
||||
\renewcommand{\epsilon}{\varepsilon}
|
||||
\newcommand{\aseq}{\overset{as}=} % almost surely equals
|
||||
|
||||
\usepackage{fancyhdr}
|
||||
\pagestyle{fancy}
|
||||
\lhead{Tam\'as K Papp} \chead{} \rhead{DynareR}
|
||||
\cfoot{\thepage}
|
||||
|
||||
\renewcommand\floatpagefraction{.9}
|
||||
\renewcommand\topfraction{.9}
|
||||
\renewcommand\bottomfraction{.9}
|
||||
\renewcommand\textfraction{.1}
|
||||
|
||||
\usepackage{listings}
|
||||
\lstset{
|
||||
language=R,
|
||||
extendedchars=true,
|
||||
basicstyle=\footnotesize,
|
||||
stringstyle=\ttfamily,
|
||||
commentstyle=\slshape,
|
||||
% numbers=left,
|
||||
% stepnumber=5,
|
||||
% numbersep=6pt,
|
||||
% numberstyle=\footnotesize,
|
||||
breaklines=true,
|
||||
frame=single,
|
||||
columns=fullflexible,
|
||||
}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\title{DynareR}
|
||||
\author{Tam\'as K Papp (\url{tpapp@princeton.edu})}
|
||||
\date{\today}
|
||||
\maketitle
|
||||
|
||||
DynareR is an R interface for Ondra Kamen\'ik's Dynare++ program. The
|
||||
interface is still under development, and the functions might change.
|
||||
However, I thought that some documentation would help to get users
|
||||
started.
|
||||
|
||||
The purpose of DynareR is to return the transition rule (the
|
||||
steady state and a list of tensors) for a given model. DynareR
|
||||
does not simulate, and currently does no checking of the
|
||||
approximation. Primarily, the interface is to be intended to be used
|
||||
in Bayesian estimation of DSGE models (via MCMC).
|
||||
|
||||
Before you read on, make sure that
|
||||
\begin{compactitem}
|
||||
\item you understand what Dynare++ is and how it works,
|
||||
\item you have compiled Dynare++ and DynareR (see \verb!README! in
|
||||
\verb!extern/R!), and placed \verb!dynareR.so! and
|
||||
\verb!dynareR.r! in your load path for R.
|
||||
\end{compactitem}
|
||||
|
||||
The function that performs all the work is called
|
||||
\lstinline{calldynare}. Its is defined like this:
|
||||
\begin{lstlisting}
|
||||
calldynare <- function(modeleq, endo, exo, parameters, expandorder,
|
||||
parval, vcovmatrix, initval=rep(1,length(endo)),
|
||||
numsteps=0, jnlfile="/dev/null") {
|
||||
...
|
||||
}
|
||||
\end{lstlisting}
|
||||
\lstinline{modeleq} is a character vector for the model equations, and
|
||||
it may have a length longer than one. First, \lstinline{calldynare}
|
||||
checks if each string in the vector has a terminating semicolon (may
|
||||
be followed by whitespace), if it doesn't, then it appends one. Then
|
||||
it concatenates all equations into a single string. Thus, the
|
||||
following versions of \lstinline{modeleq} give equivalent results:
|
||||
\begin{lstlisting}
|
||||
modeleq1 <- c("(c/c(1))^gamma*beta*(alpha*exp(a(1))*k^(alpha-1)+1-delta)=1",
|
||||
"a=rho*a(-1)+eps",
|
||||
"k+c=exp(a)*k(-1)^alpha+(1-delta)*k(-1)")
|
||||
modeleq2 <- c("(c/c(1))^gamma*beta*(alpha*exp(a(1))*k^(alpha-1)+1-delta)=1;",
|
||||
"a=rho*a(-1)+eps ; ",
|
||||
"k+c=exp(a)*k(-1)^alpha+(1-delta)*k(-1) \t;\t ")
|
||||
modeleq3 <- paste(modeleq1, collapse=" ")
|
||||
\end{lstlisting}
|
||||
The next three arguments name the endo- and exogenous variables and
|
||||
the parameters. The names should be character vectors, for example,
|
||||
\begin{lstlisting}
|
||||
parameters <- c("beta","gamma","rho","alpha","delta")
|
||||
varendo <- c("k","c","a")
|
||||
varexo <- "eps"
|
||||
\end{lstlisting}
|
||||
\lstinline{calldynare} also needs the order of the approximation
|
||||
\lstinline{expandorder} (a nonnegative integer), the parameter values
|
||||
\lstinline{parval} (should be the same length as
|
||||
\lstinline{parameters}), a variance-covariance matrix \lstinline{vcov}
|
||||
(dimensions should match the length of \lstinline{exo}) and initial
|
||||
values for finding the deterministic steady state
|
||||
(\lstinline{initval}). If you don't provide initial values,
|
||||
\lstinline{calldynare} will use a sequence of $1$s, on the assumption
|
||||
that most variables in economics are positive --- you should always
|
||||
try to provide a reasonable initial guess for the nonlinear solver if
|
||||
possible (if you are doing MCMC, chances are that you only have to do
|
||||
it once, see \lstinline{newinitval} below).
|
||||
|
||||
You can also provide the number of steps for calculating the
|
||||
stochastic steady state (\lstinline{numsteps}, the default is zero,
|
||||
see the dynare++ tutorial for more information) and the name of the
|
||||
journal file \lstinline{jnlfile}. If you don't provide a journal
|
||||
file, the default is \verb!/dev/null!.
|
||||
|
||||
Below, you see an example of using dynareR.
|
||||
\lstinputlisting{test.r}
|
||||
|
||||
\lstinline{calldynare} returns the results in a list, variables below
|
||||
refer to elements of this list. First, you should always check
|
||||
\lstinline{kordcode}, which tells whether dynare++ could calculate an
|
||||
approximation. It can have the following values:
|
||||
\begin{description}
|
||||
\item[0] the calculation was successful
|
||||
\item[1] the system is not stable (Blanchard-Kahn)
|
||||
\item[2] failed to calculate fixed point (infinite values)
|
||||
\item[3] failed to calculate fixed point (NaN values)
|
||||
\end{description}
|
||||
If \lstinline{kordcode} is nonzero, then the list has only this
|
||||
element.
|
||||
|
||||
If \lstinline{kordcode} equals zero, then the list has the following
|
||||
elements:
|
||||
\begin{description}
|
||||
\item[ss] the steady state (ordered by \lstinline{orderendo}), which
|
||||
is a vector
|
||||
\item[rule] the transition rule (ordered by \lstinline{orderendo},
|
||||
\lstinline{orderstate} and \lstinline{orderexo}), a list of arrays
|
||||
\item[newinitval] the deterministic steady state, you can use this to
|
||||
initialize the nonlinear solver for a nearby point in the parameter
|
||||
space (ordered by \lstinline{orderendo})
|
||||
\item[orderstate] the index of endogenous variables that ended up in
|
||||
the state
|
||||
\item[orderendo] the ordering of endogenous variables
|
||||
\item[orderexo] the ordering of exogenous variables
|
||||
\item[kordcode] discussed above
|
||||
\end{description}
|
||||
|
||||
An example will illustrate the ordering. To continue the example above,
|
||||
\begin{lstlisting}
|
||||
> dd$orderstate
|
||||
[1] 1 3
|
||||
> dd$orderendo
|
||||
[1] 1 3 2
|
||||
> dd$orderexo
|
||||
[1] 1
|
||||
> dd$rule[[1]]
|
||||
[,1] [,2] [,3]
|
||||
[1,] 0.9669374 0.0 0.02071077
|
||||
[2,] 2.4230073 0.9 0.45309125
|
||||
[3,] 2.6922303 1.0 0.50343473
|
||||
\end{lstlisting}
|
||||
Recall that the original ordering of endogenous variables was
|
||||
\lstinline{k, c, a}. The vectors and matrices of the result are
|
||||
ordered as \lstinline{varendo[dd$orderendo]}, that is, as
|
||||
\lstinline{k, a, c}. This is the ordering for the steady state and
|
||||
the first dimension of the tensors in \lstinline{rule}. The other
|
||||
dimensions are ordered as
|
||||
\lstinline{c(varendo[dd$orderstate],varexo[dd$orderexo])}, that is to
|
||||
say, as \lstinline{k, a, eps}. Use these orderings when calculating
|
||||
with the tensors and the steady state. Also, remember that the $i$th
|
||||
tensor is already divided by $i!$.
|
||||
|
||||
\lstinline{calldynare} also handles exceptions from dynare. All
|
||||
exceptions (except KordException, which sets \lstinline{kordcode})
|
||||
generate an error in the R interface. Normally, when solving a
|
||||
well-formed model (no typos in the equations, etc), users should not
|
||||
encounter these exceptions. Having a journal file is useful for
|
||||
debugging. If you are making long calculations, it is reasonable to
|
||||
catch errors with \lstinline{try} so that they won't abort the
|
||||
calculation.
|
||||
|
||||
% \bibliographystyle{apalike}
|
||||
% \bibliography{/home/tpapp/doc/general.bib}
|
||||
|
||||
\end{document}
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: t
|
||||
%%% End:
|
|
@ -0,0 +1,32 @@
|
|||
#include "dynareR.cpp"
|
||||
|
||||
int main(void) {
|
||||
const char *parameters[] = {"beta","gamma","rho","alpha","delta"};
|
||||
const char *varendo[] = {"k","c","a"};
|
||||
const char *varexo[] = {"eps"};
|
||||
const int numpar = 5;
|
||||
const int numendo = 3;
|
||||
const int numexo = 1;
|
||||
const int ord = 2;
|
||||
const int numsteps = 0;
|
||||
const double parval[] = {.99,2,.9,.3,.025};
|
||||
const double vcov[] = {0.001};
|
||||
const double initval[] = {0.066, 0.43, 0.01};
|
||||
|
||||
int e;
|
||||
double tensorbuffer[100];
|
||||
int num_state;
|
||||
int ordering_state[] = {0,0,0};
|
||||
int ordering_endo[] = {0,0,0};
|
||||
int ordering_exo[] = {0};
|
||||
double newinitval[] = {0,0,0};
|
||||
|
||||
const char *modeleq[] = {"(c/c(1))^gamma*beta*(alpha*exp(a(1))*k^(alpha-1)+1-delta)=1; a=rho*a(-1)+eps; k+c=exp(a)*k(-1)^alpha+(1-delta)*k(-1);"};
|
||||
|
||||
dynareR(varendo, &numendo, varexo, &numexo, parameters, &numpar, modeleq,
|
||||
&ord, "journal", parval, vcov, initval,
|
||||
&numsteps, tensorbuffer,
|
||||
&num_state, ordering_state, ordering_endo, ordering_exo,
|
||||
newinitval,&e);
|
||||
printf("error code: %d\n", e);
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
source("dynareR.r")
|
||||
|
||||
parameters <- c("beta","gamma","rho","alpha","delta")
|
||||
varendo <- c("k","c","a")
|
||||
varexo <- "eps"
|
||||
parval <- c(.99,2,.9,.3,.025)
|
||||
vcovmatrix <- matrix(1,1,1)
|
||||
initval <- c(0.066, 0.43, 0.01)
|
||||
|
||||
modeleq <- c("(c/c(1))^gamma*beta*(alpha*exp(a(1))*k^(alpha-1)+1-delta)=1",
|
||||
"a=rho*a(-1)+eps",
|
||||
"k+c=exp(a)*k(-1)^alpha+(1-delta)*k(-1)")
|
||||
|
||||
|
||||
dd <- calldynare(modeleq,varendo,varexo,parameters,2,parval,vcovmatrix,initval)
|
|
@ -0,0 +1,134 @@
|
|||
include ../../Makefile.include
|
||||
#CC = gcc
|
||||
CC_FLAGS = -Wall -I../../sylv/cc -I../../tl/cc -I../../kord -I../../integ/cc
|
||||
LDFLAGS = -llapack -lblas -lg2c -lstdc++
|
||||
|
||||
ifeq ($(DEBUG),yes)
|
||||
CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
|
||||
else
|
||||
CC_FLAGS := $(CC_FLAGS) -O3 -DPOSIX_THREADS
|
||||
endif
|
||||
|
||||
ifeq ($(OS),Windows_NT)
|
||||
CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
|
||||
LDFLAGS := -mno-cygwin -mthreads $(LDFLAGS) -lpthreadGC2
|
||||
ARCH := w32
|
||||
MEX_SUFFIX = dll
|
||||
else
|
||||
CC_FLAGS := -fPIC $(CC_FLAGS)
|
||||
LDFLAGS := $(LDFLAGS) -lpthread
|
||||
ARCH := linux
|
||||
# MEX_SUFFIX = mexglx
|
||||
MEX_SUFFIX = mexa64
|
||||
endif
|
||||
|
||||
sylvcppsource := $(wildcard ../../sylv/cc/*.cpp)
|
||||
sylvhsource := $(wildcard ../../sylv/cc/*.h)
|
||||
sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource))
|
||||
|
||||
tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
|
||||
tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
|
||||
tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
|
||||
tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
|
||||
tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
|
||||
|
||||
kordcwebsource := $(wildcard ../../kord/*.cweb)
|
||||
kordcppsource := $(patsubst %.cweb,%.cpp,$(kordcwebsource))
|
||||
kordhwebsource := $(wildcard ../../kord/*.hweb)
|
||||
kordhsource := $(patsubst %.hweb,%.h,$(kordhwebsource))
|
||||
kordobjects := $(patsubst %.cweb,%.o,$(kordcwebsource))
|
||||
|
||||
integcwebsource := $(wildcard ../../integ/cc/*.cweb)
|
||||
integcppsource := $(patsubst %.cweb,%.cpp,$(integcwebsource))
|
||||
integhwebsource := $(wildcard ../../integ/cc/*.hweb)
|
||||
integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
|
||||
integobjects := $(patsubst %.cweb,%.o,$(integcwebsource))
|
||||
|
||||
cppsource := $(wildcard *.cpp)
|
||||
mexobjects := $(patsubst %.cpp,%_.$(MEX_SUFFIX),$(cppsource))
|
||||
|
||||
all: $(mexobjects)
|
||||
|
||||
../../tl/cc/dummy.ch:
|
||||
make -C ../../tl/cc dummy.ch
|
||||
|
||||
../../tl/cc/%.cpp: ../../tl/cc/%.cweb ../../tl/cc/dummy.ch
|
||||
make -C ../../tl/cc $*.cpp
|
||||
|
||||
../../tl/cc/%.h: ../../tl/cc/%.hweb ../../tl/cc/dummy.ch
|
||||
make -C ../../tl/cc $*.h
|
||||
|
||||
../../tl/cc/%.o: ../../tl/cc/%.cpp $(tlhsource)
|
||||
make -C ../../tl/cc $*.o
|
||||
|
||||
../../integ/cc/dummy.ch:
|
||||
make -C ../../integ/cc dummy.ch
|
||||
|
||||
../../integ/cc/%.cpp: ../../integ/cc/%.cweb ../../integ/cc/dummy.ch
|
||||
make -C ../../integ/cc $*.cpp
|
||||
|
||||
../../integ/cc/%.h: ../../integ/cc/%.hweb ../../integ/cc/dummy.ch
|
||||
make -C ../../integ/cc $*.h
|
||||
|
||||
../../integ/cc/%.o: ../../integ/cc/%.cpp $(integhsource) $(tlhsource)
|
||||
make -C ../../integ/cc $*.o
|
||||
|
||||
|
||||
../../sylv/cc/%.o: ../../sylv/cc/%.cpp $(sylvhsource)
|
||||
make -C ../../sylv/cc $*.o
|
||||
|
||||
../../kord/dummy.ch:
|
||||
make -C ../../kord dummy.ch
|
||||
|
||||
../../kord/%.cpp: ../../kord/%.cweb ../../kord/dummy.ch
|
||||
make -C ../../kord $*.cpp
|
||||
|
||||
../../kord/%.h: ../../kord/%.hweb ../../kord/dummy.ch
|
||||
make -C ../../kord $*.h
|
||||
|
||||
../../kord/%.o: ../../kord/%.cpp $(tlhsource) $(kordhsource) $(integhsource)
|
||||
make -C ../../kord $*.o
|
||||
|
||||
|
||||
dynarelib.a: $(tlhwebsource) $(tlcwebsoure) $(tlhsource) $(tlcppsource) \
|
||||
$(integhwebsource) $(integcwebsoure) $(integhsource) $(integcppsource) \
|
||||
$(kordhwebsource) $(kordcwebsoure) $(kordhsource) $(kordcppsource) \
|
||||
$(sylvhsource) $(sylvcppsource) \
|
||||
$(kordobjects) $(tlobjects) $(integobjects) $(sylvobjects)
|
||||
ar cr dynarelib.a $(kordobjects) $(tlobjects) $(integobjects) $(sylvobjects)
|
||||
ranlib dynarelib.a
|
||||
|
||||
|
||||
# to compile mex objects for Windows do:
|
||||
# 1. install gnumex
|
||||
# 2. create mexopts.bat via gnumex in this directory, specify MinGW compilation, and dll output
|
||||
# 3. in created mexopts.bat add "-llapack -lblas -lg2c -lstdc++
|
||||
# -lpthreadGC2" in the beginning of GM_ADD_LIBS
|
||||
# 4. in created mexopts.bat add "-fexceptions -DPOSIX_THREADS" to COMPFLAGS in the end
|
||||
# 5. in created mexopts.bat change suffix mexw32 to dll in NAME_OUTPUT
|
||||
# 6. pray it works
|
||||
# OR: just use the mexopt.bat from the repository and check MATLAB
|
||||
# root directory and gnumex root directories
|
||||
%_.$(MEX_SUFFIX): %.cpp $(tlhwebsource) $(tlcwebsoure) $(tlhsource) $(tlcppsource) \
|
||||
$(integhwebsource) $(integcwebsoure) $(integhsource) $(integcppsource) \
|
||||
$(kordhwebsource) $(kordcwebsoure) $(kordhsource) $(kordcppsource) \
|
||||
$(sylvhsource) $(sylvcppsource) \
|
||||
dynarelib.a
|
||||
ifeq ($(OS),Windows_NT)
|
||||
mex.bat -I../../sylv/cc -I../../tl/cc -I../../kord -I../../integ/cc $*.cpp dynarelib.a
|
||||
else
|
||||
mex -I../../sylv/cc/ -I../../tl/cc -I../../kord -I../../integ/cc $*.cpp CFLAGS='$$CFLAGS -fexceptions' dynarelib.a -lmwlapack -lmwblas
|
||||
endif
|
||||
mv $*.$(MEX_SUFFIX) $*_.$(MEX_SUFFIX)
|
||||
|
||||
clear:
|
||||
rm -f dynarelib.a
|
||||
rm -f *.mexglx
|
||||
rm -f *.dll
|
||||
make -C ../../tl/testing clear
|
||||
make -C ../../tl/cc clear
|
||||
make -C ../../integ/testing clear
|
||||
make -C ../../integ/cc clear
|
||||
make -C ../../sylv/testing clear
|
||||
make -C ../../sylv/cc clear
|
||||
make -C ../../kord clear
|
|
@ -0,0 +1,133 @@
|
|||
// $Id: dynare_simul.cpp 1488 2007-12-19 14:16:30Z kamenik $
|
||||
|
||||
// Copyright 2005, Ondra Kamenik
|
||||
|
||||
// This is the mexFunction providing interface to
|
||||
// DecisionRule<>::simulate(). It takes the following input
|
||||
// parameters:
|
||||
// order the order of approximation, needs order+1 derivatives
|
||||
// nstat
|
||||
// npred
|
||||
// nboth
|
||||
// nforw
|
||||
// nexog
|
||||
// ystart starting value (full vector of endogenous)
|
||||
// shocks matrix of shocks (nexog x number of period)
|
||||
// vcov covariance matrix of shocks (nexog x nexog)
|
||||
// seed integer seed
|
||||
// ysteady full vector of decision rule's steady
|
||||
// ... order+1 matrices of derivatives
|
||||
|
||||
// output:
|
||||
// res simulated results
|
||||
|
||||
#include "mex.h"
|
||||
|
||||
#include "decision_rule.h"
|
||||
#include "fs_tensor.h"
|
||||
#include "SylvException.h"
|
||||
|
||||
extern "C" {
|
||||
void mexFunction(int nhls, mxArray* plhs[],
|
||||
int nhrs, const mxArray* prhs[])
|
||||
{
|
||||
if (nhrs < 12)
|
||||
mexErrMsgTxt("Must have at least 12 input parameters.\n");
|
||||
if (nhls != 1)
|
||||
mexErrMsgTxt("Must have exactly 1 output parameter.\n");
|
||||
|
||||
int order = (int)mxGetScalar(prhs[0]);
|
||||
if (nhrs != 12 + order) {
|
||||
mexErrMsgTxt("Must have exactly 11+order input parameters.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
int nstat = (int)mxGetScalar(prhs[1]);
|
||||
int npred = (int)mxGetScalar(prhs[2]);
|
||||
int nboth = (int)mxGetScalar(prhs[3]);
|
||||
int nforw = (int)mxGetScalar(prhs[4]);
|
||||
int nexog = (int)mxGetScalar(prhs[5]);
|
||||
|
||||
const mxArray* const ystart = prhs[6];
|
||||
const mxArray* const shocks = prhs[7];
|
||||
const mxArray* const vcov = prhs[8];
|
||||
int seed = (int)mxGetScalar(prhs[9]);
|
||||
const mxArray* const ysteady = prhs[10];
|
||||
const int* const ystart_dim = mxGetDimensions(ystart);
|
||||
const int* const shocks_dim = mxGetDimensions(shocks);
|
||||
const int* const vcov_dim = mxGetDimensions(vcov);
|
||||
const int* const ysteady_dim = mxGetDimensions(ysteady);
|
||||
|
||||
int ny = nstat + npred + nboth + nforw;
|
||||
if (ny != ystart_dim[0])
|
||||
mexErrMsgTxt("ystart has wrong number of rows.\n");
|
||||
if (1 != ystart_dim[1])
|
||||
mexErrMsgTxt("ystart has wrong number of cols.\n");
|
||||
int nper = shocks_dim[1];
|
||||
if (nexog != shocks_dim[0])
|
||||
mexErrMsgTxt("shocks has a wrong number of rows.\n");
|
||||
if (nexog != vcov_dim[0])
|
||||
mexErrMsgTxt("vcov has a wrong number of rows.\n");
|
||||
if (nexog != vcov_dim[1])
|
||||
mexErrMsgTxt("vcov has a wrong number of cols.\n");
|
||||
if (ny != ysteady_dim[0])
|
||||
mexErrMsgTxt("ysteady has wrong number of rows.\n");
|
||||
if (1 != ysteady_dim[1])
|
||||
mexErrMsgTxt("ysteady has wrong number of cols.\n");
|
||||
|
||||
mxArray* res = mxCreateDoubleMatrix(ny, nper, mxREAL);
|
||||
|
||||
try {
|
||||
// initialize tensor library
|
||||
tls.init(order, npred+nboth+nexog);
|
||||
|
||||
// form the polynomial
|
||||
UTensorPolynomial pol(ny, npred+nboth+nexog);
|
||||
for (int dim = 0; dim <= order; dim++) {
|
||||
const mxArray* gk = prhs[11+dim];
|
||||
const int* const gk_dim = mxGetDimensions(gk);
|
||||
FFSTensor ft(ny, npred+nboth+nexog, dim);
|
||||
if (ft.ncols() != gk_dim[1]) {
|
||||
char buf[1000];
|
||||
sprintf(buf, "Wrong number of columns for folded tensor: got %d but I want %d\n",
|
||||
gk_dim[1], ft.ncols());
|
||||
mexErrMsgTxt(buf);
|
||||
}
|
||||
if (ft.nrows() != gk_dim[0]) {
|
||||
char buf[1000];
|
||||
sprintf(buf, "Wrong number of rows for folded tensor: got %d but I want %d\n",
|
||||
gk_dim[0], ft.nrows());
|
||||
mexErrMsgTxt(buf);
|
||||
}
|
||||
ft.zeros();
|
||||
ConstTwoDMatrix gk_mat(ft.nrows(), ft.ncols(), mxGetPr(gk));
|
||||
ft.add(1.0, gk_mat);
|
||||
UFSTensor* ut = new UFSTensor(ft);
|
||||
pol.insert(ut);
|
||||
}
|
||||
// form the decision rule
|
||||
UnfoldDecisionRule
|
||||
dr(pol, PartitionY(nstat, npred, nboth, nforw),
|
||||
nexog, ConstVector(mxGetPr(ysteady), ny));
|
||||
// form the shock realization
|
||||
TwoDMatrix shocks_mat(nexog, nper, (const double*)mxGetPr(shocks));
|
||||
TwoDMatrix vcov_mat(nexog, nexog, (const double*)mxGetPr(vcov));
|
||||
GenShockRealization sr(vcov_mat, shocks_mat, seed);
|
||||
// simulate and copy the results
|
||||
Vector ystart_vec((const double*)mxGetPr(ystart), ny);
|
||||
TwoDMatrix* res_mat =
|
||||
dr.simulate(DecisionRule::horner, nper,
|
||||
ystart_vec, sr);
|
||||
TwoDMatrix res_tmp_mat(ny, nper, mxGetPr(res));
|
||||
res_tmp_mat = (const TwoDMatrix&)(*res_mat);
|
||||
delete res_mat;
|
||||
plhs[0] = res;
|
||||
} catch (const KordException& e) {
|
||||
mexErrMsgTxt("Caugth Kord exception.");
|
||||
} catch (const TLException& e) {
|
||||
mexErrMsgTxt("Caugth TL exception.");
|
||||
} catch (SylvException& e) {
|
||||
mexErrMsgTxt("Caught Sylv exception.");
|
||||
}
|
||||
}
|
||||
};
|
|
@ -0,0 +1,160 @@
|
|||
%
|
||||
% SYNOPSIS
|
||||
%
|
||||
% r = dynare_simul(name, shocks)
|
||||
% r = dynare_simul(name, prefix, shocks)
|
||||
% r = dynare_simul(name, shocks, start)
|
||||
% r = dynare_simul(name, prefix, shocks, start)
|
||||
%
|
||||
% name name of MAT-file produced by dynare++
|
||||
% prefix prefix of variables in the MAT-file
|
||||
% shocks matrix of shocks
|
||||
% start zero period value
|
||||
%
|
||||
% SEMANTICS
|
||||
%
|
||||
% The command reads a decision rule from the MAT-file having the given
|
||||
% prefix. Then it starts simulating the decision rule with zero time value
|
||||
% equal to the given start. It uses the given shocks for the simulation. If
|
||||
% the start is not given, the state about which the decision rule is
|
||||
% centralized is taken (called fix point, or stochastic steady state, take
|
||||
% your pick).
|
||||
%
|
||||
% prefix Use the prefix with which you called dynare++, the default
|
||||
% prefix in dynare++ is 'dyn'.
|
||||
% shocks Number of rows must be a number of exogenous shocks,
|
||||
% number of columns gives the number of simulated
|
||||
% periods. NaNs and Infs in the matrix are substitued by
|
||||
% draws from the normal distribution using the covariance
|
||||
% matrix given in the model file.
|
||||
% start Vector of endogenous variables in the ordering given by
|
||||
% <prefix>_vars.
|
||||
%
|
||||
% Seed for random generator is derived from calling rand(1,1). Therefore,
|
||||
% seeding can be controlled with rand('state') and rand('state',some_seed).
|
||||
%
|
||||
% EXAMPLES
|
||||
%
|
||||
% All examples suppose that the prefix is 'dyn' and that your_model.mat
|
||||
% has been loaded into Matlab.
|
||||
%
|
||||
% 1. response to permanent negative shock to the third exo var EPS3 for
|
||||
% 100 periods
|
||||
%
|
||||
% shocks = zeros(4,100); % 4 exogenous variables in the model
|
||||
% shocks(dyn_i_EPS3,:) = -0.1; % the permanent shock to EPS3
|
||||
% r = dynare_simul('your_model.mat',shocks);
|
||||
%
|
||||
% 2. one stochastic simulation for 100 periods
|
||||
%
|
||||
% shocks = zeros(4,100)./0; % put NaNs everywhere
|
||||
% r = dynare_simul('your_model.mat',shocks);
|
||||
%
|
||||
% 3. one stochastic simulation starting at 75% undercapitalized economy
|
||||
%
|
||||
% shocks = zeros(4,100)./0; % put NaNs everywhere
|
||||
% ystart = dyn_ss; % get copy of DR fix point
|
||||
% ystart(dyn_i_K) = 0.75*dyn_ss(dyn_i_K); % scale down the capital
|
||||
% r = dynare_simul('your_model.mat',shocks,ystart);
|
||||
%
|
||||
%
|
||||
% SEE ALSO
|
||||
%
|
||||
% "DSGE Models with Dynare++. A Tutorial.", Ondra Kamenik, 2005
|
||||
|
||||
function r = dynare_simul(varargin)
|
||||
|
||||
% get the file name and load data
|
||||
fname = varargin{1};
|
||||
eval(['load ' fname]);
|
||||
|
||||
% set prefix, shocks, ystart
|
||||
if ischar(varargin{2})
|
||||
prefix = varargin{2};
|
||||
if length(varargin) == 3
|
||||
shocks = varargin{3};
|
||||
ystart = NaN;
|
||||
elseif length(varargin) == 4
|
||||
shocks = varargin{3};
|
||||
ystart = varargin{4};
|
||||
else
|
||||
error('Wrong number of parameters.');
|
||||
end
|
||||
else
|
||||
prefix = 'dyn';
|
||||
if length(varargin) == 2
|
||||
shocks = varargin{2};
|
||||
ystart = NaN;
|
||||
elseif length(varargin) == 3
|
||||
shocks = varargin{2};
|
||||
ystart = varargin{3};
|
||||
else
|
||||
error('Wrong number of parameters.');
|
||||
end
|
||||
end
|
||||
|
||||
% load all needed variables but prefix_g_*
|
||||
if (exist([prefix '_nstat']))
|
||||
nstat = eval([prefix '_nstat']);
|
||||
else
|
||||
error(['Could not find variable ' prefix '_nstat in workspace']);
|
||||
end
|
||||
if (exist([prefix '_npred']))
|
||||
npred = eval([prefix '_npred']);
|
||||
else
|
||||
error(['Could not find variable ' prefix '_npred in workspace']);
|
||||
end
|
||||
if (exist([prefix '_nboth']))
|
||||
nboth = eval([prefix '_nboth']);
|
||||
else
|
||||
error(['Could not find variable ' prefix '_nboth in workspace']);
|
||||
end
|
||||
if (exist([prefix '_nforw']))
|
||||
nforw = eval([prefix '_nforw']);
|
||||
else
|
||||
error(['Could not find variable ' prefix '_nforw in workspace']);
|
||||
end
|
||||
if (exist([prefix '_ss']))
|
||||
ss = eval([prefix '_ss']);
|
||||
else
|
||||
error(['Could not find variable ' prefix '_ss in workspace']);
|
||||
end
|
||||
if (exist([prefix '_vcov_exo']))
|
||||
vcov_exo = eval([prefix '_vcov_exo']);
|
||||
else
|
||||
error(['Could not find variable ' prefix '_vcov_exo in workspace']);
|
||||
end
|
||||
nexog = size(vcov_exo,1);
|
||||
|
||||
if isnan(ystart)
|
||||
ystart = ss;
|
||||
end
|
||||
|
||||
% newer version of dynare++ doesn't return prefix_g_0, we make it here if
|
||||
% it does not exist in workspace
|
||||
g_zero = [prefix '_g_0'];
|
||||
if (~ exist(g_zero))
|
||||
eval([ g_zero '= zeros(nstat+npred+nboth+nforw,1);']);
|
||||
end
|
||||
|
||||
% make derstr a string of comma seperated existing prefix_g_*
|
||||
derstr = [',' g_zero];
|
||||
order = 1;
|
||||
cont = 1;
|
||||
while cont == 1
|
||||
g_ord = [prefix '_g_' num2str(order)];
|
||||
if (exist(g_ord))
|
||||
derstr = [derstr ',' g_ord];
|
||||
order = order + 1;
|
||||
else
|
||||
cont = 0;
|
||||
end
|
||||
end
|
||||
|
||||
% set seed
|
||||
seed = ceil(10000*rand(1,1));
|
||||
|
||||
% call dynare_simul_
|
||||
command = ['r=dynare_simul_(' num2str(order-1) ',nstat,npred,nboth,nforw,' ...
|
||||
'nexog,ystart,shocks,vcov_exo,seed,ss' derstr ');'];
|
||||
eval(command);
|
|
@ -0,0 +1,65 @@
|
|||
@echo off
|
||||
rem C:\ondra\work\dpp\dynare++\extern\matlab\mexopts.bat
|
||||
rem Generated by gnumex.m script in c:\fs\gnumex
|
||||
rem gnumex version: 2.01
|
||||
rem Compile and link options used for building MEX etc files with
|
||||
rem the Mingw/Cygwin tools. Options here are:
|
||||
rem Gnumex, version 2.01
|
||||
rem MinGW linking
|
||||
rem Mex (*.dll) creation
|
||||
rem Libraries regenerated now
|
||||
rem Language: C / C++
|
||||
rem Optimization level: -O3 (full optimization)
|
||||
rem Matlab version 7.7
|
||||
rem
|
||||
set MATLAB=C:\PROGRA~1\MATLAB\R2008b
|
||||
set GM_PERLPATH=C:\PROGRA~1\MATLAB\R2008b\sys\perl\win32\bin\perl.exe
|
||||
set GM_UTIL_PATH=c:\fs\gnumex
|
||||
set PATH=c:\fs\mingw\bin;%PATH%
|
||||
set PATH=%PATH%;C:\Cygwin\usr\local\gfortran\libexec\gcc\i686-pc-cygwin\4.3.0
|
||||
set LIBRARY_PATH=c:\fs\mingw\lib
|
||||
set G95_LIBRARY_PATH=c:\fs\mingw\lib
|
||||
rem
|
||||
rem precompiled library directory and library files
|
||||
set GM_QLIB_NAME=C:\\ondra\work\dpp\dynare++\extern\matlab\gnumex
|
||||
rem
|
||||
rem directory for .def-files
|
||||
set GM_DEF_PATH=C:\\ondra\work\dpp\dynare++\extern\matlab\gnumex
|
||||
rem
|
||||
rem Type of file to compile (mex or engine)
|
||||
set GM_MEXTYPE=mex
|
||||
rem
|
||||
rem Language for compilation
|
||||
set GM_MEXLANG=c
|
||||
rem
|
||||
rem File for exporting mexFunction symbol
|
||||
set GM_MEXDEF=C:\\ondra\work\dpp\dynare++\extern\matlab\gnumex\mex.def
|
||||
rem
|
||||
set GM_ADD_LIBS=-llapack -lblas -lg2c -lstdc++ -lpthreadGC2 -llibmx -llibmex -llibmat
|
||||
rem
|
||||
rem compiler options; add compiler flags to compflags as desired
|
||||
set NAME_OBJECT=-o
|
||||
set COMPILER=gcc
|
||||
set COMPFLAGS=-c -DMATLAB_MEX_FILE -fexceptions -DPOSIX_THREADS
|
||||
set OPTIMFLAGS=-O3
|
||||
set DEBUGFLAGS=-g
|
||||
set CPPCOMPFLAGS=%COMPFLAGS% -x c++
|
||||
set CPPOPTIMFLAGS=%OPTIMFLAGS%
|
||||
set CPPDEBUGFLAGS=%DEBUGFLAGS%
|
||||
rem
|
||||
rem NB Library creation commands occur in linker scripts
|
||||
rem
|
||||
rem Linker parameters
|
||||
set LINKER=%GM_PERLPATH% %GM_UTIL_PATH%\linkmex.pl
|
||||
set LINKFLAGS=
|
||||
set CPPLINKFLAGS=GM_ISCPP
|
||||
set LINKOPTIMFLAGS=-s
|
||||
set LINKDEBUGFLAGS=-g -Wl,--image-base,0x28000000\n
|
||||
set LINKFLAGS= -LC:\\ondra\work\dpp\dynare++\extern\matlab\gnumex
|
||||
set LINK_FILE=
|
||||
set LINK_LIB=
|
||||
set NAME_OUTPUT=-o %OUTDIR%%MEX_NAME%.dll
|
||||
rem
|
||||
rem Resource compiler parameters
|
||||
set RC_COMPILER=%GM_PERLPATH% %GM_UTIL_PATH%\rccompile.pl -o %OUTDIR%mexversion.res
|
||||
set RC_LINKER=
|
|
@ -0,0 +1,67 @@
|
|||
# $Id: Makefile 2344 2009-02-09 20:36:08Z michel $
|
||||
# Copyright 2005, Ondra Kamenik
|
||||
|
||||
include ../../Makefile.include
|
||||
|
||||
CC_FLAGS := -I../../sylv/cc -I../../tl/cc
|
||||
|
||||
ifeq ($(DEBUG),yes)
|
||||
CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
|
||||
else
|
||||
CC_FLAGS := $(CC_FLAGS) -O2 -DPOSIX_THREADS
|
||||
endif
|
||||
|
||||
ifeq ($(OS),Windows_NT)
|
||||
CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
|
||||
LD_LIBS := -mno-cygwin -mthreads $(LD_LIBS) -lpthreadGC1
|
||||
else
|
||||
LD_LIBS := $(LD_LIBS) -lpthread
|
||||
endif
|
||||
|
||||
|
||||
matrix_interface := GeneralMatrix Vector SylvException
|
||||
matobjs := $(patsubst %, ../../sylv/cc/%.o, $(matrix_interface))
|
||||
cwebsource := $(wildcard *.cweb)
|
||||
cppsource := $(patsubst %.cweb,%.cpp,$(cwebsource))
|
||||
objects := $(patsubst %.cweb,%.o,$(cwebsource))
|
||||
hwebsource := $(wildcard *.hweb)
|
||||
hsource := $(patsubst %.hweb,%.h,$(hwebsource))
|
||||
|
||||
tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
|
||||
tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
|
||||
tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
|
||||
tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
|
||||
|
||||
|
||||
dummy.ch:
|
||||
touch dummy.ch
|
||||
|
||||
../../tl/cc/dummy.ch:
|
||||
make -C ../../tl/cc dummy.ch
|
||||
|
||||
../../tl/cc/%.h: ../../tl/cc/%.hweb ../../tl/cc/dummy.ch
|
||||
make -C ../../tl/cc $*.h
|
||||
|
||||
%.cpp: %.cweb dummy.ch
|
||||
ctangle -bhp $*.cweb dummy.ch $*.cpp
|
||||
|
||||
%.h: %.hweb dummy.ch
|
||||
ctangle -bhp $*.hweb dummy.ch $*.h
|
||||
|
||||
%.o : %.cpp $(hsource) $(tlhsource)
|
||||
$(CC) $(CC_FLAGS) $(EXTERN_DEFS) -c $*.cpp
|
||||
|
||||
doc: main.web $(hwebsource) $(cwebsource)
|
||||
cweave -bhp main.web
|
||||
pdftex main
|
||||
mv main.pdf integ.pdf
|
||||
|
||||
all: $(objects) $(cppsource) $(hsource)
|
||||
|
||||
|
||||
clear:
|
||||
rm -f $(cppsource)
|
||||
rm -f $(hsource)
|
||||
rm -f *.o
|
||||
rm -f dummy.ch
|
||||
rm -f *~
|
|
@ -0,0 +1,41 @@
|
|||
@q $Id: main.web 2333 2009-01-14 10:32:55Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
\let\ifpdf\relax
|
||||
\input eplain
|
||||
\def\title{{\mainfont Numerical Integration Module}}
|
||||
|
||||
|
||||
@i c++lib.w
|
||||
@s Vector int
|
||||
@s ConstVector int
|
||||
@s IntSequence int
|
||||
@s GeneralMatrix int
|
||||
@s THREAD int
|
||||
@s THREAD_GROUP int
|
||||
@s SYNCHRO int
|
||||
|
||||
\titletrue
|
||||
\null\vfill
|
||||
\centerline{\titlefont Numerical Integration Module}
|
||||
\vfill\vfill
|
||||
Copyright \copyright\ 2005 by Ondra Kamenik
|
||||
|
||||
\penalty-10000
|
||||
|
||||
@i vector_function.hweb
|
||||
@i vector_function.cweb
|
||||
|
||||
@i quadrature.hweb
|
||||
@i quadrature.cweb
|
||||
|
||||
@i product.hweb
|
||||
@i product.cweb
|
||||
|
||||
@i smolyak.hweb
|
||||
@i smolyak.cweb
|
||||
|
||||
@i quasi_mcarlo.hweb
|
||||
@i quasi_mcarlo.cweb
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,213 @@
|
|||
@q $Id: product.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ This is {\tt product.cpp} file.
|
||||
|
||||
@c
|
||||
#include "product.h"
|
||||
#include "symmetry.h"
|
||||
|
||||
@<|prodpit| empty constructor@>;
|
||||
@<|prodpit| regular constructor@>;
|
||||
@<|prodpit| copy constructor@>;
|
||||
@<|prodpit| destructor@>;
|
||||
@<|prodpit::operator==| code@>;
|
||||
@<|prodpit::operator=| code@>;
|
||||
@<|prodpit::operator++| code@>;
|
||||
@<|prodpit::setPointAndWeight| code@>;
|
||||
@<|prodpit::print| code@>;
|
||||
@<|ProductQuadrature| constructor@>;
|
||||
@<|ProductQuadrature::begin| code@>;
|
||||
@<|ProductQuadrature::designLevelForEvals| code@>;
|
||||
|
||||
@
|
||||
@<|prodpit| empty constructor@>=
|
||||
// Constructs the past-the-end iterator: no quadrature attached, all
// owned pointers null, |end_flag| set.
prodpit::prodpit()
	: prodq(NULL), level(0), npoints(0), jseq(NULL),
	  end_flag(true), sig(NULL), p(NULL)
{
}
|
||||
|
||||
@ This constructs a product iterator corresponding to index $(j0,0\ldots,0)$.
|
||||
@<|prodpit| regular constructor@>=
|
||||
// Constructs the iterator pointing at the grid index (j0,0,...,0) of
// quadrature |q| at level |l|. An out-of-range |j0| (>= number of
// points per dimension) yields the past-the-end iterator instead.
prodpit::prodpit(const ProductQuadrature& q, int j0, int l)
	: prodq(&q), level(l), npoints(q.uquad.numPoints(l)), jseq(new IntSequence(q.dimen(), 0)),
	  end_flag(false), sig(new ParameterSignal(q.dimen())), p(new Vector(q.dimen()))
{
	if (j0 >= npoints) {
		end_flag = true;
		return;
	}
	(*jseq)[0] = j0;
	setPointAndWeight();
}
|
||||
|
||||
@ Copy constructor, clear.
|
||||
@<|prodpit| copy constructor@>=
|
||||
// Copy constructor: a deep copy; each owned pointer member is cloned
// when present and left null otherwise.
prodpit::prodpit(const prodpit& ppit)
	: prodq(ppit.prodq), level(ppit.level), npoints(ppit.npoints),
	  end_flag(ppit.end_flag), w(ppit.w)
{
	jseq = ppit.jseq ? new IntSequence(*(ppit.jseq)) : NULL;
	sig = ppit.sig ? new ParameterSignal(*(ppit.sig)) : NULL;
	p = ppit.p ? new Vector(*(ppit.p)) : NULL;
}
|
||||
|
||||
@
|
||||
@<|prodpit| destructor@>=
|
||||
// Releases the owned index sequence, signal and point vector.
// |delete| on a null pointer is a no-op in C++, so the original
// per-pointer null checks were redundant and are dropped.
prodpit::~prodpit()
{
	delete jseq;
	delete sig;
	delete p;
}
|
||||
|
||||
@
|
||||
@<|prodpit::operator==| code@>=
|
||||
bool prodpit::operator==(const prodpit& ppit) const
|
||||
{
|
||||
bool ret = true;
|
||||
ret = ret & prodq == ppit.prodq;
|
||||
ret = ret & end_flag == ppit.end_flag;
|
||||
ret = ret & ((jseq==NULL && ppit.jseq==NULL) ||
|
||||
(jseq!=NULL && ppit.jseq!=NULL && *jseq == *(ppit.jseq)));
|
||||
return ret;
|
||||
}
|
||||
|
||||
@
|
||||
@<|prodpit::operator=| code@>=
|
||||
// Assignment: deep-copies the iterator state from |ppit|.
// Fixes two defects of the original: (1) self-assignment freed
// |jseq|/|sig|/|p| and then cloned from the just-freed objects
// (use-after-free); (2) |level| and |npoints| were never copied, so an
// assigned iterator kept a stale level and point count and would wrap
// at the wrong boundary in |operator++|.
const prodpit& prodpit::operator=(const prodpit& ppit)
{
	if (this != &ppit) {
		prodq = ppit.prodq;
		level = ppit.level;
		npoints = ppit.npoints;
		end_flag = ppit.end_flag;
		w = ppit.w;

		delete jseq;
		delete sig;
		delete p;

		jseq = ppit.jseq ? new IntSequence(*(ppit.jseq)) : NULL;
		sig = ppit.sig ? new ParameterSignal(*(ppit.sig)) : NULL;
		p = ppit.p ? new Vector(*(ppit.p)) : NULL;
	}
	return *this;
}
|
||||
|
||||
@
|
||||
@<|prodpit::operator++| code@>=
|
||||
// Moves to the next point of the product grid. The index sequence is
// treated as an odometer in base |npoints|: the last coordinate is
// incremented first and overflow carries towards index 0. When the
// carry propagates past index 0 (i == -1) the grid is exhausted and
// |end_flag| is set.
prodpit& prodpit::operator++()
{
	// todo: throw if |prodq==NULL| or |jseq==NULL| or |sig==NULL| or |end_flag==true|
	int i = prodq->dimen()-1;
	(*jseq)[i]++;
	while (i >= 0 && (*jseq)[i] == npoints) {
		(*jseq)[i] = 0;
		i--;
		if (i >= 0)
			(*jseq)[i]++;
	}
	// all coordinates from position max(i,0) onwards have changed
	sig->signalAfter(std::max(i,0));

	if (i == -1)
		end_flag = true;

	if (! end_flag)
		setPointAndWeight();

	return *this;
}
|
||||
|
||||
|
||||
@ This calculates the weight and sets point coordinates from the indices.
|
||||
@<|prodpit::setPointAndWeight| code@>=
|
||||
void prodpit::setPointAndWeight()
|
||||
{
|
||||
// todo: raise if |prodq==NULL| or |jseq==NULL| or |sig==NULL| or
|
||||
// |p==NULL| or |end_flag==true|
|
||||
w = 1.0;
|
||||
for (int i = 0; i < prodq->dimen(); i++) {
|
||||
(*p)[i] = (prodq->uquad).point(level, (*jseq)[i]);
|
||||
w* = (prodq->uquad).weight(level, (*jseq)[i]);
|
||||
}
|
||||
}
|
||||
|
||||
@ Debug print.
|
||||
@<|prodpit::print| code@>=
|
||||
// Debug print: the index sequence, the weight, and the point
// coordinates, on one line.
void prodpit::print() const
{
	printf("j=[");
	for (int i = 0; i < prodq->dimen(); i++)
		printf("%2d ", (*jseq)[i]);
	printf("] %+4.3f*(",w);
	for (int i = 0; i < prodq->dimen()-1; i++)
		printf("%+4.3f ", (*p)[i]);
	printf("%+4.3f)\n",(*p)[prodq->dimen()-1]);
}
|
||||
|
||||
@
|
||||
@<|ProductQuadrature| constructor@>=
|
||||
// Constructs the product quadrature of dimension |d| over the one
// dimensional quadrature |uq|.
ProductQuadrature::ProductQuadrature(int d, const OneDQuadrature& uq)
	: QuadratureImpl<prodpit>(d), uquad(uq)
{
	// todo: check |d>=1|
}
|
||||
|
||||
@ This calls |prodpit| constructor to return an iterator which points
|
||||
approximately at |ti|-th portion out of |tn| portions. First we find
|
||||
out how many points are in the level, and then construct an iterator
|
||||
$(j0,0,\ldots,0)$ where $j0=$|ti*npoints/tn|.
|
||||
|
||||
@<|ProductQuadrature::begin| code@>=
|
||||
// Returns an iterator pointing approximately at the start of the
// |ti|-th of |tn| portions of level |l|: the iterator at grid index
// (ti*npoints/tn, 0, ..., 0).
prodpit ProductQuadrature::begin(int ti, int tn, int l) const
{
	// todo: raise is |l<dimen()|
	// todo: check |l<=uquad.numLevels()|
	const int total = uquad.numPoints(l);
	return prodpit(*this, (ti*total)/tn, l);
}
|
||||
|
||||
@ This just starts at the first level and goes to a higher level as
|
||||
long as a number of evaluations (which is $n_k^d$ for $k$ being the
|
||||
level) is less than the given number of evaluations.
|
||||
|
||||
@<|ProductQuadrature::designLevelForEvals| code@>=
|
||||
// Finds a high level whose number of evaluations stays below
// |max_evals| (the search is capped at |uquad.numLevels()-2|). The
// do-while overshoots by one level, so |lev| and |evals| are stepped
// back after the loop.
// NOTE(review): |evals| starts at 1 rather than numEvals(1), and the
// step-back also happens when the loop stops because of the level cap
// rather than the evaluation budget -- confirm callers accept this.
void ProductQuadrature::designLevelForEvals(int max_evals, int& lev, int& evals) const
{
	int last_evals;
	evals = 1;
	lev = 1;
	do {
		lev++;
		last_evals = evals;
		evals = numEvals(lev);
	} while (lev < uquad.numLevels()-2 && evals < max_evals);
	lev--;
	evals = last_evals;

}
|
||||
|
||||
@ End of {\tt product.cpp} file
|
|
@ -0,0 +1,107 @@
|
|||
@q $Id: product.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Product quadrature. This is {\tt product.h} file
|
||||
|
||||
This file defines a product multidimensional quadrature. If $Q_k$
|
||||
denotes the one dimensional quadrature, then the product quadrature
|
||||
$Q$ of $k$ level and dimension $d$ takes the form
|
||||
$$Qf=\sum_{i_1=1}^{n_k}\ldots\sum_{i_d=1}^{n_k}w_{i_1}\cdot\ldots\cdot w_{i_d}
|
||||
f(x_{i_1},\ldots,x_{i_d})$$
|
||||
which can be written in terms of the one dimensional quadrature $Q_k$ as
|
||||
$$Qf=(Q_k\otimes\ldots\otimes Q_k)f$$
|
||||
|
||||
Here we define the product quadrature iterator |prodpit| and plug it
|
||||
into |QuadratureImpl| to obtains |ProductQuadrature|.
|
||||
|
||||
@s prodpit int
|
||||
@s ProductQuadrature int
|
||||
|
||||
@c
|
||||
#ifndef PRODUCT_H
|
||||
#define PRODUCT_H
|
||||
|
||||
#include "int_sequence.h"
|
||||
#include "vector_function.h"
|
||||
#include "quadrature.h"
|
||||
|
||||
@<|prodpit| class declaration@>;
|
||||
@<|ProductQuadrature| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ This defines a product point iterator. We have to maintain the
|
||||
following: a pointer to product quadrature in order to know the
|
||||
dimension and the underlying one dimensional quadrature, then level,
|
||||
number of points in the level, integer sequence of indices, signal,
|
||||
the coordinates of the point and the weight.
|
||||
|
||||
The point indices, signal, and point coordinates are implemented as
|
||||
pointers in order to allow for empty constructor.
|
||||
|
||||
The constructor |prodpit(const ProductQuadrature& q, int j0, int l)|
|
||||
constructs an iterator pointing to $(j0,0,\ldots,0)$, which is used by
|
||||
|begin| dictated by |QuadratureImpl|.
|
||||
|
||||
@<|prodpit| class declaration@>=
|
||||
class ProductQuadrature;
|
||||
|
||||
// Product point iterator. Maintains a pointer to the owning product
// quadrature (for the dimension and the underlying one dimensional
// quadrature), the level, the number of points per dimension at that
// level, the index sequence, the change signal, the point coordinates
// and the weight. The pointer members allow an empty (past-the-end)
// constructor.
class prodpit {
protected:@;
	const ProductQuadrature* prodq; // owning quadrature, not owned
	int level;
	int npoints;          // points per dimension at |level|
	IntSequence* jseq;    // owned; current point indices
	bool end_flag;        // true for the past-the-end iterator
	ParameterSignal* sig; // owned; marks coordinates changed by ++
	Vector* p;            // owned; current point coordinates
	double w;             // current product weight
public:@;
	prodpit();
	prodpit(const ProductQuadrature& q, int j0, int l);
	prodpit(const prodpit& ppit);
	~prodpit();
	bool operator==(const prodpit& ppit) const;
	bool operator!=(const prodpit& ppit) const
		{@+ return ! operator==(ppit);@+}
	const prodpit& operator=(const prodpit& spit);
	prodpit& operator++();
	const ParameterSignal& signal() const
		{@+ return *sig;@+}
	const Vector& point() const
		{@+ return *p;@+}
	double weight() const
		{@+ return w;@+}
	void print() const;
protected:@;
	void setPointAndWeight();
};
|
||||
|
||||
@ The product quadrature is just |QuadratureImpl| with the product
|
||||
iterator plugged in. The object is constructed by just giving the
|
||||
underlying one dimensional quadrature, and the dimension. The only
|
||||
extra method is |designLevelForEvals| which for the given maximum
|
||||
number of evaluations (and dimension and underlying quadrature from
|
||||
the object) returns a maximum level yielding number of evaluations
|
||||
less than the given number.
|
||||
|
||||
@<|ProductQuadrature| class declaration@>=
|
||||
// |QuadratureImpl| with the product iterator plugged in. Constructed
// from a dimension and the underlying one dimensional quadrature.
class ProductQuadrature : public QuadratureImpl<prodpit> {
	friend class prodpit;
	const OneDQuadrature& uquad; // underlying one dimensional quadrature
public:@;
	ProductQuadrature(int d, const OneDQuadrature& uq);
	virtual ~ProductQuadrature()@+ {}
	// number of grid points at level |l|: numPoints(l) to the power dimen()
	int numEvals(int l) const
		{
			int res = 1;
			for (int i = 0; i < dimen(); i++)
				res *= uquad.numPoints(l);
			return res;
		}
	void designLevelForEvals(int max_eval, int& lev, int& evals) const;
protected:@;
	prodpit begin(int ti, int tn, int level) const;
};
|
||||
|
||||
@ End of {\tt product.h} file
|
|
@ -0,0 +1,63 @@
|
|||
@q $Id: quadrature.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ This is {\tt quadrature.cpp} file.
|
||||
|
||||
@c
|
||||
#include "quadrature.h"
|
||||
#include "precalc_quadrature.dat"
|
||||
|
||||
#include <cmath>
|
||||
|
||||
@<|OneDPrecalcQuadrature::calcOffsets| code@>;
|
||||
@<|GaussHermite| constructor code@>;
|
||||
@<|GaussLegendre| constructor code@>;
|
||||
@<|NormalICDF| get code@>;
|
||||
|
||||
@
|
||||
@<|OneDPrecalcQuadrature::calcOffsets| code@>=
|
||||
void OneDPrecalcQuadrature::calcOffsets()
|
||||
{
|
||||
offsets[0] = 0;
|
||||
for (int i = 1; i < num_levels; i++)
|
||||
offsets[i] = offsets[i-1] + num_points[i-1];
|
||||
}
|
||||
|
||||
@
|
||||
@<|GaussHermite| constructor code@>=
|
||||
// Wires the precalculated Gauss--Hermite tables into the generic
// precalculated-quadrature base.
GaussHermite::GaussHermite()
	: OneDPrecalcQuadrature(gh_num_levels, gh_num_points, gh_weights, gh_points)@+ {}
|
||||
|
||||
@
|
||||
@<|GaussLegendre| constructor code@>=
|
||||
// Wires the precalculated Gauss--Legendre tables into the generic
// precalculated-quadrature base.
GaussLegendre::GaussLegendre()
	: OneDPrecalcQuadrature(gl_num_levels, gl_num_points, gl_weights, gl_points)@+ {}
|
||||
|
||||
@ Here we transform a draw from univariate $\langle 0,1\rangle$ to the
|
||||
draw from Gaussian $N(0,1)$. This is done by a table lookup, the table
|
||||
is given by |normal_icdf_step|, |normal_icdf_data|, |normal_icdf_num|,
|
||||
and a number |normal_icdf_end|. In order to avoid wrong tails for lookups close
|
||||
to zero or one, we rescale input |x| by $(1-2*(1-end))=2*end-1$.
|
||||
|
||||
@<|NormalICDF| get code@>=
|
||||
// Maps a uniform draw x in (0,1) to a draw from N(0,1) by linear
// interpolation in the precalculated table |normal_icdf_data|. The
// lookup works on |x-0.5| (the table covers one half of the symmetric
// distribution) rescaled by 2*normal_icdf_end-1 to avoid the chopped
// tails; the sign of the result comes from which side of 0.5 x lies on.
double NormalICDF::get(double x)
{
	double xx = (2*normal_icdf_end-1)*std::abs(x-0.5);
	int i = (int)floor(xx/normal_icdf_step);
	double xx1 = normal_icdf_step*i;  // left grid point of the cell
	double yy1 = normal_icdf_data[i]; // table value at the left grid point
	double y;
	if (i < normal_icdf_num-1) {
		// interpolate linearly between table entries i and i+1
		double yy2 = normal_icdf_data[i+1];
		y = yy1 + (yy2-yy1)*(xx-xx1)/normal_icdf_step;
	} else { // this should never happen
		y = yy1;
	}
	if (x > 0.5)
		return y;
	else
		return -y;
}
|
||||
|
||||
|
||||
@ End of {\tt quadrature.cpp} file
|
|
@ -0,0 +1,311 @@
|
|||
@q $Id: quadrature.hweb 2269 2008-11-23 14:33:22Z michel $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Quadrature. This is {\tt quadrature.h} file
|
||||
|
||||
This file defines an interface for one dimensional (non-nested) quadrature
|
||||
|OneDQuadrature|, and a parent for all multi-dimensional
|
||||
quadratures. This parent class |Quadrature| presents a general concept of
|
||||
quadrature, this is
|
||||
$$\int f(x){\rm d}x \approx\sum_{i=1}^N w_if(x_i)$$
|
||||
The class |Quadrature| just declares this concept. The concept is
|
||||
implemented by class |QuadratureImpl| which parallelizes the
|
||||
summation. All implementations therefore wishing to use the parallel
|
||||
implementation should inherit from |QuadratureImpl| and integration is
|
||||
done.
|
||||
|
||||
The integration concept relies on a point iterator, which goes through
|
||||
all $x_i$ and $w_i$ for $i=1,\ldots,N$. All the iterators must be able
|
||||
to go through only a portion of the set $i=1,\ldots,N$. This enables
|
||||
us to implement paralelism, for two threads for example, one iterator
|
||||
goes from the beginning to the (approximately) half, and the other
|
||||
goes from the half to the end.
|
||||
|
||||
Besides this concept of the general quadrature, this file defines also
|
||||
one dimensional quadrature, which is basically a scheme of points and
|
||||
weights for different levels. The class |OneDQuadrature| is a parent
|
||||
of all such objects, the classes |GaussHermite| and |GaussLegendre|
|
||||
are specific implementations for Gauss--Hermite and Gauss--Legendre
|
||||
quadratures resp.
|
||||
|
||||
@s OneDQuadrature int
|
||||
@s Quadrature int
|
||||
@s IntegrationWorker int
|
||||
@s QuadratureImpl int
|
||||
@s OneDPrecalcQuadrature int
|
||||
@s GaussHermite int
|
||||
@s GaussLegendre int
|
||||
@s NormalICDF int
|
||||
@s _Tpit int
|
||||
|
||||
@c
|
||||
#ifndef QUADRATURE_H
|
||||
#define QUADRATURE_H
|
||||
|
||||
#include <cstdlib>
|
||||
#include "vector_function.h"
|
||||
#include "int_sequence.h"
|
||||
#include "sthread.h"
|
||||
|
||||
@<|OneDQuadrature| class declaration@>;
|
||||
@<|Quadrature| class declaration@>;
|
||||
@<|IntegrationWorker| class declaration@>;
|
||||
@<|QuadratureImpl| class declaration@>;
|
||||
@<|OneDPrecalcQuadrature| class declaration@>;
|
||||
@<|GaussHermite| class declaration@>;
|
||||
@<|GaussLegendre| class declaration@>;
|
||||
@<|NormalICDF| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ This pure virtual class represents a concept of one-dimensional
|
||||
(non-nested) quadrature. So, one dimensional quadrature must return
|
||||
number of levels, number of points in a given level, and then a point
|
||||
and a weight in a given level and given order.
|
||||
|
||||
@<|OneDQuadrature| class declaration@>=
|
||||
class OneDQuadrature {
|
||||
public:@;
|
||||
virtual ~OneDQuadrature()@+ {}
|
||||
virtual int numLevels() const =0;
|
||||
virtual int numPoints(int level) const =0;
|
||||
virtual double point(int level, int i) const =0;
|
||||
virtual double weight(int lelel, int i) const =0;
|
||||
};
|
||||
|
||||
@ This is a general concept of multidimensional quadrature. at this
|
||||
general level, we maintain only a dimension, and declare virtual
|
||||
functions for integration. The function take two forms; first takes a
|
||||
constant |VectorFunction| as an argument, creates locally
|
||||
|VectorFunctionSet| and do calculation, second one takes as an
|
||||
argument |VectorFunctionSet|.
|
||||
|
||||
Part of the interface is a method returning a number of evaluations
|
||||
for a specific level. Note two things: this assumes that the number of
|
||||
evaluations is known apriori and thus it is not applicable for
|
||||
adaptive quadratures, second for Monte Carlo type of quadrature, the
|
||||
level is a number of evaluations.
|
||||
|
||||
@<|Quadrature| class declaration@>=
|
||||
// Abstract multidimensional quadrature: keeps only the dimension and
// declares integration in two forms -- over a const |VectorFunction|
// (an implementation may build a function set internally) and over an
// existing |VectorFunctionSet|. |numEvals| reports the a priori number
// of evaluations of a level; for Monte Carlo type quadratures the
// level itself is that number.
class Quadrature {
protected:@;
	int dim; // dimension of the integration domain
public:@;
	Quadrature(int d) : dim(d)@+ {}
	virtual ~Quadrature()@+ {}
	int dimen() const
		{@+ return dim;@+}
	virtual void integrate(const VectorFunction& func, int level,
						   int tn, Vector& out) const =0;
	virtual void integrate(VectorFunctionSet& fs, int level, Vector& out) const =0;
	virtual int numEvals(int level) const =0;
};
|
||||
|
||||
@ This is just an integration worker, which works over a given
|
||||
|QuadratureImpl|. It also needs the function, level, a specification
|
||||
of the subgroup of points, and output vector.
|
||||
|
||||
See |@<|QuadratureImpl| class declaration@>| for details.
|
||||
|
||||
@<|IntegrationWorker| class declaration@>=
|
||||
template <typename _Tpit>
class QuadratureImpl;

// One worker of the parallel integration: sums the |ti|-th of |tn|
// portions of the quadrature points of |quad| at |level| applied to
// |func|, and adds the partial sum into the shared |outvec|.
template <typename _Tpit>
class IntegrationWorker : public THREAD {
	const QuadratureImpl<_Tpit>& quad;
	VectorFunction& func;
	int level;
	int ti;         // index of this worker's portion
	int tn;         // total number of portions
	Vector& outvec; // shared result vector
public:@;
	IntegrationWorker(const QuadratureImpl<_Tpit>& q, VectorFunction& f, int l,
					  int tii, int tnn, Vector& out)
		: quad(q), func(f), level(l), ti(tii), tn(tnn), outvec(out) @+{}
	@<|IntegrationWorker::operator()()| code@>;
};
|
||||
|
||||
|
||||
@ This integrates the given portion of the integral. We obtain first
|
||||
and last iterators for the portion (|beg| and |end|). Then we iterate
|
||||
through the portion. and finally we add the intermediate result to the
|
||||
result |outvec|.
|
||||
|
||||
This method just sums everything up as it comes. This might imply
|
||||
large numerical errors, perhaps in future I will implement something
|
||||
smarter.
|
||||
|
||||
@<|IntegrationWorker::operator()()| code@>=
|
||||
// Integrates this worker's portion: iterate from begin(ti,...) up to
// (but excluding) begin(ti+1,...), accumulate weight-scaled function
// values into a local vector, then add the local sum into the shared
// |outvec| under synchronization. Plain left-to-right summation; may
// accumulate numerical error on very large grids.
void operator()() {
	_Tpit beg = quad.begin(ti, tn, level);
	_Tpit end = quad.begin(ti+1, tn, level);
	Vector tmpall(outvec.length());
	tmpall.zeros();
	Vector tmp(outvec.length());

	// note that since beg came from begin, it has empty signal
	// and first evaluation gets no signal
	for (_Tpit run = beg; run != end; ++run) {
		func.eval(run.point(), run.signal(), tmp);
		tmpall.add(run.weight(), tmp);
	}

	{
		SYNCHRO@, syn(&outvec, "IntegrationWorker");
		outvec.add(1.0, tmpall);
	}
}
|
||||
|
||||
|
||||
@ This is the class which implements the integration. The class is
|
||||
templated by the iterator type. We declare a method |begin| returning
|
||||
an iterator to the beginnning of the |ti|-th portion out of total |tn|
|
||||
portions for a given level.
|
||||
|
||||
In addition, we define a method which saves all the points to a given
|
||||
file. Only for debugging purposes.
|
||||
|
||||
@<|QuadratureImpl| class declaration@>=
|
||||
// Implements |Quadrature| on top of a point iterator type |_Tpit|.
// Subclasses provide only |begin(ti,tn,level)| returning an iterator
// at the start of the |ti|-th of |tn| portions; the parallel summation
// itself lives here (and in |IntegrationWorker|).
template <typename _Tpit>
class QuadratureImpl : public Quadrature {
	friend class IntegrationWorker<_Tpit>;
public:@;
	QuadratureImpl(int d) : Quadrature(d)@+ {}
	@<|QuadratureImpl::integrate| code@>;
	// convenience overload: build a |tn|-member function set and integrate
	void integrate(const VectorFunction& func,
				   int level, int tn, Vector& out) const {
		VectorFunctionSet fs(func, tn);
		integrate(fs, level, out);
	}
	@<|Quadrature::savePoints| code@>;
	_Tpit start(int level) const
		{@+ return begin(0,1,level);@+}
	_Tpit end(int level) const
		{@+ return begin(1,1,level);@+}
protected:@;
	virtual _Tpit begin(int ti, int tn, int level) const =0;
};
|
||||
|
||||
@ Just fill a thread group with workes and run it.
|
||||
@<|QuadratureImpl::integrate| code@>=
|
||||
// Zeroes |out|, spawns one |IntegrationWorker| per function copy in
// |fs| (each summing its own portion of the points into |out|), and
// runs the thread group to completion.
void integrate(VectorFunctionSet& fs, int level, Vector& out) const {
	// todo: out.length()==func.outdim()
	// todo: dim == func.indim()
	out.zeros();
	THREAD_GROUP@, gr;
	for (int ti = 0; ti < fs.getNum(); ti++) {
		gr.insert(new IntegrationWorker<_Tpit>(*this, fs.getFunc(ti),
											   level, ti, fs.getNum(), out));
	}
	gr.run();
}
|
||||
|
||||
|
||||
@ Just for debugging.
|
||||
@<|Quadrature::savePoints| code@>=
|
||||
// Debugging helper: writes one line per quadrature point of |level| to
// |fname| -- the weight first, then the coordinates, tab separated.
// Exits the process if the file cannot be opened.
void savePoints(const char* fname, int level) const
	{
		FILE* fd;
		if (NULL==(fd = fopen(fname,"w"))) {
			// todo: raise
			fprintf(stderr, "Cannot open file %s for writing.\n", fname);
			exit(1);
		}
		_Tpit beg = begin(0,1,level);
		_Tpit end = begin(1,1,level);
		for (_Tpit run = beg; run != end; ++run) {
			fprintf(fd, "%16.12g", run.weight());
			for (int i = 0; i < dimen(); i++)
				fprintf(fd, "\t%16.12g", run.point()[i]);
			fprintf(fd, "\n");
		}
		fclose(fd);
	}
|
||||
|
||||
|
||||
@ This is only an interface to a precalculated data in file {\tt
|
||||
precalc\_quadrature.dat} which is basically C coded static data. It
|
||||
implements |OneDQuadrature|. The data file is supposed to define the
|
||||
following data: number of levels, array of number of points at each
|
||||
level, an array of weights and array of points. The both latter array
|
||||
store data level by level. An offset for a specific level is stored in
|
||||
|offsets| integer sequence.
|
||||
|
||||
The implementing subclasses just fill the necessary data from the
|
||||
file, the rest is calculated here.
|
||||
|
||||
@<|OneDPrecalcQuadrature| class declaration@>=
|
||||
// One dimensional quadrature backed by precalculated tables (see
// precalc_quadrature.dat): number of levels, points per level, and
// flat weight/point arrays stored level by level. |offsets| holds the
// start of each level within the flat arrays. Levels are 1-based in
// the accessors.
class OneDPrecalcQuadrature : public OneDQuadrature {
	int num_levels;
	const int* num_points; // points per level; not owned
	const double* weights; // flat weight table; not owned
	const double* points;  // flat point table; not owned
	IntSequence offsets;   // per-level offsets into the flat tables
public:@;
	OneDPrecalcQuadrature(int nlevels, const int* npoints,
						  const double* wts, const double* pts)
		: num_levels(nlevels), num_points(npoints),
		  weights(wts), points(pts), offsets(num_levels)
		{@+ calcOffsets();@+}
	virtual ~OneDPrecalcQuadrature()@+ {}
	int numLevels() const
		{@+ return num_levels;@+}
	int numPoints(int level) const
		{@+ return num_points[level-1];@+}
	double point(int level, int i) const
		{@+ return points[offsets[level-1]+i];@+}
	double weight(int level, int i) const
		{@+ return weights[offsets[level-1]+i];@+}
protected:@;
	void calcOffsets();
};
|
||||
|
||||
@ Just precalculated Gauss--Hermite quadrature. This quadrature integrates exactly integrals
|
||||
$$\int_{-\infty}^{\infty} x^ke^{-x^2}{\rm d}x$$
|
||||
for level $k$.
|
||||
|
||||
Note that if pluging this one-dimensional quadrature to product or
|
||||
Smolyak rule in order to integrate a function $f$ through normally
|
||||
distributed inputs, one has to wrap $f$ to
|
||||
|GaussConverterFunction| and apply the product or Smolyak rule to the
|
||||
new function.
|
||||
|
||||
Check {\tt precalc\_quadrature.dat} for available levels.
|
||||
|
||||
@<|GaussHermite| class declaration@>=
|
||||
// Precalculated Gauss--Hermite quadrature (weight function e^{-x^2}).
class GaussHermite : public OneDPrecalcQuadrature {
public:@;
	GaussHermite();
};
|
||||
|
||||
@ Just precalculated Gauss--Legendre quadrature. This quadrature integrates exactly integrals
|
||||
$$\int_0^1x^k{\rm d}x$$
|
||||
for level $k$.
|
||||
|
||||
Check {\tt precalc\_quadrature.dat} for available levels.
|
||||
|
||||
@<|GaussLegendre| class declaration@>=
|
||||
// Precalculated Gauss--Legendre quadrature on the unit interval.
class GaussLegendre : public OneDPrecalcQuadrature {
public:@;
	GaussLegendre();
};
|
||||
|
||||
@ This is just an inverse cumulative density function of normal
|
||||
distribution. Its only method |get| returns for a given number
|
||||
$x\in(0,1)$ a number $y$ such that $P(z<y)=x$, where the probability
|
||||
is taken over normal distribution $N(0,1)$.
|
||||
|
||||
Currently, the implementation is done by a table lookup which implies
|
||||
that the tails had to be chopped off. This further implies that Monte
|
||||
Carlo quadratures using this transformation to draw points from
|
||||
multidimensional $N(0,I)$ fail to integrate with satisfactory
|
||||
precision polynomial functions, for which the tails matter.
|
||||
|
||||
@<|NormalICDF| class declaration@>=
|
||||
// Inverse cumulative distribution function of N(0,1), implemented by
// a table lookup (the tails are chopped off; see the data file).
class NormalICDF {
public:@;
	static double get(double x);
};
|
||||
|
||||
@ End of {\tt quadrature.h} file
|
|
@ -0,0 +1,341 @@
|
|||
@q $Id: quasi_mcarlo.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ This is {\tt quasi\_mcarlo.cpp} file.
|
||||
|
||||
@c
|
||||
#include "quasi_mcarlo.h"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
@<|RadicalInverse| constructor code@>;
|
||||
@<|RadicalInverse::eval| code@>;
|
||||
@<|RadicalInverse::increase| code@>;
|
||||
@<|RadicalInverse::print| code@>;
|
||||
@<|HaltonSequence| static data@>;
|
||||
@<|HaltonSequence| constructor code@>;
|
||||
@<|HaltonSequence::operator=| code@>;
|
||||
@<|HaltonSequence::increase| code@>;
|
||||
@<|HaltonSequence::eval| code@>;
|
||||
@<|HaltonSequence::print| code@>;
|
||||
@<|qmcpit| empty constructor code@>;
|
||||
@<|qmcpit| regular constructor code@>;
|
||||
@<|qmcpit| copy constructor code@>;
|
||||
@<|qmcpit| destructor@>;
|
||||
@<|qmcpit::operator==| code@>;
|
||||
@<|qmcpit::operator=| code@>;
|
||||
@<|qmcpit::operator++| code@>;
|
||||
@<|qmcpit::weight| code@>;
|
||||
@<|qmcnpit| empty constructor code@>;
|
||||
@<|qmcnpit| regular constructor code@>;
|
||||
@<|qmcnpit| copy constructor code@>;
|
||||
@<|qmcnpit| destructor@>;
|
||||
@<|qmcnpit::operator=| code@>;
|
||||
@<|qmcnpit::operator++| code@>;
|
||||
@<|WarnockPerScheme::permute| code@>;
|
||||
@<|ReversePerScheme::permute| code@>;
|
||||
|
||||
@ Here in the constructor, we have to calculate a maximum length of
|
||||
|coeff| array for a given |base| and given maximum |maxn|. After
|
||||
allocation, we calculate the coefficients.
|
||||
|
||||
@<|RadicalInverse| constructor code@>=
|
||||
// Builds the base-|b| digit expansion of |n|. The |coeff| array is
// sized for the largest representable number |maxn|
// (floor(log_b(maxn))+2 digits); |j| ends up as the index of the
// highest used digit.
RadicalInverse::RadicalInverse(int n, int b, int mxn)
	: num(n), base(b), maxn(mxn),
	  coeff((int)(floor(log((double)maxn)/log((double)b))+2), 0)
{
	int nr = num;
	j = -1;
	do {
		j++;
		coeff[j] = nr % base;
		nr = nr / base;
	} while (nr > 0);
}
|
||||
|
||||
@ This evaluates the radical inverse. If there was no permutation, we have to calculate
|
||||
$$
|
||||
{c_0\over b}+{c_1\over b^2}+\ldots+{c_j\over b^{j+1}}
|
||||
$$
|
||||
which is evaluated as
|
||||
$$
|
||||
\left(\ldots\left(\left({c_j\over b}\cdot{1\over b}+{c_{j-1}\over b}\right)
|
||||
\cdot{1\over b}+{c_{j-2}\over b}\right)
|
||||
\ldots\right)\cdot{1\over b}+{c_0\over b}
|
||||
$$
|
||||
Now with permutation $\pi$, we have
|
||||
$$
|
||||
\left(\ldots\left(\left({\pi(c_j)\over b}\cdot{1\over b}+
|
||||
{\pi(c_{j-1})\over b}\right)\cdot{1\over b}+
|
||||
{\pi(c_{j-2})\over b}\right)
|
||||
\ldots\right)\cdot{1\over b}+{\pi(c_0)\over b}
|
||||
$$
|
||||
|
||||
|
||||
@<|RadicalInverse::eval| code@>=
|
||||
double RadicalInverse::eval(const PermutationScheme& p) const
|
||||
{
|
||||
double res = 0;
|
||||
for (int i = j; i >= 0; i--) {
|
||||
int cper = p.permute(i, base, coeff[i]);
|
||||
res = (cper + res)/base;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
@ We just add 1 to the lowest coefficient and check for overflow with respect to the base.
|
||||
@<|RadicalInverse::increase| code@>=
|
||||
void RadicalInverse::increase()
|
||||
{
|
||||
// todo: raise if |num+1 > maxn|
|
||||
num++;
|
||||
int i = 0;
|
||||
coeff[i]++;
|
||||
while (coeff[i] == base) {
|
||||
coeff[i] = 0;
|
||||
coeff[++i]++;
|
||||
}
|
||||
if (i > j)
|
||||
j = i;
|
||||
}
|
||||
|
||||
@ Debug print.
|
||||
@<|RadicalInverse::print| code@>=
|
||||
void RadicalInverse::print() const
|
||||
{
|
||||
printf("n=%d b=%d c=", num, base);
|
||||
coeff.print();
|
||||
}
|
||||
|
||||
@ Here we have the first 170 primes. This means that we are not able
|
||||
to integrate dimensions greater than 170.
|
||||
|
||||
@<|HaltonSequence| static data@>=
|
||||
int HaltonSequence::num_primes = 170;
|
||||
int HaltonSequence::primes[] = {
|
||||
2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
|
||||
31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
|
||||
73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
|
||||
127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
|
||||
179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
|
||||
233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
|
||||
283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
|
||||
353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
|
||||
419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
|
||||
467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
|
||||
547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
|
||||
607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
|
||||
661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
|
||||
739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
|
||||
811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
|
||||
877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
|
||||
947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013
|
||||
};
|
||||
|
||||
|
||||
@ This takes first |dim| primes and constructs |dim| radical inverses
|
||||
and calls |eval|.
|
||||
|
||||
@<|HaltonSequence| constructor code@>=
|
||||
HaltonSequence::HaltonSequence(int n, int mxn, int dim, const PermutationScheme& p)
|
||||
: num(n), maxn(mxn), per(p), pt(dim)
|
||||
{
|
||||
// todo: raise if |dim > num_primes|
|
||||
// todo: raise if |n > mxn|
|
||||
for (int i = 0; i < dim; i++)
|
||||
ri.push_back(RadicalInverse(num, primes[i], maxn));
|
||||
eval();
|
||||
}
|
||||
|
||||
@
|
||||
@<|HaltonSequence::operator=| code@>=
|
||||
const HaltonSequence& HaltonSequence::operator=(const HaltonSequence& hs)
|
||||
{
|
||||
num = hs.num;
|
||||
maxn = hs.maxn;
|
||||
ri.clear();
|
||||
for (unsigned int i = 0; i < hs.ri.size(); i++)
|
||||
ri.push_back(RadicalInverse(hs.ri[i]));
|
||||
pt = hs.pt;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ This calls |RadicalInverse::increase| for all radical inverses and
|
||||
calls |eval|.
|
||||
|
||||
@<|HaltonSequence::increase| code@>=
|
||||
void HaltonSequence::increase()
|
||||
{
|
||||
for (unsigned int i = 0; i < ri.size(); i++)
|
||||
ri[i].increase();
|
||||
num++;
|
||||
if (num <= maxn)
|
||||
eval();
|
||||
}
|
||||
|
||||
@ This sets point |pt| to radical inverse evaluations in each dimension.
|
||||
@<|HaltonSequence::eval| code@>=
|
||||
void HaltonSequence::eval()
|
||||
{
|
||||
for (unsigned int i = 0; i < ri.size(); i++)
|
||||
pt[i] = ri[i].eval(per);
|
||||
}
|
||||
|
||||
@ Debug print.
|
||||
@<|HaltonSequence::print| code@>=
|
||||
void HaltonSequence::print() const
|
||||
{
|
||||
for (unsigned int i = 0; i < ri.size(); i++)
|
||||
ri[i].print();
|
||||
printf("point=[ ");
|
||||
for (unsigned int i = 0; i < ri.size(); i++)
|
||||
printf("%7.6f ", pt[i]);
|
||||
printf("]\n");
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcpit| empty constructor code@>=
|
||||
qmcpit::qmcpit()
|
||||
: spec(NULL), halton(NULL), sig(NULL)@+ {}
|
||||
|
||||
@
|
||||
@<|qmcpit| regular constructor code@>=
|
||||
qmcpit::qmcpit(const QMCSpecification& s, int n)
|
||||
: spec(&s), halton(new HaltonSequence(n, s.level(), s.dimen(), s.getPerScheme())),
|
||||
sig(new ParameterSignal(s.dimen()))
|
||||
{
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcpit| copy constructor code@>=
|
||||
qmcpit::qmcpit(const qmcpit& qpit)
|
||||
: spec(qpit.spec), halton(NULL), sig(NULL)
|
||||
{
|
||||
if (qpit.halton)
|
||||
halton = new HaltonSequence(*(qpit.halton));
|
||||
if (qpit.sig)
|
||||
sig = new ParameterSignal(qpit.spec->dimen());
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcpit| destructor@>=
|
||||
qmcpit::~qmcpit()
|
||||
{
|
||||
if (halton)
|
||||
delete halton;
|
||||
if (sig)
|
||||
delete sig;
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcpit::operator==| code@>=
|
||||
bool qmcpit::operator==(const qmcpit& qpit) const
|
||||
{
|
||||
return (spec == qpit.spec) &&
|
||||
((halton == NULL && qpit.halton == NULL) ||
|
||||
(halton != NULL && qpit.halton != NULL && halton->getNum() == qpit.halton->getNum()));
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcpit::operator=| code@>=
|
||||
const qmcpit& qmcpit::operator=(const qmcpit& qpit)
|
||||
{
|
||||
spec = qpit.spec;
|
||||
if (halton)
|
||||
delete halton;
|
||||
if (qpit.halton)
|
||||
halton = new HaltonSequence(*(qpit.halton));
|
||||
else
|
||||
halton = NULL;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
@
|
||||
@<|qmcpit::operator++| code@>=
|
||||
qmcpit& qmcpit::operator++()
|
||||
{
|
||||
// todo: raise if |halton == null || qmcq == NULL|
|
||||
halton->increase();
|
||||
return *this;
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcpit::weight| code@>=
|
||||
double qmcpit::weight() const
|
||||
{
|
||||
return 1.0/spec->level();
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcnpit| empty constructor code@>=
|
||||
qmcnpit::qmcnpit()
|
||||
: qmcpit(), pnt(NULL)@+ {}
|
||||
|
||||
@
|
||||
@<|qmcnpit| regular constructor code@>=
|
||||
qmcnpit::qmcnpit(const QMCSpecification& s, int n)
|
||||
: qmcpit(s, n), pnt(new Vector(s.dimen()))
|
||||
{
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcnpit| copy constructor code@>=
|
||||
qmcnpit::qmcnpit(const qmcnpit& qpit)
|
||||
: qmcpit(qpit), pnt(NULL)
|
||||
{
|
||||
if (qpit.pnt)
|
||||
pnt = new Vector(*(qpit.pnt));
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcnpit| destructor@>=
|
||||
qmcnpit::~qmcnpit()
|
||||
{
|
||||
if (pnt)
|
||||
delete pnt;
|
||||
}
|
||||
|
||||
@
|
||||
@<|qmcnpit::operator=| code@>=
|
||||
const qmcnpit& qmcnpit::operator=(const qmcnpit& qpit)
|
||||
{
|
||||
qmcpit::operator=(qpit);
|
||||
if (pnt)
|
||||
delete pnt;
|
||||
if (qpit.pnt)
|
||||
pnt = new Vector(*(qpit.pnt));
|
||||
else
|
||||
pnt = NULL;
|
||||
return *this;
|
||||
}
|
||||
|
||||
@ Here we increase a point in the Halton sequence and then store images
|
||||
of the points in |NormalICDF| function.
|
||||
|
||||
@<|qmcnpit::operator++| code@>=
|
||||
qmcnpit& qmcnpit::operator++()
|
||||
{
|
||||
qmcpit::operator++();
|
||||
for (int i = 0; i < halton->point().length(); i++)
|
||||
(*pnt)[i] = NormalICDF::get(halton->point()[i]);
|
||||
return *this;
|
||||
}
|
||||
|
||||
@ Clear from code.
|
||||
@<|WarnockPerScheme::permute| code@>=
|
||||
int WarnockPerScheme::permute(int i, int base, int c) const
|
||||
{
|
||||
return (c+i) % base;
|
||||
}
|
||||
|
||||
@ Clear from code.
|
||||
@<|ReversePerScheme::permute| code@>=
|
||||
int ReversePerScheme::permute(int i, int base, int c) const
|
||||
{
|
||||
return (base-c) % base;
|
||||
}
|
||||
|
||||
@ End of {\tt quasi\_mcarlo.cpp} file.
|
|
@ -0,0 +1,286 @@
|
|||
@q $Id: quasi_mcarlo.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Quasi Monte Carlo quadrature. This is {\tt quasi\_mcarlo.h} file.
|
||||
|
||||
This defines quasi Monte Carlo quadratures for cube and for a function
|
||||
multiplied by normal density. The quadrature for a cube is named
|
||||
|QMCarloCubeQuadrature| and integrates:
|
||||
$$\int_{\langle 0,1\rangle^n}f(x){\rm d}x$$
|
||||
The quadrature for a function of normally distributed parameters is
|
||||
named |QMCarloNormalQuadrature| and integrates:
|
||||
$${1\over\sqrt{(2\pi)^n}}\int_{(-\infty,\infty)^n}f(x)e^{-{1\over 2}x^Tx}{\rm d}x$$
|
||||
|
||||
For a cube we define |qmcpit| as iterator of |QMCarloCubeQuadrature|,
|
||||
and for the normal density multiplied function we define |qmcnpit| as
|
||||
iterator of |QMCarloNormalQuadrature|.
|
||||
|
||||
The quasi Monte Carlo method generates low discrepancy points with
|
||||
equal weights. The one dimensional low discrepancy sequences are
|
||||
generated by |RadicalInverse| class, the sequences are combined for
|
||||
higher dimensions by |HaltonSequence| class. The Halton sequence can
|
||||
use a permutation scheme; |PermutationScheme| is an abstract class
|
||||
for all permutation schemes. We have three implementations:
|
||||
|WarnockPerScheme|, |ReversePerScheme|, and |IdentityPerScheme|.
|
||||
|
||||
@s PermutationScheme int
|
||||
@s RadicalInverse int
|
||||
@s HaltonSequence int
|
||||
@s QMCSpecification int
|
||||
@s qmcpit int
|
||||
@s QMCarloCubeQuadrature int
|
||||
@s qmcnpit int
|
||||
@s QMCarloNormalQuadrature int
|
||||
@s WarnockPerScheme int
|
||||
@s ReversePerScheme int
|
||||
@s IdentityPerScheme int
|
||||
|
||||
@c
|
||||
#ifndef QUASI_MCARLO_H
|
||||
#define QUASI_MCARLO_H
|
||||
|
||||
#include "int_sequence.h"
|
||||
#include "quadrature.h"
|
||||
|
||||
#include "Vector.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
@<|PermutationScheme| class declaration@>;
|
||||
@<|RadicalInverse| class declaration@>;
|
||||
@<|HaltonSequence| class declaration@>;
|
||||
@<|QMCSpecification| class declaration@>;
|
||||
@<|qmcpit| class declaration@>;
|
||||
@<|QMCarloCubeQuadrature| class declaration@>;
|
||||
@<|qmcnpit| class declaration@>;
|
||||
@<|QMCarloNormalQuadrature| class declaration@>;
|
||||
@<|WarnockPerScheme| class declaration@>;
|
||||
@<|ReversePerScheme| class declaration@>;
|
||||
@<|IdentityPerScheme| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ This abstract class declares |permute| method which permutes
|
||||
coefficient |c| having index of |i| for the base |base| and returns
|
||||
the permuted coefficient which must be in $0,\ldots,base-1$.
|
||||
|
||||
@<|PermutationScheme| class declaration@>=
|
||||
class PermutationScheme {
|
||||
public:@;
|
||||
PermutationScheme()@+ {}
|
||||
virtual ~PermutationScheme()@+ {}
|
||||
virtual int permute(int i, int base, int c) const =0;
|
||||
};
|
||||
|
||||
|
||||
@ This class represents an integer number |num| as
|
||||
$c_0+c_1b+c_2b^2+\ldots+c_jb^j$, where $b$ is |base| and
|
||||
$c_0,\ldots,c_j$ is stored in |coeff|. The size of |IntSequence| coeff
|
||||
does not grow with growing |num|, but is fixed from the very beginning
|
||||
and is set according to supplied maximum |maxn|.
|
||||
|
||||
The basic method is |eval| which evaluates the |RadicalInverse| with a
|
||||
given permutation scheme and returns the point, and |increase| which
|
||||
increases |num| and recalculates the coefficients.
|
||||
|
||||
@<|RadicalInverse| class declaration@>=
|
||||
class RadicalInverse {
|
||||
int num;
|
||||
int base;
|
||||
int maxn;
|
||||
int j;
|
||||
IntSequence coeff;
|
||||
public:@;
|
||||
RadicalInverse(int n, int b, int mxn);
|
||||
RadicalInverse(const RadicalInverse& ri)
|
||||
: num(ri.num), base(ri.base), maxn(ri.maxn), j(ri.j), coeff(ri.coeff)@+ {}
|
||||
const RadicalInverse& operator=(const RadicalInverse& radi)
|
||||
{
|
||||
num = radi.num; base = radi.base; maxn = radi.maxn;
|
||||
j = radi.j; coeff = radi.coeff;
|
||||
return *this;
|
||||
}
|
||||
double eval(const PermutationScheme& p) const;
|
||||
void increase();
|
||||
void print() const;
|
||||
};
|
||||
|
||||
@ This is a vector of |RadicalInverse|s, each |RadicalInverse| has a
|
||||
different prime as its base. The static members |primes| and
|
||||
|num_primes| define a precalculated array of primes. The |increase|
|
||||
method of the class increases indices in all |RadicalInverse|s and
|
||||
sets point |pt| to contain the points in each dimension.
|
||||
|
||||
@<|HaltonSequence| class declaration@>=
|
||||
class HaltonSequence {
|
||||
private:@;
|
||||
static int primes[];
|
||||
static int num_primes;
|
||||
protected:@;
|
||||
int num;
|
||||
int maxn;
|
||||
vector<RadicalInverse> ri;
|
||||
const PermutationScheme& per;
|
||||
Vector pt;
|
||||
public:@;
|
||||
HaltonSequence(int n, int mxn, int dim, const PermutationScheme& p);
|
||||
HaltonSequence(const HaltonSequence& hs)
|
||||
: num(hs.num), maxn(hs.maxn), ri(hs.ri), per(hs.per), pt(hs.pt)@+ {}
|
||||
const HaltonSequence& operator=(const HaltonSequence& hs);
|
||||
void increase();
|
||||
const Vector& point() const
|
||||
{@+ return pt;@+}
|
||||
const int getNum() const
|
||||
{@+ return num;@+}
|
||||
void print() const;
|
||||
protected:@;
|
||||
void eval();
|
||||
};
|
||||
|
||||
@ This is a specification of quasi Monte Carlo quadrature. It consists
|
||||
of dimension |dim|, number of points (or level) |lev|, and the
|
||||
permutation scheme. This class is common to all quasi Monte Carlo
|
||||
classes.
|
||||
|
||||
@<|QMCSpecification| class declaration@>=
|
||||
class QMCSpecification {
|
||||
protected:@;
|
||||
int dim;
|
||||
int lev;
|
||||
const PermutationScheme& per_scheme;
|
||||
public:@;
|
||||
QMCSpecification(int d, int l, const PermutationScheme& p)
|
||||
: dim(d), lev(l), per_scheme(p)@+ {}
|
||||
virtual ~QMCSpecification() {}
|
||||
int dimen() const
|
||||
{@+ return dim;@+}
|
||||
int level() const
|
||||
{@+ return lev;@+}
|
||||
const PermutationScheme& getPerScheme() const
|
||||
{@+ return per_scheme;@+}
|
||||
};
|
||||
|
||||
|
||||
@ This is an iterator for quasi Monte Carlo over a cube
|
||||
|QMCarloCubeQuadrature|. The iterator maintains |HaltonSequence| of
|
||||
the same dimension as given by the specification. An iterator can be
|
||||
constructed from a given number |n|, or by a copy constructor. For
|
||||
technical reasons, there is also an empty constructor; for that
|
||||
reason, every member is a pointer.
|
||||
|
||||
@<|qmcpit| class declaration@>=
|
||||
class qmcpit {
|
||||
protected:@;
|
||||
const QMCSpecification* spec;
|
||||
HaltonSequence* halton;
|
||||
ParameterSignal* sig;
|
||||
public:@;
|
||||
qmcpit();
|
||||
qmcpit(const QMCSpecification& s, int n);
|
||||
qmcpit(const qmcpit& qpit);
|
||||
~qmcpit();
|
||||
bool operator==(const qmcpit& qpit) const;
|
||||
bool operator!=(const qmcpit& qpit) const
|
||||
{@+ return ! operator==(qpit);@+}
|
||||
const qmcpit& operator=(const qmcpit& qpit);
|
||||
qmcpit& operator++();
|
||||
const ParameterSignal& signal() const
|
||||
{@+ return *sig;@+}
|
||||
const Vector& point() const
|
||||
{@+ return halton->point();@+}
|
||||
double weight() const;
|
||||
void print() const
|
||||
{@+ halton->print();@+}
|
||||
};
|
||||
|
||||
@ This is an easy declaration of quasi Monte Carlo quadrature for a
|
||||
cube. Everything important has been done in its iterator |qmcpit|, so
|
||||
we only inherit from general |Quadrature| and reimplement |begin| and
|
||||
|numEvals|.
|
||||
|
||||
@<|QMCarloCubeQuadrature| class declaration@>=
|
||||
class QMCarloCubeQuadrature : public QuadratureImpl<qmcpit>, public QMCSpecification {
|
||||
public:@;
|
||||
QMCarloCubeQuadrature(int d, int l, const PermutationScheme& p)
|
||||
: QuadratureImpl<qmcpit>(d), QMCSpecification(d, l, p)@+ {}
|
||||
virtual ~QMCarloCubeQuadrature()@+ {}
|
||||
int numEvals(int l) const
|
||||
{@+ return l;@+}
|
||||
protected:@;
|
||||
qmcpit begin(int ti, int tn, int lev) const
|
||||
{@+ return qmcpit(*this, ti*level()/tn + 1);@+}
|
||||
};
|
||||
|
||||
@ This is an iterator for |QMCarloNormalQuadrature|. It is equivalent
|
||||
to an iterator for quasi Monte Carlo cube quadrature, except for the point. The
|
||||
point is obtained from a point of |QMCarloCubeQuadrature| by a
|
||||
transformation $\langle
|
||||
0,1\rangle\rightarrow\langle-\infty,\infty\rangle$ applied to all
|
||||
dimensions. The transformation yields a normal distribution from a
|
||||
uniform distribution on $\langle0,1\rangle$. It is in fact
|
||||
|NormalICDF|.
|
||||
|
||||
@<|qmcnpit| class declaration@>=
|
||||
class qmcnpit : public qmcpit {
|
||||
protected:@;
|
||||
Vector* pnt;
|
||||
public:@;
|
||||
qmcnpit();
|
||||
qmcnpit(const QMCSpecification& spec, int n);
|
||||
qmcnpit(const qmcnpit& qpit);
|
||||
~qmcnpit();
|
||||
bool operator==(const qmcnpit& qpit) const
|
||||
{@+ return qmcpit::operator==(qpit);@+}
|
||||
bool operator!=(const qmcnpit& qpit) const
|
||||
{@+ return ! operator==(qpit);@+}
|
||||
const qmcnpit& operator=(const qmcnpit& qpit);
|
||||
qmcnpit& operator++();
|
||||
const ParameterSignal& signal() const
|
||||
{@+ return *sig;@+}
|
||||
const Vector& point() const
|
||||
{@+ return *pnt;@+}
|
||||
void print() const
|
||||
{@+ halton->print();pnt->print();@+}
|
||||
};
|
||||
|
||||
@ This is an easy declaration of quasi Monte Carlo quadrature for a
|
||||
function multiplied by the normal density. Everything important has been done in its iterator |qmcnpit|, so
|
||||
we only inherit from general |Quadrature| and reimplement |begin| and
|
||||
|numEvals|.
|
||||
|
||||
@<|QMCarloNormalQuadrature| class declaration@>=
|
||||
class QMCarloNormalQuadrature : public QuadratureImpl<qmcnpit>, public QMCSpecification {
|
||||
public:@;
|
||||
QMCarloNormalQuadrature(int d, int l, const PermutationScheme& p)
|
||||
: QuadratureImpl<qmcnpit>(d), QMCSpecification(d, l, p)@+ {}
|
||||
virtual ~QMCarloNormalQuadrature()@+ {}
|
||||
int numEvals(int l) const
|
||||
{@+ return l;@+}
|
||||
protected:@;
|
||||
qmcnpit begin(int ti, int tn, int lev) const
|
||||
{@+ return qmcnpit(*this, ti*level()/tn + 1);@+}
|
||||
};
|
||||
|
||||
@ Declares Warnock permutation scheme.
|
||||
@<|WarnockPerScheme| class declaration@>=
|
||||
class WarnockPerScheme : public PermutationScheme {
|
||||
public:@;
|
||||
int permute(int i, int base, int c) const;
|
||||
};
|
||||
|
||||
@ Declares reverse permutation scheme.
|
||||
@<|ReversePerScheme| class declaration@>=
|
||||
class ReversePerScheme : public PermutationScheme {
|
||||
public:@;
|
||||
int permute(int i, int base, int c) const;
|
||||
};
|
||||
|
||||
@ Declares no permutation (identity) scheme.
|
||||
@<|IdentityPerScheme| class declaration@>=
|
||||
class IdentityPerScheme : public PermutationScheme {
|
||||
public:@;
|
||||
int permute(int i, int base, int c) const
|
||||
{@+ return c;@+}
|
||||
};
|
||||
|
||||
@ End of {\tt quasi\_mcarlo.h} file
|
|
@ -0,0 +1,294 @@
|
|||
@q $Id: smolyak.cweb 1208 2007-03-19 21:33:12Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ This is {\tt smolyak.cpp} file.
|
||||
|
||||
@c
|
||||
#include "smolyak.h"
|
||||
#include "symmetry.h"
|
||||
|
||||
@<|smolpit| empty constructor@>;
|
||||
@<|smolpit| regular constructor@>;
|
||||
@<|smolpit| copy constructor@>;
|
||||
@<|smolpit| destructor@>;
|
||||
@<|smolpit::operator==| code@>;
|
||||
@<|smolpit::operator=| code@>;
|
||||
@<|smolpit::operator++| code@>;
|
||||
@<|smolpit::setPointAndWeight| code@>;
|
||||
@<|smolpit::print| code@>;
|
||||
@<|SmolyakQuadrature| constructor@>;
|
||||
@<|SmolyakQuadrature::numEvals| code@>;
|
||||
@<|SmolyakQuadrature::begin| code@>;
|
||||
@<|SmolyakQuadrature::calcNumEvaluations| code@>;
|
||||
@<|SmolyakQuadrature::designLevelForEvals| code@>;
|
||||
|
||||
@
|
||||
@<|smolpit| empty constructor@>=
|
||||
smolpit::smolpit()
|
||||
: smolq(NULL), isummand(0), jseq(NULL), sig(NULL), p(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
@ This constructs a beginning of |isum| summand in |smolq|. We must be
|
||||
careful here, since |isum| can be past-the-end, so no reference to
|
||||
vectors in |smolq| by |isum| must be done in this case.
|
||||
|
||||
@<|smolpit| regular constructor@>=
|
||||
smolpit::smolpit(const SmolyakQuadrature& q, unsigned int isum)
|
||||
: smolq(&q), isummand(isum), jseq(new IntSequence(q.dimen(), 0)),
|
||||
sig(new ParameterSignal(q.dimen())), p(new Vector(q.dimen()))
|
||||
{
|
||||
if (isummand < q.numSummands()) {
|
||||
setPointAndWeight();
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|smolpit| copy constructor@>=
|
||||
smolpit::smolpit(const smolpit& spit)
|
||||
: smolq(spit.smolq), isummand(spit.isummand), w(spit.w)
|
||||
{
|
||||
if (spit.jseq)
|
||||
jseq = new IntSequence(*(spit.jseq));
|
||||
else
|
||||
jseq = NULL;
|
||||
if (spit.sig)
|
||||
sig = new ParameterSignal(*(spit.sig));
|
||||
else
|
||||
sig = NULL;
|
||||
if (spit.p)
|
||||
p = new Vector(*(spit.p));
|
||||
else
|
||||
p = NULL;
|
||||
}
|
||||
|
||||
@
|
||||
@<|smolpit| destructor@>=
|
||||
smolpit::~smolpit()
|
||||
{
|
||||
if (jseq)
|
||||
delete jseq;
|
||||
if (sig)
|
||||
delete sig;
|
||||
if (p)
|
||||
delete p;
|
||||
}
|
||||
|
||||
@
|
||||
@<|smolpit::operator==| code@>=
|
||||
bool smolpit::operator==(const smolpit& spit) const
|
||||
{
|
||||
bool ret = true;
|
||||
ret = ret & smolq == spit.smolq;
|
||||
ret = ret & isummand == spit.isummand;
|
||||
ret = ret & ((jseq==NULL && spit.jseq==NULL) ||
|
||||
(jseq!=NULL && spit.jseq!=NULL && *jseq == *(spit.jseq)));
|
||||
return ret;
|
||||
}
|
||||
|
||||
@
|
||||
@<|smolpit::operator=| code@>=
|
||||
const smolpit& smolpit::operator=(const smolpit& spit)
|
||||
{
|
||||
smolq = spit.smolq;
|
||||
isummand = spit.isummand;
|
||||
w = spit.w;
|
||||
|
||||
if (jseq)
|
||||
delete jseq;
|
||||
if (sig)
|
||||
delete sig;
|
||||
if (p)
|
||||
delete p;
|
||||
|
||||
if (spit.jseq)
|
||||
jseq = new IntSequence(*(spit.jseq));
|
||||
else
|
||||
jseq = NULL;
|
||||
if (spit.sig)
|
||||
sig = new ParameterSignal(*(spit.sig));
|
||||
else
|
||||
sig = NULL;
|
||||
if (spit.p)
|
||||
p = new Vector(*(spit.p));
|
||||
else
|
||||
p = NULL;
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
@ We first try to increase index within the current summand. If we are
|
||||
at maximum, we go to a subsequent summand. Note that in this case all
|
||||
indices in |jseq| will be zero, so no change is needed.
|
||||
|
||||
@<|smolpit::operator++| code@>=
|
||||
smolpit& smolpit::operator++()
|
||||
{
|
||||
// todo: throw if |smolq==NULL| or |jseq==NULL| or |sig==NULL|
|
||||
const IntSequence& levpts = smolq->levpoints[isummand];
|
||||
int i = smolq->dimen()-1;
|
||||
(*jseq)[i]++;
|
||||
while (i >= 0 && (*jseq)[i] == levpts[i]) {
|
||||
(*jseq)[i] = 0;
|
||||
i--;
|
||||
if (i >= 0)
|
||||
(*jseq)[i]++;
|
||||
}
|
||||
sig->signalAfter(std::max(i,0));
|
||||
|
||||
if (i < 0)
|
||||
isummand++;
|
||||
|
||||
if (isummand < smolq->numSummands())
|
||||
setPointAndWeight();
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
@ Here we set the point coordinates according to |jseq| and
|
||||
|isummand|. Also the weight is set here.
|
||||
|
||||
@<|smolpit::setPointAndWeight| code@>=
|
||||
void smolpit::setPointAndWeight()
|
||||
{
|
||||
// todo: raise if |smolq==NULL| or |jseq==NULL| or |sig==NULL| or
|
||||
// |p==NULL| or |isummand>=smolq->numSummands()|
|
||||
int l = smolq->level;
|
||||
int d = smolq->dimen();
|
||||
int sumk = (smolq->levels[isummand]).sum();
|
||||
int m1exp = l + d - sumk - 1;
|
||||
w = (2*(m1exp/2)==m1exp)? 1.0 : -1.0;
|
||||
w *= smolq->psc.noverk(d-1, sumk-l);
|
||||
for (int i = 0; i < d; i++) {
|
||||
int ki = (smolq->levels[isummand])[i];
|
||||
(*p)[i] = (smolq->uquad).point(ki, (*jseq)[i]);
|
||||
w *= (smolq->uquad).weight(ki, (*jseq)[i]);
|
||||
}
|
||||
}
|
||||
|
||||
@ Debug print.
|
||||
@<|smolpit::print| code@>=
|
||||
void smolpit::print() const
|
||||
{
|
||||
printf("isum=%-3d: [", isummand);
|
||||
for (int i = 0; i < smolq->dimen(); i++)
|
||||
printf("%2d ", (smolq->levels[isummand])[i]);
|
||||
printf("] j=[");
|
||||
for (int i = 0; i < smolq->dimen(); i++)
|
||||
printf("%2d ", (*jseq)[i]);
|
||||
printf("] %+4.3f*(",w);
|
||||
for (int i = 0; i < smolq->dimen()-1; i++)
|
||||
printf("%+4.3f ", (*p)[i]);
|
||||
printf("%+4.3f)\n",(*p)[smolq->dimen()-1]);
|
||||
}
|
||||
|
||||
@ Here is the constructor of |SmolyakQuadrature|. We have to setup
|
||||
|levels|, |levpoints| and |cumevals|. We have to go through all
|
||||
$d$-dimensional sequences $k$, such that $l\leq \vert k\vert\leq
|
||||
l+d-1$ and all $k_i$ are positive integers. This is equivalent to
|
||||
going through all $k$ such that $l-d\leq\vert k\vert\leq l-1$ and all
|
||||
$k_i$ are non-negative integers. This is equivalent to going through
|
||||
$d+1$ dimensional sequences $(k,x)$ such that $\vert(k,x)\vert =l-1$
|
||||
and $x=0,\ldots,d-1$. The resulting sequence of positive integers is
|
||||
obtained by adding $1$ to all $k_i$.
|
||||
|
||||
@<|SmolyakQuadrature| constructor@>=
|
||||
SmolyakQuadrature::SmolyakQuadrature(int d, int l, const OneDQuadrature& uq)
|
||||
: QuadratureImpl<smolpit>(d), level(l), uquad(uq), psc(d-1,d-1)
|
||||
{
|
||||
// todo: check |l>1|, |l>=d|
|
||||
	// todo: check |l>=uquad.minLevel()|, |l<=uquad.maxLevel()|
|
||||
int cum = 0;
|
||||
SymmetrySet ss(l-1, d+1);
|
||||
for (symiterator si(ss); !si.isEnd(); ++si) {
|
||||
if ((*si)[d] <= d-1) {
|
||||
IntSequence lev((const IntSequence&)*si, 0, d);
|
||||
lev.add(1);
|
||||
levels.push_back(lev);
|
||||
IntSequence levpts(d);
|
||||
for (int i = 0; i < d; i++)
|
||||
levpts[i] = uquad.numPoints(lev[i]);
|
||||
levpoints.push_back(levpts);
|
||||
cum += levpts.mult();
|
||||
cumevals.push_back(cum);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ Here we return a number of evaluations of the quadrature for the
|
||||
given level. If the given level is the current one, we simply return
|
||||
the maximum cumulative number of evaluations. Otherwise we call costly
|
||||
|calcNumEvaluations| method.
|
||||
|
||||
@<|SmolyakQuadrature::numEvals| code@>=
|
||||
int SmolyakQuadrature::numEvals(int l) const
|
||||
{
|
||||
if (l != level)
|
||||
return calcNumEvaluations(l);
|
||||
else
|
||||
return cumevals[numSummands()-1];
|
||||
}
|
||||
|
||||
|
||||
@ This divides all the evaluations to |tn| approximately equal groups,
|
||||
and returns the beginning of the specified group |ti|. The granularity
|
||||
of divisions are summands as listed by |levels|.
|
||||
|
||||
@<|SmolyakQuadrature::begin| code@>=
|
||||
smolpit SmolyakQuadrature::begin(int ti, int tn, int l) const
|
||||
{
|
||||
	// todo: raise if |level!=l|
|
||||
if (ti == tn)
|
||||
return smolpit(*this, numSummands());
|
||||
|
||||
int totevals = cumevals[numSummands()-1];
|
||||
int evals = (totevals*ti)/tn;
|
||||
unsigned int isum = 0;
|
||||
while (isum+1 < numSummands() && cumevals[isum+1] < evals)
|
||||
isum++;
|
||||
return smolpit(*this, isum);
|
||||
}
|
||||
|
||||
@ This is the same in a structure as |@<|SmolyakQuadrature| constructor@>|.
|
||||
We have to go through all summands and calculate
|
||||
a number of evaluations in each summand.
|
||||
|
||||
@<|SmolyakQuadrature::calcNumEvaluations| code@>=
|
||||
int SmolyakQuadrature::calcNumEvaluations(int lev) const
|
||||
{
|
||||
int cum = 0;
|
||||
SymmetrySet ss(lev-1, dim+1);
|
||||
for (symiterator si(ss); !si.isEnd(); ++si) {
|
||||
if ((*si)[dim] <= dim-1) {
|
||||
IntSequence lev((const IntSequence&)*si, 0, dim);
|
||||
lev.add(1);
|
||||
IntSequence levpts(dim);
|
||||
for (int i = 0; i < dim; i++)
|
||||
levpts[i] = uquad.numPoints(lev[i]);
|
||||
cum += levpts.mult();
|
||||
}
|
||||
}
|
||||
return cum;
|
||||
}
|
||||
|
||||
@ This returns a maximum level such that the number of evaluations is
|
||||
less than the given number.
|
||||
|
||||
@<|SmolyakQuadrature::designLevelForEvals| code@>=
|
||||
void SmolyakQuadrature::designLevelForEvals(int max_evals, int& lev, int& evals) const
|
||||
{
|
||||
int last_evals;
|
||||
evals = 1;
|
||||
lev = 1;
|
||||
do {
|
||||
lev++;
|
||||
last_evals = evals;
|
||||
evals = calcNumEvaluations(lev);
|
||||
} while (lev < uquad.numLevels() && evals <= max_evals);
|
||||
lev--;
|
||||
evals = last_evals;
|
||||
}
|
||||
|
||||
|
||||
@ End of {\tt smolyak.cpp} file
|
|
@ -0,0 +1,123 @@
|
|||
@q $Id: smolyak.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Smolyak quadrature. This is {\tt smolyak.h} file
|
||||
|
||||
This file defines Smolyak (sparse grid) multidimensional quadrature
|
||||
for non-nested underlying one dimensional quadrature. Let $Q^1_l$ denote
|
||||
the one dimensional quadrature of $l$ level. Let $n_l$ denote a
|
||||
number of points in the $l$ level. Than the Smolyak quadrature can be
|
||||
defined as
|
||||
$$Q^df=\sum_{l\leq\vert k\vert\leq l+d-1}(-1)^{l+d-\vert k\vert-1}\left(\matrix{d-1\cr
|
||||
\vert k\vert-l}\right)(Q_{k_1}^1\otimes\ldots\otimes Q_{k_d}^1)f,$$
|
||||
where $d$ is the dimension, $k$ is $d$-dimensional sequence of
|
||||
integers, and $\vert k\vert$ denotes a sum of the sequence.
|
||||
|
||||
Here we define |smolpit| as Smolyak iterator and |SmolyakQuadrature|.
|
||||
|
||||
@s smolpit int
|
||||
@s SmolyakQuadrature int
|
||||
@s PascalTriangle int
|
||||
@s SymmetrySet int
|
||||
@s symiterator int
|
||||
|
||||
@c
|
||||
#ifndef SMOLYAK_H
|
||||
#define SMOLYAK_H
|
||||
|
||||
#include "int_sequence.h"
|
||||
#include "tl_static.h"
|
||||
#include "vector_function.h"
|
||||
#include "quadrature.h"
|
||||
|
||||
@<|smolpit| class declaration@>;
|
||||
@<|SmolyakQuadrature| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ Here we define the Smolyak point iterator. The Smolyak formula can
|
||||
be broken to a sum of product quadratures with various combinations of
|
||||
levels. The iterator follows this pattern. It maintains an index to a
|
||||
summand and then a point coordinates within the summand (product
|
||||
quadrature). The array of summands to which the |isummand| points is
|
||||
maintained by the |SmolyakQuadrature| class to which the object knows
|
||||
the pointer |smolq|.
|
||||
|
||||
We provide a constructor which points to the beginning of the given
|
||||
summand. This constructor is used in |SmolyakQuadrature::begin| method
|
||||
which approximately divides all the iterators to subsets of equal
|
||||
size.
|
||||
|
||||
@<|smolpit| class declaration@>=
|
||||
class SmolyakQuadrature;
|
||||
|
||||
class smolpit {
|
||||
protected:@;
|
||||
const SmolyakQuadrature* smolq;
|
||||
unsigned int isummand;
|
||||
IntSequence* jseq;
|
||||
ParameterSignal* sig;
|
||||
Vector* p;
|
||||
double w;
|
||||
public:@;
|
||||
smolpit();
|
||||
smolpit(const SmolyakQuadrature& q, unsigned int isum);
|
||||
smolpit(const smolpit& spit);
|
||||
~smolpit();
|
||||
bool operator==(const smolpit& spit) const;
|
||||
bool operator!=(const smolpit& spit) const
|
||||
{@+ return ! operator==(spit);@+}
|
||||
const smolpit& operator=(const smolpit& spit);
|
||||
smolpit& operator++();
|
||||
const ParameterSignal& signal() const
|
||||
{@+ return *sig;@+}
|
||||
const Vector& point() const
|
||||
{@+ return *p;@+}
|
||||
double weight() const
|
||||
{@+ return w;@+}
|
||||
void print() const;
|
||||
protected:@;
|
||||
void setPointAndWeight();
|
||||
};
|
||||
|
||||
@ Here we define the class |SmolyakQuadrature|. It maintains an array
|
||||
of summands of the Smolyak quadrature formula:
|
||||
$$\sum_{l\leq\vert k\vert\leq l+d-1}(-1)^{l+d-\vert
|
||||
k\vert-1}\left(\matrix{d-1\cr
|
||||
\vert k\vert-l}\right)(Q_{k_1}^1\otimes\ldots\otimes Q_{k_d}^1)f$$
|
||||
Each summand is fully specified by sequence $k$. The summands are here
|
||||
represented (besides $k$) also by sequence of number of points in each
|
||||
level selected by $k$, and also by a cumulative number of
|
||||
evaluations. The latter two are added only for convenience.
|
||||
|
||||
The summands in the code are given by |levels|, which is a vector of
|
||||
$k$ sequences, further by |levpoints| which is a vector of sequences
|
||||
of numbers of points in each level, and by |cumevals| which is the
|
||||
cumulative number of points, this is $\sum_k\prod_{i=1}^dn_{k_i}$,
|
||||
where the sum is done through all $k$ before the current.
|
||||
|
||||
The |levels| and |levpoints| vectors are used by |smolpit|.
|
||||
|
||||
@<|SmolyakQuadrature| class declaration@>=
|
||||
// Smolyak quadrature: a signed sum of product quadratures, one summand
// per admissible level sequence k.  |levels| stores the k sequences,
// |levpoints| the per-level point counts for each summand, and
// |cumevals| the cumulative number of evaluations before each summand.
class SmolyakQuadrature : public QuadratureImpl<smolpit> {
  friend class smolpit;
  int level;                     // maximum level l
  const OneDQuadrature& uquad;   // underlying one-dimensional quadrature
  vector<IntSequence> levels;    // k sequences of the summands
  vector<IntSequence> levpoints; // numbers of points in each level, per summand
  vector<int> cumevals;          // cumulative evaluations before each summand
  PascalTriangle psc;
public:
  SmolyakQuadrature(int d, int l, const OneDQuadrature& uq);
  virtual ~SmolyakQuadrature() {}
  virtual int numEvals(int level) const;
  void designLevelForEvals(int max_eval, int& lev, int& evals) const;
protected:
  smolpit begin(int ti, int tn, int level) const;
  unsigned int numSummands() const
  { return levels.size(); }
private:
  int calcNumEvaluations(int level) const;
};
|
||||
|
||||
@ End of {\tt smolyak.h} file
|
|
@ -0,0 +1,177 @@
|
|||
@q $Id: vector_function.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ This is {\tt vector\_function.cpp} file
|
||||
|
||||
@c
|
||||
|
||||
#include "vector_function.h"
|
||||
#include "cpplapack.h"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include <string.h>
|
||||
#include <algorithm>
|
||||
|
||||
@<|ParameterSignal| constructor code@>;
|
||||
@<|ParameterSignal| copy constructor code@>;
|
||||
@<|ParameterSignal::signalAfter| code@>;
|
||||
@<|VectorFunctionSet| constructor 1 code@>;
|
||||
@<|VectorFunctionSet| constructor 2 code@>;
|
||||
@<|VectorFunctionSet| destructor code@>;
|
||||
@<|GaussConverterFunction| constructor code 1@>;
|
||||
@<|GaussConverterFunction| constructor code 2@>;
|
||||
@<|GaussConverterFunction| copy constructor code@>;
|
||||
@<|GaussConverterFunction::eval| code@>;
|
||||
@<|GaussConverterFunction::multiplier| code@>;
|
||||
@<|GaussConverterFunction::calcCholeskyFactor| code@>;
|
||||
|
||||
@ Just an easy constructor of sequence of booleans defaulting to
|
||||
change everywhere.
|
||||
|
||||
@<|ParameterSignal| constructor code@>=
|
||||
// Construct a signal of |n| parameters, all initially flagged as
// changed.
ParameterSignal::ParameterSignal(int n)
  : data(new bool[n]), num(n)
{
  std::fill_n(data, num, true);
}
|
||||
|
||||
@
|
||||
@<|ParameterSignal| copy constructor code@>=
|
||||
// Hard copy of a signal: allocate our own array and duplicate the
// flags.  The copy is done element-wise rather than with
// memcpy(data, sig.data, num): that call copies |num| *bytes*, which is
// wrong on any platform where sizeof(bool) != 1 (sizeof(bool) is
// implementation-defined).
ParameterSignal::ParameterSignal(const ParameterSignal& sig)
  : data(new bool[sig.num]), num(sig.num)
{
  for (int i = 0; i < num; i++)
    data[i] = sig.data[i];
}
|
||||
|
||||
@ This sets |false| (no change) before a given parameter, and |true|
|
||||
(change) after the given parameter (including).
|
||||
|
||||
@<|ParameterSignal::signalAfter| code@>=
|
||||
void ParameterSignal::signalAfter(int l)
|
||||
{
|
||||
for (int i = 0; i < std::min(l,num); i++)
|
||||
data[i] = false;
|
||||
for (int i = l; i < num; i++)
|
||||
data[i] = true;
|
||||
}
|
||||
|
||||
@ This constructs a function set hardcopying also the first.
|
||||
@<|VectorFunctionSet| constructor 1 code@>=
|
||||
// Build a set of |n| independent hard copies of |f|.
VectorFunctionSet::VectorFunctionSet(const VectorFunction& f, int n)
  : funcs(n), first_shallow(false)
{
  for (int slot = n - 1; slot >= 0; slot--)
    funcs[slot] = f.clone();
}
|
||||
|
||||
@ This constructs a function set with shallow copy in the first and
|
||||
hard copies in others.
|
||||
|
||||
@<|VectorFunctionSet| constructor 2 code@>=
|
||||
// Build a set where slot 0 is a shallow alias of |f| itself and the
// remaining n-1 slots hold hard copies.
VectorFunctionSet::VectorFunctionSet(VectorFunction& f, int n)
  : funcs(n), first_shallow(true)
{
  for (int slot = 0; slot < n; slot++)
    funcs[slot] = (slot == 0) ? &f : f.clone();
}
|
||||
|
||||
@ This deletes the functions. The first is deleted only if it was not
|
||||
a shallow copy.
|
||||
|
||||
@<|VectorFunctionSet| destructor code@>=
|
||||
// Delete the owned copies; slot 0 is skipped when it is a shallow
// alias of a caller-owned function.
VectorFunctionSet::~VectorFunctionSet()
{
  for (unsigned int i = 0; i < funcs.size(); i++)
    if (i > 0 || !first_shallow)
      delete funcs[i];
}
|
||||
|
||||
@ Here we construct the object from the given function $f$ and given
|
||||
variance-covariance matrix $\Sigma=$|vcov|. The matrix $A$ is
|
||||
calculated as lower triangular and yields $\Sigma=AA^T$.
|
||||
|
||||
@<|GaussConverterFunction| constructor code 1@>=
|
||||
// Wrap |f| (not owned) for integration wrt a normal density with
// variance-covariance |vcov|; |A| will hold the lower Cholesky factor
// of vcov, so that vcov = A*A^T.
GaussConverterFunction::GaussConverterFunction(VectorFunction& f, const GeneralMatrix& vcov)
  : VectorFunction(f), func(&f), delete_flag(false),
    A(vcov.numRows(), vcov.numRows()),
    multiplier(calcMultiplier())
{
  // todo: raise if |A.numRows() != indim()|
  calcCholeskyFactor(vcov);
}
|
||||
|
||||
@ Here we construct the object in the same way, however we mark the
|
||||
function as to be deleted.
|
||||
|
||||
@<|GaussConverterFunction| constructor code 2@>=
|
||||
// Same as the reference-taking constructor, but takes ownership of |f|
// (|delete_flag| is set, so the destructor will delete it).
GaussConverterFunction::GaussConverterFunction(VectorFunction* f, const GeneralMatrix& vcov)
  : VectorFunction(*f), func(f), delete_flag(true),
    A(vcov.numRows(), vcov.numRows()),
    multiplier(calcMultiplier())
{
  // todo: raise if |A.numRows() != indim()|
  calcCholeskyFactor(vcov);
}
|
||||
|
||||
|
||||
@
|
||||
@<|GaussConverterFunction| copy constructor code@>=
|
||||
// Hard copy: the wrapped function is cloned, hence the copy owns its
// clone.
GaussConverterFunction::GaussConverterFunction(const GaussConverterFunction& f)
  : VectorFunction(f), func(f.func->clone()), delete_flag(true),
    A(f.A), multiplier(f.multiplier)
{
}
|
||||
|
||||
@ Here we evaluate the function
|
||||
$g(y)={1\over\sqrt{\pi^n}}f\left(\sqrt{2}Ay\right)$. Since the matrix $A$ is lower
|
||||
triangular, the change signal for the function $f$ will look like
|
||||
$(0,\ldots,0,1,\ldots,1)$ where the first $1$ is in the same position
|
||||
as the first change in the given signal |sig| of the input
|
||||
$y=$|point|.
|
||||
|
||||
@<|GaussConverterFunction::eval| code@>=
|
||||
void GaussConverterFunction::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
|
||||
{
|
||||
ParameterSignal s(sig);
|
||||
int i = 0;
|
||||
while (i < indim() && !sig[i])
|
||||
i++;
|
||||
s.signalAfter(i);
|
||||
|
||||
Vector x(indim());
|
||||
x.zeros();
|
||||
A.multaVec(x, point);
|
||||
x.mult(sqrt(2.0));
|
||||
|
||||
func->eval(x, s, out);
|
||||
|
||||
out.mult(multiplier);
|
||||
}
|
||||
|
||||
@ This returns $1\over\sqrt{\pi^n}$.
|
||||
@<|GaussConverterFunction::multiplier| code@>=
|
||||
double GaussConverterFunction::calcMultiplier() const
|
||||
{
|
||||
return sqrt(pow(M_PI, -1*indim()));
|
||||
}
|
||||
|
||||
@
|
||||
@<|GaussConverterFunction::calcCholeskyFactor| code@>=
|
||||
// Compute the lower triangular Cholesky factor A of |vcov| such that
// A*A^T = vcov: copy vcov into A, zero the strict upper triangle, and
// let LAPACK's dpotrf overwrite the lower triangle in place.
void GaussConverterFunction::calcCholeskyFactor(const GeneralMatrix& vcov)
|
||||
{
|
||||
A = vcov;
|
||||
|
||||
||||
int rows = A.numRows();
|
||||
// zero the strict upper triangle; dpotrf("L",...) touches only the lower part
for (int i = 0; i < rows; i++)
|
||||
for (int j = i+1; j < rows; j++)
|
||||
A.get(i,j) = 0.0;
|
||||
|
||||
||||
int info;
|
||||
LAPACK_dpotrf("L", &rows, A.base(), &rows, &info);
|
||||
// todo: raise if |info!=0| (dpotrf returns 0 on success)
|
||||
}
|
||||
|
||||
|
||||
@ End of {\tt vector\_function.cpp} file
|
|
@ -0,0 +1,156 @@
|
|||
@q $Id: vector_function.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Vector function. This is {\tt vector\_function.h} file
|
||||
|
||||
This file defines interface for functions taking a vector as an input
|
||||
and returning a vector (with a different size) as an output. We are
|
||||
also introducing a parameter signalling; it is a boolean vector which
|
||||
tracks parameters which were changed from the previous call. The
|
||||
|VectorFunction| implementation can exploit this information and
|
||||
evaluate the function more efficiently. The information can be
|
||||
completely ignored.
|
||||
|
||||
From the signalling reason, and from other reasons, the function
|
||||
evaluation is not |const|.
|
||||
|
||||
@s ParameterSignal int
|
||||
@s VectorFunction int
|
||||
@s VectorFunctionSet int
|
||||
@s GaussConverterFunction int
|
||||
|
||||
@c
|
||||
#ifndef VECTOR_FUNCTION_H
|
||||
#define VECTOR_FUNCTION_H
|
||||
|
||||
#include "Vector.h"
|
||||
#include "GeneralMatrix.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
@<|ParameterSignal| class declaration@>;
|
||||
@<|VectorFunction| class declaration@>;
|
||||
@<|VectorFunctionSet| class declaration@>;
|
||||
@<|GaussConverterFunction| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ This is a simple class representing a vector of booleans. The items
|
||||
might be retrieved or changed, or can be set |true| after some
|
||||
point. This is useful when we multiply the vector with lower
|
||||
triangular matrix.
|
||||
|
||||
|true| means that a parameter was changed.
|
||||
|
||||
@<|ParameterSignal| class declaration@>=
|
||||
// A vector of booleans, one per parameter; |true| means the parameter
// changed since the previous call.  Items can be read or written
// individually, or set via |signalAfter| (useful when multiplying the
// input by a lower triangular matrix).
class ParameterSignal {
protected:
  bool* data; // owned array of per-parameter "changed" flags
  int num;    // number of parameters
public:
  ParameterSignal(int n);
  ParameterSignal(const ParameterSignal& sig);
  ~ParameterSignal()
  { delete [] data; }
  // Assignment completes the rule of three: the class owns a raw
  // array, so the compiler-generated assignment would alias it and
  // cause a double delete.
  const ParameterSignal& operator=(const ParameterSignal& sig)
  {
    if (this != &sig) {
      if (num != sig.num) {
        delete [] data;
        data = new bool[sig.num];
        num = sig.num;
      }
      for (int i = 0; i < num; i++)
        data[i] = sig.data[i];
    }
    return *this;
  }
  // false before position |l|, true from |l| on (inclusive)
  void signalAfter(int l);
  const bool& operator[](int i) const
  { return data[i]; }
  bool& operator[](int i)
  { return data[i]; }
};
|
||||
|
||||
@ This is the abstract class for vector function. At this level of
|
||||
abstraction we only need to know size of input vector and a size of
|
||||
output vector.
|
||||
|
||||
The important thing here is a clone method, we will need to make hard
|
||||
copies of vector functions since the evaluations are not |const|. The
|
||||
hardcopies apply for parallelization.
|
||||
|
||||
@<|VectorFunction| class declaration@>=
|
||||
class VectorFunction {
|
||||
protected:@;
|
||||
int in_dim;
|
||||
int out_dim;
|
||||
public:@;
|
||||
VectorFunction(int idim, int odim)
|
||||
: in_dim(idim), out_dim(odim)@+ {}
|
||||
VectorFunction(const VectorFunction& func)
|
||||
: in_dim(func.in_dim), out_dim(func.out_dim)@+ {}
|
||||
virtual ~VectorFunction()@+ {}
|
||||
virtual VectorFunction* clone() const =0;
|
||||
virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out) =0;
|
||||
int indim() const
|
||||
{@+ return in_dim;@+}
|
||||
int outdim() const
|
||||
{@+ return out_dim;@+}
|
||||
};
|
||||
|
||||
@ This makes |n| copies of |VectorFunction|. The first constructor
|
||||
makes exactly |n| new copies, the second constructor copies only the
|
||||
pointer to the first and others are hard (real) copies.
|
||||
|
||||
The class is useful for making a given number of copies at once, and
|
||||
this set can be reused many times if we need multiple copies of the
|
||||
function (for example for parallelizing the code).
|
||||
|
||||
@<|VectorFunctionSet| class declaration@>=
|
||||
class VectorFunctionSet {
|
||||
protected:@;
|
||||
std::vector<VectorFunction*> funcs;
|
||||
bool first_shallow;
|
||||
public:@;
|
||||
VectorFunctionSet(const VectorFunction& f, int n);
|
||||
VectorFunctionSet(VectorFunction& f, int n);
|
||||
~VectorFunctionSet();
|
||||
VectorFunction& getFunc(int i)
|
||||
{@+ return *(funcs[i]);@+}
|
||||
int getNum() const
|
||||
{@+ return funcs.size(); @+}
|
||||
};
|
||||
|
||||
@ This class wraps another |VectorFunction| to allow integration of a
|
||||
function through normally distributed inputs. Namely, if one wants to
|
||||
integrate
|
||||
$${1\over\sqrt{(2\pi)^n\vert\Sigma\vert}}\int f(x)e^{-{1\over2}x^T\Sigma^{-1}x}{\rm d}x$$
|
||||
then if we write $\Sigma=AA^T$ and $x=\sqrt{2}Ay$, we get integral
|
||||
$${1\over\sqrt{(2\pi)^n\vert\Sigma\vert}}
|
||||
\int f\left(\sqrt{2}Ay\right)e^{-y^Ty}\sqrt{2^n}\vert A\vert{\rm d}y=
|
||||
{1\over\sqrt{\pi^n}}\int f\left(\sqrt{2}Ay\right)e^{-y^Ty}{\rm d}y,$$
|
||||
which means that a given function $f$ we have to wrap to yield a function
|
||||
$$g(y)={1\over\sqrt{\pi^n}}f\left(\sqrt{2}Ay\right).$$
|
||||
This is exactly what this class is doing. This transformation is
|
||||
useful since the Gauss--Hermite points and weights are defined for
|
||||
weighting function $e^{-y^2}$, so this transformation allows using
|
||||
Gauss--Hermite quadratures seamlessly in a context of integration through
|
||||
normally distributed inputs.
|
||||
|
||||
The class maintains a pointer to the function $f$. When the object is
|
||||
constructed by the first constructor, the $f$ is not copied. If the
|
||||
object of this class is copied, then $f$ is copied and we need to
|
||||
remember to destroy it in the destructor; hence |delete_flag|. The
|
||||
second constructor takes a pointer to the function and differs from
|
||||
the first only by setting |delete_flag| to |true|.
|
||||
|
||||
@<|GaussConverterFunction| class declaration@>=
|
||||
class GaussConverterFunction : public VectorFunction {
|
||||
protected:@;
|
||||
VectorFunction* func;
|
||||
bool delete_flag;
|
||||
GeneralMatrix A;
|
||||
double multiplier;
|
||||
public:@;
|
||||
GaussConverterFunction(VectorFunction& f, const GeneralMatrix& vcov);
|
||||
GaussConverterFunction(VectorFunction* f, const GeneralMatrix& vcov);
|
||||
GaussConverterFunction(const GaussConverterFunction& f);
|
||||
virtual ~GaussConverterFunction()
|
||||
{@+ if (delete_flag) delete func; @+}
|
||||
virtual VectorFunction* clone() const
|
||||
{@+ return new GaussConverterFunction(*this);@+}
|
||||
virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
|
||||
private:@;
|
||||
double calcMultiplier() const;
|
||||
void calcCholeskyFactor(const GeneralMatrix& vcov);
|
||||
};
|
||||
|
||||
@ End of {\tt vector\_function.h} file
|
|
@ -0,0 +1,103 @@
|
|||
CC_FLAGS = -I../.. -I../../sylv/cc -I../../integ/cc -I../../tl/cc $(CC_FLAGS)
|
||||
|
||||
#LDFLAGS = -llapack -lcblas -lf77blas -latlas -lg2c -lstdc++
|
||||
|
||||
ifeq ($(CC),)
|
||||
CC := gcc
|
||||
endif
|
||||
|
||||
ifneq ($(LD_LIBRARY_PATH),) # use LD_LIBRARY_PATH from environment
|
||||
LDFLAGS := -Wl,--library-path $(LD_LIBRARY_PATH) $(LDFLAGS)
|
||||
endif
|
||||
|
||||
ifeq ($(DEBUG),yes)
|
||||
CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
|
||||
else
|
||||
CC_FLAGS := $(CC_FLAGS) -O3 -DPOSIX_THREADS
|
||||
endif
|
||||
|
||||
ifeq ($(OS),Windows_NT)
|
||||
CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
|
||||
LDFLAGS := -mno-cygwin -mthreads $(LDFLAGS) -lpthreadGC2
|
||||
ARCH := w32
|
||||
else
|
||||
LDFLAGS := $(LDFLAGS) -lpthread
|
||||
ARCH := linux
|
||||
endif
|
||||
|
||||
|
||||
sylvcppsource := $(wildcard ../../sylv/cc/*.cpp)
|
||||
sylvhsource := $(wildcard ../../sylv/cc/*.h)
|
||||
sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource))
|
||||
|
||||
|
||||
tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
|
||||
tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
|
||||
tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
|
||||
tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
|
||||
tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
|
||||
|
||||
integcwebsource := $(wildcard ../../integ/cc/*.cweb)
|
||||
integcppsource := $(patsubst %.cweb,%.cpp,$(integcwebsource))
|
||||
integhwebsource := $(wildcard ../../integ/cc/*.hweb)
|
||||
integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
|
||||
integobjects := $(patsubst %.cweb,%.o,$(integcwebsource))
|
||||
|
||||
parserhsource := $(wildcard ../../parser/cc/*.h)
|
||||
parsercppsource := $(wildcard ../../parser/cc/*.cpp)
|
||||
|
||||
utilshsource := $(wildcard ../../utils/cc/*.h)
|
||||
utilscppsource := $(wildcard ../../utils/cc/*.cpp)
|
||||
utilsobjects := $(patsubst %.cpp,%.o,$(utilscppsource))
|
||||
|
||||
../../tl/cc/dummy.ch:
|
||||
make -C ../../tl/cc dummy.ch
|
||||
|
||||
../../tl/cc/%.cpp: ../../tl/cc/%.cweb ../../tl/cc/dummy.ch
|
||||
make -C ../../tl/cc $*.cpp
|
||||
|
||||
../../tl/cc/%.h: ../../tl/cc/%.hweb ../../tl/cc/dummy.ch
|
||||
make -C ../../tl/cc $*.h
|
||||
|
||||
../../tl/cc/%.o: ../../tl/cc/%.cpp $(tlhsource)
|
||||
make -C ../../tl/cc $*.o
|
||||
|
||||
../../integ/cc/dummy.ch:
|
||||
make -C ../../integ/cc dummy.ch
|
||||
|
||||
../../integ/cc/%.cpp: ../../integ/cc/%.cweb ../../integ/cc/dummy.ch
|
||||
make -C ../../integ/cc $*.cpp
|
||||
|
||||
../../integ/cc/%.h: ../../integ/cc/%.hweb ../../integ/cc/dummy.ch
|
||||
make -C ../../integ/cc $*.h
|
||||
|
||||
../../integ/cc/%.o: ../../integ/cc/%.cpp $(integhsource) $(tlhsource)
|
||||
make -C ../../integ/cc $*.o
|
||||
|
||||
|
||||
../../sylv/cc/%.o: ../../sylv/cc/%.cpp $(sylvhsource)
|
||||
make -C ../../sylv/cc $*.o
|
||||
|
||||
../../utils/cc/%.o: ../../utils/cc/%.cpp $(utilshsource)
|
||||
make -C ../../utils/cc $*.o
|
||||
|
||||
../../parser/cc/%.o: ../../parser/cc/%.cpp $(parserhsource)
|
||||
make -C ../../parser/cc $*.o
|
||||
|
||||
../../parser/cc/matrix_tab.o:
|
||||
make -C ../../parser/cc matrix_tab.o
|
||||
|
||||
../../parser/cc/matrix_ll.o:
|
||||
make -C ../../parser/cc matrix_ll.o
|
||||
|
||||
quadrature-points: quadrature-points.cpp $(sylvhsource) $(sylvobjects) $(integhsource) $(integobjects) $(parserhsource) $(utilshsource) $(tlhsource) $(tlobjects) $(utilsobjects)
|
||||
$(CC) $(CC_FLAGS) quadrature-points.cpp -o quadrature-points ../../integ/cc/quadrature.o ../../integ/cc/smolyak.o ../../integ/cc/product.o ../../integ/cc/vector_function.o ../../tl/cc/sthread.o ../../tl/cc/symmetry.o ../../tl/cc/equivalence.o ../../tl/cc/int_sequence.o ../../tl/cc/tl_static.o ../../tl/cc/permutation.o ../../parser/cc/matrix_parser.o ../../parser/cc/matrix_tab.o ../../parser/cc/matrix_ll.o ../../parser/cc/parser_exception.o ../../sylv/cc/GeneralMatrix.o ../../sylv/cc/Vector.o ../../sylv/cc/SymSchurDecomp.o ../../sylv/cc/SylvException.o ../../utils/cc/memory_file.o $(LDFLAGS)
|
||||
|
||||
|
||||
clear:
|
||||
make -C ../../tl/cc/ clear
|
||||
make -C ../../integ/cc clear
|
||||
make -C ../../parser/cc clear
|
||||
make -C ../../utils/cc clear
|
||||
make -C ../../sylv/cc clear
|
||||
rm -rf quadrature-points
|
|
@ -0,0 +1,192 @@
|
|||
|
||||
|
||||
#include "parser/cc/matrix_parser.h"
|
||||
#include "utils/cc/memory_file.h"
|
||||
#include "utils/cc/exception.h"
|
||||
#include "sylv/cc/GeneralMatrix.h"
|
||||
#include "sylv/cc/Vector.h"
|
||||
#include "sylv/cc/SymSchurDecomp.h"
|
||||
#include "sylv/cc/SylvException.h"
|
||||
#include "integ/cc/quadrature.h"
|
||||
#include "integ/cc/smolyak.h"
|
||||
#include "integ/cc/product.h"
|
||||
|
||||
#include <getopt.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include <cmath>
|
||||
|
||||
// Command-line parameters of the quadrature-points utility.
struct QuadParams {
  const char* outname;   // output file (last command-line argument)
  const char* vcovname;  // file holding the variance-covariance matrix
  int max_level;         // Smolyak quadrature level (default 3)
  double discard_weight; // relative weight below which nodes are dropped
  QuadParams(int argc, char** argv);
  void check_consistency() const;
private:
  // long-option identifiers returned by getopt_long
  enum {opt_max_level, opt_discard_weight, opt_vcov};
};
|
||||
|
||||
// Parse the command line: long options first, the output file name as
// the last argument.  With no arguments, print a usage message and
// exit (the original code promised "print the help" in a comment but
// exited silently).
QuadParams::QuadParams(int argc, char** argv)
  : outname(NULL), vcovname(NULL), max_level(3), discard_weight(0.0)
{
  if (argc == 1) {
    // print the help and exit
    fprintf(stderr,
            "Usage: %s [--max-level <int>] [--discard-weight <float>] --vcov <file> <output file>\n",
            argv[0]);
    exit(1);
  }

  outname = argv[argc-1];
  argc--;

  struct option const opts [] = {
    {"max-level", required_argument, NULL, opt_max_level},
    {"discard-weight", required_argument, NULL, opt_discard_weight},
    {"vcov", required_argument, NULL, opt_vcov},
    {NULL, 0, NULL, 0}
  };

  int ret;
  int index;
  while (-1 != (ret = getopt_long(argc, argv, "", opts, &index))) {
    switch (ret) {
    case opt_max_level:
      if (1 != sscanf(optarg, "%d", &max_level))
        fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
      break;
    case opt_discard_weight:
      if (1 != sscanf(optarg, "%lf", &discard_weight))
        fprintf(stderr, "Couldn't parse float %s, ignored\n", optarg);
      break;
    case opt_vcov:
      vcovname = optarg;
      break;
    }
  }

  check_consistency();
}
|
||||
|
||||
void QuadParams::check_consistency() const
|
||||
{
|
||||
if (outname == NULL) {
|
||||
fprintf(stderr, "Error: output name not set\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (vcovname == NULL) {
|
||||
fprintf(stderr, "Error: vcov file name not set\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/** Utility class for ordering pointers to vectors according their
|
||||
* ordering. */
|
||||
struct OrderVec {
|
||||
bool operator()(const Vector* a, const Vector* b) const
|
||||
{return *a < *b;}
|
||||
};
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
QuadParams params(argc, argv);
|
||||
|
||||
// open output file for writing
|
||||
FILE* fout;
|
||||
if (NULL == (fout=fopen(params.outname, "w"))) {
|
||||
fprintf(stderr, "Could not open %s for writing\n", params.outname);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
try {
|
||||
|
||||
// open memory file for vcov
|
||||
ogu::MemoryFile vcov_mf(params.vcovname);
|
||||
|
||||
// parse the vcov matrix
|
||||
ogp::MatrixParser mp;
|
||||
mp.parse(vcov_mf.length(), vcov_mf.base());
|
||||
if (mp.nrows() != mp.ncols())
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"VCOV matrix not square");
|
||||
// and put to the GeneralMatrix
|
||||
GeneralMatrix vcov(mp.nrows(), mp.ncols());
|
||||
vcov.zeros();
|
||||
for (ogp::MPIterator it = mp.begin(); it != mp.end(); ++it)
|
||||
vcov.get(it.row(), it.col()) = *it;
|
||||
|
||||
// calculate the factor A of vcov, so that A*A^T=VCOV
|
||||
GeneralMatrix A(vcov.numRows(), vcov.numRows());
|
||||
SymSchurDecomp ssd(vcov);
|
||||
ssd.getFactor(A);
|
||||
|
||||
// construct Gauss-Hermite quadrature
|
||||
GaussHermite ghq;
|
||||
// construct Smolyak quadrature
|
||||
int level = params.max_level;
|
||||
SmolyakQuadrature sq(vcov.numRows(), level, ghq);
|
||||
|
||||
printf("Dimension: %d\n", vcov.numRows());
|
||||
printf("Maximum level: %d\n", level);
|
||||
printf("Total number of nodes: %d\n", sq.numEvals(level));
|
||||
|
||||
// put the points to the vector
|
||||
std::vector<Vector*> points;
|
||||
for (smolpit qit = sq.start(level); qit != sq.end(level); ++qit)
|
||||
points.push_back(new Vector((const Vector&)qit.point()));
|
||||
// sort and uniq
|
||||
OrderVec ordvec;
|
||||
std::sort(points.begin(), points.end(), ordvec);
|
||||
std::vector<Vector*>::iterator new_end = std::unique(points.begin(), points.end());
|
||||
for (std::vector<Vector*>::iterator it = new_end; it != points.end(); ++it)
|
||||
delete *it;
|
||||
points.erase(new_end, points.end());
|
||||
|
||||
printf("Duplicit nodes removed: %d\n", sq.numEvals(level)-points.size());
|
||||
|
||||
// calculate weights and mass
|
||||
double mass = 0.0;
|
||||
std::vector<double> weights;
|
||||
for (int i = 0; i < (int)points.size(); i++) {
|
||||
weights.push_back(std::exp(-points[i]->dot(*(points[i]))));
|
||||
mass += weights.back();
|
||||
}
|
||||
|
||||
// calculate discarded mass
|
||||
double discard_mass = 0.0;
|
||||
for (int i = 0; i < (int)weights.size(); i++)
|
||||
if (weights[i]/mass < params.discard_weight)
|
||||
discard_mass += weights[i];
|
||||
|
||||
printf("Total mass discarded: %f\n", discard_mass/mass);
|
||||
|
||||
// dump the results
|
||||
int npoints = 0;
|
||||
double upscale_weight = 1/(mass-discard_mass);
|
||||
Vector x(vcov.numRows());
|
||||
for (int i = 0; i < (int)weights.size(); i++)
|
||||
if (weights[i]/mass >= params.discard_weight) {
|
||||
// print the upscaled weight
|
||||
fprintf(fout, "%20.16g", upscale_weight*weights[i]);
|
||||
// multiply point with the factor A and sqrt(2)
|
||||
A.multVec(0.0, x, std::sqrt(2.), *(points[i]));
|
||||
// print the coordinates
|
||||
for (int j = 0; j < x.length(); j++)
|
||||
fprintf(fout, " %20.16g", x[j]);
|
||||
fprintf(fout, "\n");
|
||||
npoints++;
|
||||
}
|
||||
|
||||
printf("Final number of points: %d\n", npoints);
|
||||
|
||||
fclose(fout);
|
||||
|
||||
} catch (const SylvException& e) {
|
||||
e.printMessage();
|
||||
return 1;
|
||||
} catch (const ogu::Exception& e) {
|
||||
e.print();
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,62 @@
|
|||
# $Id: Makefile 843 2006-07-28 08:54:19Z tamas $
|
||||
# Copyright 2005, Ondra Kamenik
|
||||
|
||||
|
||||
LD_LIBS := -llapack -lcblas -lf77blas -latlas -lg2c -lpthread
|
||||
CC_FLAGS := -Wall -I../cc -I ../../tl/cc -I../../sylv/cc
|
||||
ifeq ($(DEBUG),yes)
|
||||
CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
|
||||
else
|
||||
CC_FLAGS := $(CC_FLAGS) -O2 -DPOSIX_THREADS
|
||||
endif
|
||||
|
||||
matrix_interface := GeneralMatrix Vector SylvException
|
||||
matobjs := $(patsubst %, ../../sylv/cc/%.o, $(matrix_interface))
|
||||
cwebsource := $(wildcard ../cc/*.cweb)
|
||||
cppsource := $(patsubst %.cweb,%.cpp,$(cwebsource))
|
||||
objects := $(patsubst %.cweb,%.o,$(cwebsource))
|
||||
hwebsource := $(wildcard ../cc/*.hweb)
|
||||
hsource := $(patsubst %.hweb,%.h,$(hwebsource))
|
||||
tlcwebsource := $(wildcard ../../tl/cc/*.cweb)
|
||||
tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
|
||||
tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
|
||||
tlhwebsource := $(wildcard ../../tl/cc/*.hweb)
|
||||
tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
|
||||
|
||||
../cc/dummy.ch:
|
||||
make -C ../cc dummy.ch
|
||||
|
||||
../cc/%.cpp: ../cc/%.cweb ../cc/dummy.ch
|
||||
make -C ../cc $*.cpp
|
||||
|
||||
../cc/%.h: ../cc/%.hweb ../cc/dummy.ch
|
||||
make -C ../cc $*.h
|
||||
|
||||
../cc/%.o: ../cc/%.cpp $(hsource)
|
||||
make -C ../cc $*.o
|
||||
|
||||
../../tl/cc/dummy.ch:
|
||||
make -C ../../tl/cc dummy.ch
|
||||
|
||||
../../tl/cc/%.cpp: ../../tl/cc/%.cweb ../../tl/cc/dummy.ch
|
||||
make -C ../../tl/cc $*.cpp
|
||||
|
||||
../../tl/cc/%.h: ../../tl/cc/%.hweb ../../tl/cc/dummy.ch
|
||||
make -C ../../tl/cc $*.h
|
||||
|
||||
../../tl/cc/%.o: ../../tl/cc/%.cpp $(tlhsource)
|
||||
make -C ../../tl/cc $*.o
|
||||
|
||||
%.o: %.cpp $(hwebsource) $(hsource) $(tlhwebsource) $(tlhsource)
|
||||
$(CC) $(CC_FLAGS) -c $*.cpp
|
||||
|
||||
tests: $(hwebsource) $(cwebsoure) $(hsource) $(cppsource) \
|
||||
$(tlhwebsource) $(tlcwebsoure) $(tlhsource) $(tlcppsource) \
|
||||
tests.o $(objects) $(tlobjects)
|
||||
$(CC) $(CC_FLAGS) $(objects) $(tlobjects) $(matobjs) tests.o -o tests $(LD_LIBS)
|
||||
|
||||
clear:
|
||||
rm -f *.o
|
||||
rm -f tests
|
||||
make -C ../cc clear
|
||||
make -C ../../tl/cc clear
|
|
@ -0,0 +1,544 @@
|
|||
/* $Id: tests.cpp 431 2005-08-16 15:41:01Z kamenik $ */
|
||||
/* Copyright 2005, Ondra Kamenik */
|
||||
|
||||
#include "GeneralMatrix.h"
|
||||
#include "cpplapack.h"
|
||||
#include "SylvException.h"
|
||||
|
||||
#include "rfs_tensor.h"
|
||||
#include "normal_moments.h"
|
||||
|
||||
#include "vector_function.h"
|
||||
#include "quadrature.h"
|
||||
#include "smolyak.h"
|
||||
#include "product.h"
|
||||
#include "quasi_mcarlo.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/time.h>
|
||||
#include <math.h>
|
||||
|
||||
const int num_threads = 2; // does nothing if DEBUG defined
|
||||
|
||||
// evaluates unfolded (Dx)^k power, where x is a vector, D is a
|
||||
// Cholesky factor (lower triangular)
|
||||
class MomentFunction : public VectorFunction {
|
||||
GeneralMatrix D;
|
||||
int k;
|
||||
public:
|
||||
MomentFunction(const GeneralMatrix& inD, int kk)
|
||||
: VectorFunction(inD.numRows(), UFSTensor::calcMaxOffset(inD.numRows(), kk)),
|
||||
D(inD), k(kk) {}
|
||||
MomentFunction(const MomentFunction& func)
|
||||
: VectorFunction(func), D(func.D), k(func.k) {}
|
||||
VectorFunction* clone() const
|
||||
{return new MomentFunction(*this);}
|
||||
void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
|
||||
};
|
||||
|
||||
void MomentFunction::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
|
||||
{
|
||||
if (point.length() != indim() || out.length() != outdim()) {
|
||||
printf("Wrong length of vectors in MomentFunction::eval\n");
|
||||
exit(1);
|
||||
}
|
||||
Vector y(point);
|
||||
y.zeros();
|
||||
D.multaVec(y, point);
|
||||
URSingleTensor ypow(y, k);
|
||||
out.zeros();
|
||||
out.add(1.0, ypow.getData());
|
||||
}
|
||||
|
||||
class TensorPower : public VectorFunction {
|
||||
int k;
|
||||
public:
|
||||
TensorPower(int nvar, int kk)
|
||||
: VectorFunction(nvar, UFSTensor::calcMaxOffset(nvar, kk)), k(kk) {}
|
||||
TensorPower(const TensorPower& func)
|
||||
: VectorFunction(func), k(func.k) {}
|
||||
VectorFunction* clone() const
|
||||
{return new TensorPower(*this);}
|
||||
void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
|
||||
};
|
||||
|
||||
void TensorPower::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
|
||||
{
|
||||
if (point.length() != indim() || out.length() != outdim()) {
|
||||
printf("Wrong length of vectors in TensorPower::eval\n");
|
||||
exit(1);
|
||||
}
|
||||
URSingleTensor ypow(point, k);
|
||||
out.zeros();
|
||||
out.add(1.0, ypow.getData());
|
||||
}
|
||||
|
||||
|
||||
// evaluates (1+1/d)^d*(x_1*...*x_d)^(1/d), its integral over <0,1>^d
|
||||
// is 1.0, and its variation grows exponentially
|
||||
// d = dim
|
||||
class Function1 : public VectorFunction {
|
||||
int dim;
|
||||
public:
|
||||
Function1(int d)
|
||||
: VectorFunction(d, 1), dim(d) {}
|
||||
Function1(const Function1& f)
|
||||
: VectorFunction(f.indim(), f.outdim()), dim(f.dim) {}
|
||||
VectorFunction* clone() const
|
||||
{return new Function1(*this);}
|
||||
virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
|
||||
};
|
||||
|
||||
void Function1::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
|
||||
{
|
||||
if (point.length() != dim || out.length() != 1) {
|
||||
printf("Wrong length of vectors in Function1::eval\n");
|
||||
exit(1);
|
||||
}
|
||||
double r = 1;
|
||||
for (int i = 0; i < dim; i++)
|
||||
r *= point[i];
|
||||
r = pow(r, 1.0/dim);
|
||||
r *= pow(1.0 + 1.0/dim, (double)dim);
|
||||
out[0] = r;
|
||||
}
|
||||
|
||||
// evaluates Function1 but with transformation x_i=0.5(y_i+1)
|
||||
// this makes the new function integrate over <-1,1>^d to 1.0
|
||||
class Function1Trans : public Function1 {
|
||||
public:
|
||||
Function1Trans(int d)
|
||||
: Function1(d) {}
|
||||
Function1Trans(const Function1Trans& func)
|
||||
: Function1(func) {}
|
||||
VectorFunction* clone() const
|
||||
{return new Function1Trans(*this);}
|
||||
virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
|
||||
};
|
||||
|
||||
void Function1Trans::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
|
||||
{
|
||||
Vector p(point.length());
|
||||
for (int i = 0; i < p.length(); i++)
|
||||
p[i] = 0.5*(point[i]+1);
|
||||
Function1::eval(p, sig, out);
|
||||
out.mult(pow(0.5,indim()));
|
||||
}
|
||||
|
||||
|
||||
// WallTimer class. Constructor saves the wall time, destructor
|
||||
// cancels the current time from the saved, and prints the message
|
||||
// with time information
|
||||
// WallTimer: saves the wall-clock time at construction; at destruction
// prints the message followed by the elapsed seconds (and a newline if
// |new_line| is set).
class WallTimer {
  char mes[100];        // message printed before the elapsed time
  struct timeval start; // construction time
  bool new_line;        // append '\n' after the time?
public:
  WallTimer(const char* m, bool nl = true)
  {
    // bounded copy: the original strcpy would overflow |mes| for
    // messages of 100+ characters
    strncpy(mes, m, sizeof(mes)-1);
    mes[sizeof(mes)-1] = '\0';
    new_line = nl;
    gettimeofday(&start, NULL);
  }
  ~WallTimer()
  {
    struct timeval end;
    gettimeofday(&end, NULL);
    printf("%s%8.4g", mes,
           end.tv_sec-start.tv_sec + (end.tv_usec-start.tv_usec)*1.0e-6);
    if (new_line)
      printf("\n");
  }
};
|
||||
|
||||
/****************************************************/
|
||||
/* declaration of TestRunnable class */
|
||||
/****************************************************/
|
||||
class TestRunnable {
|
||||
char name[100];
|
||||
public:
|
||||
int dim; // dimension of the solved problem
|
||||
int nvar; // number of variable of the solved problem
|
||||
TestRunnable(const char* n, int d, int nv)
|
||||
: dim(d), nvar(nv)
|
||||
{strncpy(name, n, 100);}
|
||||
bool test() const;
|
||||
virtual bool run() const =0;
|
||||
const char* getName() const
|
||||
{return name;}
|
||||
protected:
|
||||
static bool smolyak_normal_moments(const GeneralMatrix& m, int imom, int level);
|
||||
static bool product_normal_moments(const GeneralMatrix& m, int imom, int level);
|
||||
static bool qmc_normal_moments(const GeneralMatrix& m, int imom, int level);
|
||||
static bool smolyak_product_cube(const VectorFunction& func, const Vector& res,
|
||||
double tol, int level);
|
||||
static bool qmc_cube(const VectorFunction& func, double res, double tol, int level);
|
||||
};
|
||||
|
||||
bool TestRunnable::test() const
|
||||
{
|
||||
printf("Running test <%s>\n",name);
|
||||
bool passed;
|
||||
{
|
||||
WallTimer tim("Wall clock time ", false);
|
||||
passed = run();
|
||||
}
|
||||
if (passed) {
|
||||
printf("............................ passed\n\n");
|
||||
return passed;
|
||||
} else {
|
||||
printf("............................ FAILED\n\n");
|
||||
return passed;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/****************************************************/
/*     definition of TestRunnable static methods    */
/****************************************************/
// Integrates the imom-th tensor power of a normal variable with
// variance-covariance m*m' by Smolyak quadrature on Gauss-Hermite
// nodes, and compares against the closed-form moments from
// UNormalMoments. Returns true when the maximal error is below 1e-7.
bool TestRunnable::smolyak_normal_moments(const GeneralMatrix& m, int imom, int level)
{
	// build the variance-covariance matrix msq = m*m'
	GeneralMatrix mtr(m, "transpose");
	GeneralMatrix msq(m, mtr);

	// make vector function: imom-th tensor power of the integration
	// variable; GaussConverterFunction presumably adapts it for
	// integration against the N(0,msq) density -- confirm in integ/cc
	int dim = m.numRows();
	TensorPower tp(dim, imom);
	GaussConverterFunction func(tp, msq);

	// smolyak quadrature
	Vector smol_out(UFSTensor::calcMaxOffset(dim, imom));
	{
		WallTimer tim("\tSmolyak quadrature time: ");
		GaussHermite gs;
		SmolyakQuadrature quad(dim, level, gs);
		quad.integrate(func, level, num_threads, smol_out);
		printf("\tNumber of Smolyak evaluations: %d\n", quad.numEvals(level));
	}

	// check against theoretical moments
	UNormalMoments moments(imom, msq);
	smol_out.add(-1.0, (moments.get(Symmetry(imom)))->getData());
	printf("\tError: %16.12g\n", smol_out.getMax());
	return smol_out.getMax() < 1.e-7;
}
|
||||
|
||||
bool TestRunnable::product_normal_moments(const GeneralMatrix& m, int imom, int level)
|
||||
{
|
||||
// first make m*m' and then Cholesky factor
|
||||
GeneralMatrix mtr(m, "transpose");
|
||||
GeneralMatrix msq(m, mtr);
|
||||
|
||||
// make vector function
|
||||
int dim = m.numRows();
|
||||
TensorPower tp(dim, imom);
|
||||
GaussConverterFunction func(tp, msq);
|
||||
|
||||
// product quadrature
|
||||
Vector prod_out(UFSTensor::calcMaxOffset(dim, imom));
|
||||
{
|
||||
WallTimer tim("\tProduct quadrature time: ");
|
||||
GaussHermite gs;
|
||||
ProductQuadrature quad(dim, gs);
|
||||
quad.integrate(func, level, num_threads, prod_out);
|
||||
printf("\tNumber of product evaluations: %d\n", quad.numEvals(level));
|
||||
}
|
||||
|
||||
// check against theoretical moments
|
||||
UNormalMoments moments(imom, msq);
|
||||
prod_out.add(-1.0, (moments.get(Symmetry(imom)))->getData());
|
||||
printf("\tError: %16.12g\n", prod_out.getMax());
|
||||
return prod_out.getMax() < 1.e-7;
|
||||
}
|
||||
|
||||
// Checks normal moments computed by quasi-Monte Carlo quadrature under
// three permutation (scrambling) schemes; passes only if the worst of
// the three errors is below 1e-7.
bool TestRunnable::qmc_normal_moments(const GeneralMatrix& m, int imom, int level)
{
	// first make m*m' and then Cholesky factor
	GeneralMatrix mtr(m, "transpose");
	GeneralMatrix msq(m, mtr);
	GeneralMatrix mchol(msq);
	int rows = mchol.numRows();
	// zero the strict upper triangle so mchol ends up lower triangular;
	// dpotrf("L") only writes the lower part
	for (int i = 0; i < rows; i++)
		for (int j = i+1; j < rows; j++)
			mchol.get(i,j) = 0.0;
	int info;
	LAPACK_dpotrf("L", &rows, mchol.base(), &rows, &info);

	// make vector function from the Cholesky factor
	MomentFunction func(mchol, imom);

	// permutation schemes used to scramble the QMC sequence
	WarnockPerScheme wps;
	ReversePerScheme rps;
	IdentityPerScheme ips;
	PermutationScheme* scheme[] = {&wps, &rps, &ips};
	const char* labs[] = {"Warnock", "Reverse", "Identity"};

	// theoretical result: closed-form moments of N(0,msq)
	int dim = mchol.numRows();
	UNormalMoments moments(imom, msq);
	Vector res((const Vector&)((moments.get(Symmetry(imom)))->getData()));

	// quasi monte carlo normal quadrature, once per scheme
	// NOTE(review): qmc_out is not reset between iterations;
	// presumably quad.integrate overwrites it -- confirm
	double max_error = 0.0;
	Vector qmc_out(UFSTensor::calcMaxOffset(dim, imom));
	for (int i = 0; i < 3; i++) {
		{
			char mes[100];
			sprintf(mes, "\tQMC normal quadrature time %8s: ", labs[i]);
			WallTimer tim(mes);
			QMCarloNormalQuadrature quad(dim, level, *(scheme[i]));
			quad.integrate(func, level, num_threads, qmc_out);
		}
		// qmc_out := qmc_out - res; its max entry is the error
		qmc_out.add(-1.0, res);
		printf("\tError %8s: %16.12g\n", labs[i], qmc_out.getMax());
		if (qmc_out.getMax() > max_error) {
			max_error = qmc_out.getMax();
		}
	}

	return max_error < 1.e-7;
}
|
||||
|
||||
|
||||
bool TestRunnable::smolyak_product_cube(const VectorFunction& func, const Vector& res,
|
||||
double tol, int level)
|
||||
{
|
||||
if (res.length() != func.outdim()) {
|
||||
fprintf(stderr, "Incompatible dimensions of check value and function.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
GaussLegendre glq;
|
||||
Vector out(func.outdim());
|
||||
double smol_error;
|
||||
double prod_error;
|
||||
{
|
||||
WallTimer tim("\tSmolyak quadrature time: ");
|
||||
SmolyakQuadrature quad(func.indim(), level, glq);
|
||||
quad.integrate(func, level, num_threads, out);
|
||||
out.add(-1.0, res);
|
||||
smol_error = out.getMax();
|
||||
printf("\tNumber of Smolyak evaluations: %d\n", quad.numEvals(level));
|
||||
printf("\tError: %16.12g\n", smol_error);
|
||||
}
|
||||
{
|
||||
WallTimer tim("\tProduct quadrature time: ");
|
||||
ProductQuadrature quad(func.indim(), glq);
|
||||
quad.integrate(func, level, num_threads, out);
|
||||
out.add(-1.0, res);
|
||||
prod_error = out.getMax();
|
||||
printf("\tNumber of product evaluations: %d\n", quad.numEvals(level));
|
||||
printf("\tError: %16.12g\n", prod_error);
|
||||
}
|
||||
|
||||
return smol_error < tol && prod_error < tol;
|
||||
}
|
||||
|
||||
bool TestRunnable::qmc_cube(const VectorFunction& func, double res, double tol, int level)
|
||||
{
|
||||
Vector r(1);
|
||||
double error1;
|
||||
{
|
||||
WallTimer tim("\tQuasi-Monte Carlo (Warnock scrambling) time: ");
|
||||
WarnockPerScheme wps;
|
||||
QMCarloCubeQuadrature qmc(func.indim(), level, wps);
|
||||
// qmc.savePoints("warnock.txt", level);
|
||||
qmc.integrate(func, level, num_threads, r);
|
||||
error1 = std::max(res - r[0], r[0] - res);
|
||||
printf("\tQuasi-Monte Carlo (Warnock scrambling) error: %16.12g\n",
|
||||
error1);
|
||||
}
|
||||
double error2;
|
||||
{
|
||||
WallTimer tim("\tQuasi-Monte Carlo (reverse scrambling) time: ");
|
||||
ReversePerScheme rps;
|
||||
QMCarloCubeQuadrature qmc(func.indim(), level, rps);
|
||||
// qmc.savePoints("reverse.txt", level);
|
||||
qmc.integrate(func, level, num_threads, r);
|
||||
error2 = std::max(res - r[0], r[0] - res);
|
||||
printf("\tQuasi-Monte Carlo (reverse scrambling) error: %16.12g\n",
|
||||
error2);
|
||||
}
|
||||
double error3;
|
||||
{
|
||||
WallTimer tim("\tQuasi-Monte Carlo (no scrambling) time: ");
|
||||
IdentityPerScheme ips;
|
||||
QMCarloCubeQuadrature qmc(func.indim(), level, ips);
|
||||
// qmc.savePoints("identity.txt", level);
|
||||
qmc.integrate(func, level, num_threads, r);
|
||||
error3 = std::max(res - r[0], r[0] - res);
|
||||
printf("\tQuasi-Monte Carlo (no scrambling) error: %16.12g\n",
|
||||
error3);
|
||||
}
|
||||
|
||||
|
||||
return error1 < tol && error2 < tol && error3 < tol;
|
||||
}
|
||||
|
||||
/****************************************************/
|
||||
/* definition of TestRunnable subclasses */
|
||||
/****************************************************/
|
||||
class SmolyakNormalMom1 : public TestRunnable {
|
||||
public:
|
||||
SmolyakNormalMom1()
|
||||
: TestRunnable("Smolyak normal moments (dim=2, level=4, order=4)", 4, 2) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
GeneralMatrix m(2,2);
|
||||
m.zeros(); m.get(0,0)=1; m.get(1,1)=1;
|
||||
return smolyak_normal_moments(m, 4, 4);
|
||||
}
|
||||
};
|
||||
|
||||
class SmolyakNormalMom2 : public TestRunnable {
|
||||
public:
|
||||
SmolyakNormalMom2()
|
||||
: TestRunnable("Smolyak normal moments (dim=3, level=8, order=8)", 8, 3) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
GeneralMatrix m(3,3);
|
||||
m.zeros();
|
||||
m.get(0,0)=1; m.get(0,2)=0.5; m.get(1,1)=1;
|
||||
m.get(1,0)=0.5;m.get(2,2)=2;m.get(2,1)=4;
|
||||
return smolyak_normal_moments(m, 8, 8);
|
||||
}
|
||||
};
|
||||
|
||||
class ProductNormalMom1 : public TestRunnable {
|
||||
public:
|
||||
ProductNormalMom1()
|
||||
: TestRunnable("Product normal moments (dim=2, level=4, order=4)", 4, 2) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
GeneralMatrix m(2,2);
|
||||
m.zeros(); m.get(0,0)=1; m.get(1,1)=1;
|
||||
return product_normal_moments(m, 4, 4);
|
||||
}
|
||||
};
|
||||
|
||||
class ProductNormalMom2 : public TestRunnable {
|
||||
public:
|
||||
ProductNormalMom2()
|
||||
: TestRunnable("Product normal moments (dim=3, level=8, order=8)", 8, 3) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
GeneralMatrix m(3,3);
|
||||
m.zeros();
|
||||
m.get(0,0)=1; m.get(0,2)=0.5; m.get(1,1)=1;
|
||||
m.get(1,0)=0.5;m.get(2,2)=2;m.get(2,1)=4;
|
||||
return product_normal_moments(m, 8, 8);
|
||||
}
|
||||
};
|
||||
|
||||
class QMCNormalMom1 : public TestRunnable {
|
||||
public:
|
||||
QMCNormalMom1()
|
||||
: TestRunnable("QMC normal moments (dim=2, level=1000, order=4)", 4, 2) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
GeneralMatrix m(2,2);
|
||||
m.zeros(); m.get(0,0)=1; m.get(1,1)=1;
|
||||
return qmc_normal_moments(m, 4, 1000);
|
||||
}
|
||||
};
|
||||
|
||||
class QMCNormalMom2 : public TestRunnable {
|
||||
public:
|
||||
QMCNormalMom2()
|
||||
: TestRunnable("QMC normal moments (dim=3, level=10000, order=8)", 8, 3) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
GeneralMatrix m(3,3);
|
||||
m.zeros();
|
||||
m.get(0,0)=1; m.get(0,2)=0.5; m.get(1,1)=1;
|
||||
m.get(1,0)=0.5;m.get(2,2)=2;m.get(2,1)=4;
|
||||
return qmc_normal_moments(m, 8, 10000);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
// note that here we pass 1,1 to tls since smolyak has its own PascalTriangle
|
||||
class F1GaussLegendre : public TestRunnable {
|
||||
public:
|
||||
F1GaussLegendre()
|
||||
: TestRunnable("Function1 Gauss-Legendre (dim=6, level=13", 1, 1) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
Function1Trans f1(6);
|
||||
Vector res(1); res[0] = 1.0;
|
||||
return smolyak_product_cube(f1, res, 1e-2, 13);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class F1QuasiMCarlo : public TestRunnable {
|
||||
public:
|
||||
F1QuasiMCarlo()
|
||||
: TestRunnable("Function1 Quasi-Monte Carlo (dim=6, level=1000000)", 1, 1) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
Function1 f1(6);
|
||||
return qmc_cube(f1, 1.0, 1.e-4, 1000000);
|
||||
}
|
||||
};
|
||||
|
||||
int main()
|
||||
{
|
||||
TestRunnable* all_tests[50];
|
||||
// fill in vector of all tests
|
||||
int num_tests = 0;
|
||||
all_tests[num_tests++] = new SmolyakNormalMom1();
|
||||
all_tests[num_tests++] = new SmolyakNormalMom2();
|
||||
all_tests[num_tests++] = new ProductNormalMom1();
|
||||
all_tests[num_tests++] = new ProductNormalMom2();
|
||||
all_tests[num_tests++] = new QMCNormalMom1();
|
||||
all_tests[num_tests++] = new QMCNormalMom2();
|
||||
/*
|
||||
all_tests[num_tests++] = new F1GaussLegendre();
|
||||
all_tests[num_tests++] = new F1QuasiMCarlo();
|
||||
*/
|
||||
// find maximum dimension and maximum nvar
|
||||
int dmax=0;
|
||||
int nvmax = 0;
|
||||
for (int i = 0; i < num_tests; i++) {
|
||||
if (dmax < all_tests[i]->dim)
|
||||
dmax = all_tests[i]->dim;
|
||||
if (nvmax < all_tests[i]->nvar)
|
||||
nvmax = all_tests[i]->nvar;
|
||||
}
|
||||
tls.init(dmax, nvmax); // initialize library
|
||||
THREAD_GROUP::max_parallel_threads = num_threads;
|
||||
|
||||
// launch the tests
|
||||
int success = 0;
|
||||
for (int i = 0; i < num_tests; i++) {
|
||||
try {
|
||||
if (all_tests[i]->test())
|
||||
success++;
|
||||
} catch (const TLException& e) {
|
||||
printf("Caugth TL exception in <%s>:\n", all_tests[i]->getName());
|
||||
e.print();
|
||||
} catch (SylvException& e) {
|
||||
printf("Caught Sylv exception in <%s>:\n", all_tests[i]->getName());
|
||||
e.printMessage();
|
||||
}
|
||||
}
|
||||
|
||||
printf("There were %d tests that failed out of %d tests run.\n",
|
||||
num_tests - success, num_tests);
|
||||
|
||||
// destroy
|
||||
for (int i = 0; i < num_tests; i++) {
|
||||
delete all_tests[i];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
# $Id: Makefile 2344 2009-02-09 20:36:08Z michel $
# Copyright 2004, Ondra Kamenik

include ../Makefile.include

CC_FLAGS := -I../tl/cc -I../sylv/cc -I../integ/cc $(CC_FLAGS)

# Fix: the sylvester source list was assigned to a misspelled variable
# (xbsylvcppsource), which left sylvobjects empty.
sylvcppsource := $(wildcard ../sylv/cc/*.cpp)
sylvhsource := $(wildcard ../sylv/cc/*.h)
sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource))
tlcwebsource := $(wildcard ../tl/cc/*.cweb)
tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
tlhwebsource := $(wildcard ../tl/cc/*.hweb)
tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))
integhwebsource := $(wildcard ../integ/cc/*.hweb)
integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
cwebsource := $(wildcard *.cweb)
cppsource := $(patsubst %.cweb,%.cpp,$(cwebsource))
objects := $(patsubst %.cweb,%.o,$(cwebsource))
hwebsource := $(wildcard *.hweb)
hsource := $(patsubst %.hweb,%.h,$(hwebsource))

../integ/cc/dummy.ch:
	make -C ../integ/cc dummy.ch

../tl/cc/dummy.ch:
	make -C ../tl/cc dummy.ch

../tl/cc/%.cpp: ../tl/cc/%.cweb ../tl/cc/dummy.ch
	make -C ../tl/cc $*.cpp

../tl/cc/%.h: ../tl/cc/%.hweb ../tl/cc/dummy.ch
	make -C ../tl/cc $*.h

../integ/cc/%.h: ../integ/cc/%.hweb ../integ/cc/dummy.ch
	make -C ../integ/cc $*.h

../tl/cc/%.o: ../tl/cc/%.cpp $(tlhsource)
	make -C ../tl/cc $*.o

../sylv/cc/%.o: ../sylv/cc/%.cpp $(sylvhsource)
	make -C ../sylv/cc $*.o

dummy.ch:
	touch dummy.ch

%.cpp: %.cweb dummy.ch
	ctangle -bhp $*.cweb dummy.ch $*.cpp

%.h: %.hweb dummy.ch
	ctangle -bhp $*.hweb dummy.ch $*.h

%.o: %.cpp $(hwebsource) $(hsource) $(tlhwebsource) $(tlhsource) \
     $(integhwebsource) $(integhsource)
	$(CC) $(CC_FLAGS) -c $*.cpp

# Fix: $(cwebsoure) and $(tlcwebsoure) were misspelled (missing 'c'),
# making those prerequisite lists silently empty.
tests: $(hwebsource) $(cwebsource) $(hsource) $(cppsource) \
       $(tlhwebsource) $(tlcwebsource) $(tlhsource) $(tlcppsource) \
       $(sylvhsource) $(sylvcppsource) \
       tests.o $(objects) $(tlobjects) $(sylvobjects)
	$(CC) $(CC_FLAGS) $(objects) $(tlobjects) $(sylvobjects) tests.o -o tests $(LD_LIBS)

kord.pdf: doc

doc: main.web $(hwebsource) $(cwebsource)
	cweave -bhp main.web
	pdftex main
	mv main.pdf kord.pdf

clear:
	rm -f $(hsource)
	rm -f $(cppsource)
	rm -f *.o
	rm -f tests
	make -C ../tl/cc clear
	make -C ../sylv/cc clear
	rm -f main.{idx,dvi,pdf,scn,log,tex,toc}
|
|
@ -0,0 +1,421 @@
|
|||
@q $Id: approximation.cweb 2344 2009-02-09 20:36:08Z michel $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt approximation.cpp} file.
|
||||
|
||||
@c
|
||||
#include "kord_exception.h"
|
||||
#include "approximation.h"
|
||||
#include "first_order.h"
|
||||
#include "korder_stoch.h"
|
||||
|
||||
@<|ZAuxContainer| constructor code@>;
|
||||
@<|ZAuxContainer::getType| code@>;
|
||||
@<|Approximation| constructor code@>;
|
||||
@<|Approximation| destructor code@>;
|
||||
@<|Approximation::getFoldDecisionRule| code@>;
|
||||
@<|Approximation::getUnfoldDecisionRule| code@>;
|
||||
@<|Approximation::approxAtSteady| code@>;
|
||||
@<|Approximation::walkStochSteady| code@>;
|
||||
@<|Approximation::saveRuleDerivs| code@>;
|
||||
@<|Approximation::calcStochShift| code@>;
|
||||
@<|Approximation::check| code@>;
|
||||
@<|Approximation::calcYCov| code@>;
|
||||
|
||||
@
|
||||
@<|ZAuxContainer| constructor code@>=
|
||||
ZAuxContainer::ZAuxContainer(const _Ctype* gss, int ngss, int ng, int ny, int nu)
|
||||
: StackContainer<FGSTensor>(4,1)
|
||||
{
|
||||
stack_sizes[0] = ngss; stack_sizes[1] = ng;
|
||||
stack_sizes[2] = ny; stack_sizes[3] = nu;
|
||||
conts[0] = gss;
|
||||
calculateOffsets();
|
||||
}
|
||||
|
||||
|
||||
@ The |getType| method corresponds to
|
||||
$f(g^{**}(y^*,u',\sigma),0,0,0)$. For the first argument we return
|
||||
|matrix|, for other three we return |zero|.
|
||||
|
||||
@<|ZAuxContainer::getType| code@>=
|
||||
// Per the section documentation above, this container stands for
// f(g**(y*,u',sigma),0,0,0): only the first stack (i == 0) may yield a
// |matrix|; the remaining three stacks are always |zero|.
ZAuxContainer::itype ZAuxContainer::getType(int i, const Symmetry& s) const
{
	if (i == 0)
		// NOTE(review): s[2] appears to index the sigma/third dimension
		// of the symmetry; a nonzero entry makes g** contribute nothing
		// -- confirm against the Symmetry layout in tl/cc
		if (s[2] > 0)
			return zero;
		else
			return matrix;
	return zero;
}
|
||||
|
||||
|
||||
@
|
||||
@<|Approximation| constructor code@>=
|
||||
Approximation::Approximation(DynamicModel& m, Journal& j, int ns, bool dr_centr, double qz_crit)
|
||||
: model(m), journal(j), rule_ders(NULL), rule_ders_ss(NULL), fdr(NULL), udr(NULL),
|
||||
ypart(model.nstat(), model.npred(), model.nboth(), model.nforw()),
|
||||
mom(UNormalMoments(model.order(), model.getVcov())), nvs(4), steps(ns),
|
||||
dr_centralize(dr_centr), qz_criterium(qz_crit), ss(ypart.ny(), steps+1)
|
||||
{
|
||||
nvs[0] = ypart.nys(); nvs[1] = model.nexog();
|
||||
nvs[2] = model.nexog(); nvs[3] = 1;
|
||||
|
||||
ss.nans();
|
||||
}
|
||||
|
||||
@
|
||||
@<|Approximation| destructor code@>=
|
||||
// Releases the owned derivative containers and decision rules; each
// pointer may be NULL when the corresponding step was never run
// (rule_ders/rule_ders_ss are created in saveRuleDerivs, fdr in
// walkStochSteady).
Approximation::~Approximation()
{
	if (rule_ders_ss) delete rule_ders_ss;
	if (rule_ders) delete rule_ders;
	if (fdr) delete fdr;
	if (udr) delete udr;
}
|
||||
|
||||
@ This just returns |fdr| with a check that it is created.
|
||||
@<|Approximation::getFoldDecisionRule| code@>=
|
||||
const FoldDecisionRule& Approximation::getFoldDecisionRule() const
|
||||
{
|
||||
KORD_RAISE_IF(fdr == NULL,
|
||||
"Folded decision rule has not been created in Approximation::getFoldDecisionRule");
|
||||
return *fdr;
|
||||
}
|
||||
|
||||
|
||||
@ This just returns |udr| with a check that it is created.
|
||||
@<|Approximation::getUnfoldDecisionRule| code@>=
|
||||
const UnfoldDecisionRule& Approximation::getUnfoldDecisionRule() const
|
||||
{
|
||||
KORD_RAISE_IF(udr == NULL,
|
||||
"Unfolded decision rule has not been created in Approximation::getUnfoldDecisionRule");
|
||||
return *udr;
|
||||
}
|
||||
|
||||
|
||||
@ This methods assumes that the deterministic steady state is
|
||||
|model.getSteady()|. It makes an approximation about it and stores the
|
||||
derivatives to |rule_ders| and |rule_ders_ss|. Also it runs a |check|
|
||||
for $\sigma=0$.
|
||||
|
||||
@<|Approximation::approxAtSteady| code@>=
|
||||
void Approximation::approxAtSteady()
|
||||
{
|
||||
model.calcDerivativesAtSteady();
|
||||
FirstOrder fo(model.nstat(), model.npred(), model.nboth(), model.nforw(),
|
||||
model.nexog(), *(model.getModelDerivatives().get(Symmetry(1))),
|
||||
journal, qz_criterium);
|
||||
KORD_RAISE_IF_X(! fo.isStable(),
|
||||
"The model is not Blanchard-Kahn stable",
|
||||
KORD_MD_NOT_STABLE);
|
||||
|
||||
if (model.order() >= 2) {
|
||||
KOrder korder(model.nstat(), model.npred(), model.nboth(), model.nforw(),
|
||||
model.getModelDerivatives(), fo.getGy(), fo.getGu(),
|
||||
model.getVcov(), journal);
|
||||
korder.switchToFolded();
|
||||
for (int k = 2; k <= model.order(); k++)
|
||||
korder.performStep<KOrder::fold>(k);
|
||||
|
||||
saveRuleDerivs(korder.getFoldDers());
|
||||
} else {
|
||||
FirstOrderDerivs<KOrder::fold> fo_ders(fo);
|
||||
saveRuleDerivs(fo_ders);
|
||||
}
|
||||
check(0.0);
|
||||
}
|
||||
|
||||
@ This is the core routine of |Approximation| class.
|
||||
|
||||
First we solve for the approximation about the deterministic steady
|
||||
state. Then we perform |steps| cycles toward the stochastic steady
|
||||
state. Each cycle moves the size of shocks by |dsigma=1.0/steps|. At
|
||||
the end of a cycle, we have |rule_ders| being the derivatives at
|
||||
stochastic steady state for $\sigma=sigma\_so\_far+dsigma$ and
|
||||
|model.getSteady()| being the steady state.
|
||||
|
||||
If the number of |steps| is zero, the decision rule |dr| at the bottom
|
||||
is created from derivatives about deterministic steady state, with
|
||||
size of $\sigma=1$. Otherwise, the |dr| is created from the
|
||||
approximation about stochastic steady state with $\sigma=0$.
|
||||
|
||||
Within each cycle, we first make a backup of the last steady (from
|
||||
initialization or from a previous cycle), then we calculate the fix
|
||||
point of the last rule with $\sigma=dsigma$. This becomes a new steady
|
||||
state at the $\sigma=sigma\_so\_far+dsigma$. We calculate expectations
|
||||
of $g^{**}(y,\sigma\eta_{t+1},\sigma$ expressed as a Taylor expansion
|
||||
around the new $\sigma$ and the new steady state. Then we solve for
|
||||
the decision rule with explicit $g^{**}$ at $t+1$ and save the rule.
|
||||
|
||||
After we reached $\sigma=1$, the decision rule is formed.
|
||||
|
||||
The biproduct of this method is the matrix |ss|, whose columns are
|
||||
steady states for subsequent $\sigma$s. The first column is the
|
||||
deterministic steady state, the last column is the stochastic steady
|
||||
state for a full size of shocks ($\sigma=1$). There are |steps+1|
|
||||
columns.
|
||||
|
||||
@<|Approximation::walkStochSteady| code@>=
|
||||
void Approximation::walkStochSteady()
|
||||
{
|
||||
@<initial approximation at deterministic steady@>;
|
||||
double sigma_so_far = 0.0;
|
||||
double dsigma = (steps == 0)? 0.0 : 1.0/steps;
|
||||
for (int i = 1; i <= steps; i++) {
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Approximation about stochastic steady for sigma=" << sigma_so_far+dsigma << endrec;
|
||||
|
||||
Vector last_steady((const Vector&)model.getSteady());
|
||||
|
||||
@<calculate fix-point of the last rule for |dsigma|@>;
|
||||
@<calculate |hh| as expectations of the last $g^{**}$@>;
|
||||
@<form |KOrderStoch|, solve and save@>;
|
||||
|
||||
check(sigma_so_far+dsigma);
|
||||
sigma_so_far += dsigma;
|
||||
}
|
||||
|
||||
@<construct the resulting decision rules@>;
|
||||
}
|
||||
|
||||
@ Here we solve for the deterministic steady state, calculate
|
||||
approximation at the deterministic steady and save the steady state
|
||||
to |ss|.
|
||||
|
||||
@<initial approximation at deterministic steady@>=
|
||||
model.solveDeterministicSteady();
|
||||
approxAtSteady();
|
||||
Vector steady0(ss, 0);
|
||||
steady0 = (const Vector&)model.getSteady();
|
||||
|
||||
@ We form the |DRFixPoint| object from the last rule with
|
||||
$\sigma=dsigma$. Then we save the steady state to |ss|. The new steady
|
||||
is also put to |model.getSteady()|.
|
||||
|
||||
@<calculate fix-point of the last rule for |dsigma|@>=
|
||||
DRFixPoint<KOrder::fold> fp(*rule_ders, ypart, model.getSteady(), dsigma);
|
||||
bool converged = fp.calcFixPoint(DecisionRule::horner, model.getSteady());
|
||||
JournalRecord rec(journal);
|
||||
rec << "Fix point calcs: iter=" << fp.getNumIter() << ", newton_iter="
|
||||
<< fp.getNewtonTotalIter() << ", last_newton_iter=" << fp.getNewtonLastIter() << ".";
|
||||
if (converged)
|
||||
rec << " Converged." << endrec;
|
||||
else {
|
||||
rec << " Not converged!!" << endrec;
|
||||
KORD_RAISE_X("Fix point calculation not converged", KORD_FP_NOT_CONV);
|
||||
}
|
||||
Vector steadyi(ss, i);
|
||||
steadyi = (const Vector&)model.getSteady();
|
||||
|
||||
@ We form the steady state shift |dy|, which is the new steady state
|
||||
minus the old steady state. Then we create |StochForwardDerivs|
|
||||
object, which calculates the derivatives of $g^{**}$ expectations at
|
||||
new sigma and new steady.
|
||||
|
||||
@<calculate |hh| as expectations of the last $g^{**}$@>=
|
||||
Vector dy((const Vector&)model.getSteady());
|
||||
dy.add(-1.0, last_steady);
|
||||
|
||||
StochForwardDerivs<KOrder::fold> hh(ypart, model.nexog(), *rule_ders_ss, mom, dy,
|
||||
dsigma, sigma_so_far);
|
||||
JournalRecord rec1(journal);
|
||||
rec1 << "Calculation of g** expectations done" << endrec;
|
||||
|
||||
|
||||
@ We calculate derivatives of the model at the new steady, form
|
||||
|KOrderStoch| object and solve, and save the rule.
|
||||
|
||||
@<form |KOrderStoch|, solve and save@>=
|
||||
model.calcDerivativesAtSteady();
|
||||
KOrderStoch korder_stoch(ypart, model.nexog(), model.getModelDerivatives(),
|
||||
hh, journal);
|
||||
for (int d = 1; d <= model.order(); d++) {
|
||||
korder_stoch.performStep<KOrder::fold>(d);
|
||||
}
|
||||
saveRuleDerivs(korder_stoch.getFoldDers());
|
||||
|
||||
|
||||
@
|
||||
@<construct the resulting decision rules@>=
|
||||
if (fdr) {
|
||||
delete fdr;
|
||||
fdr = NULL;
|
||||
}
|
||||
if (udr) {
|
||||
delete udr;
|
||||
udr = NULL;
|
||||
}
|
||||
|
||||
fdr = new FoldDecisionRule(*rule_ders, ypart, model.nexog(),
|
||||
model.getSteady(), 1.0-sigma_so_far);
|
||||
if (steps == 0 && dr_centralize) {
|
||||
@<centralize decision rule for zero steps@>;
|
||||
}
|
||||
|
||||
|
||||
@
|
||||
@<centralize decision rule for zero steps@>=
|
||||
DRFixPoint<KOrder::fold> fp(*rule_ders, ypart, model.getSteady(), 1.0);
|
||||
bool converged = fp.calcFixPoint(DecisionRule::horner, model.getSteady());
|
||||
JournalRecord rec(journal);
|
||||
rec << "Fix point calcs: iter=" << fp.getNumIter() << ", newton_iter="
|
||||
<< fp.getNewtonTotalIter() << ", last_newton_iter=" << fp.getNewtonLastIter() << ".";
|
||||
if (converged)
|
||||
rec << " Converged." << endrec;
|
||||
else {
|
||||
rec << " Not converged!!" << endrec;
|
||||
KORD_RAISE_X("Fix point calculation not converged", KORD_FP_NOT_CONV);
|
||||
}
|
||||
|
||||
{
|
||||
JournalRecordPair recp(journal);
|
||||
recp << "Centralizing about fix-point." << endrec;
|
||||
FoldDecisionRule* dr_backup = fdr;
|
||||
fdr = new FoldDecisionRule(*dr_backup, model.getSteady());
|
||||
delete dr_backup;
|
||||
}
|
||||
|
||||
|
||||
@ Here we simply make a new hardcopy of the given rule |rule_ders|,
|
||||
and make a new container of in-place subtensors of the derivatives
|
||||
corresponding to forward looking variables. The given container comes
|
||||
from a temporary object and will be destroyed.
|
||||
|
||||
@<|Approximation::saveRuleDerivs| code@>=
|
||||
void Approximation::saveRuleDerivs(const FGSContainer& g)
|
||||
{
|
||||
if (rule_ders) {
|
||||
delete rule_ders;
|
||||
delete rule_ders_ss;
|
||||
}
|
||||
rule_ders = new FGSContainer(g);
|
||||
rule_ders_ss = new FGSContainer(4);
|
||||
for (FGSContainer::iterator run = (*rule_ders).begin(); run != (*rule_ders).end(); ++run) {
|
||||
FGSTensor* ten = new FGSTensor(ypart.nstat+ypart.npred, ypart.nyss(), *((*run).second));
|
||||
rule_ders_ss->insert(ten);
|
||||
}
|
||||
}
|
||||
|
||||
@ This method calculates a shift of the system equations due to
|
||||
integrating shocks at a given $\sigma$ and current steady state. More precisely, if
|
||||
$$F(y,u,u',\sigma)=f(g^{**}(g^*(y,u,\sigma),u',\sigma),g(y,u,\sigma),y,u)$$
|
||||
then the method returns a vector
|
||||
$$\sum_{d=1}{1\over d!}\sigma^d\left[F_{u'^d}\right]_{\alpha_1\ldots\alpha_d}
|
||||
\Sigma^{\alpha_1\ldots\alpha_d}$$
|
||||
|
||||
For a calculation of $\left[F_{u'^d}\right]$ we use |@<|ZAuxContainer|
|
||||
class declaration@>|, so we create its object. In each cycle we
|
||||
calculate $\left[F_{u'^d}\right]$@q'@>, and then multiply with the shocks,
|
||||
and add the ${\sigma^d\over d!}$ multiple to the result.
|
||||
|
||||
@<|Approximation::calcStochShift| code@>=
|
||||
void Approximation::calcStochShift(Vector& out, double at_sigma) const
|
||||
{
|
||||
KORD_RAISE_IF(out.length() != ypart.ny(),
|
||||
"Wrong length of output vector for Approximation::calcStochShift");
|
||||
out.zeros();
|
||||
|
||||
ZAuxContainer zaux(rule_ders_ss, ypart.nyss(), ypart.ny(),
|
||||
ypart.nys(), model.nexog());
|
||||
|
||||
int dfac = 1;
|
||||
for (int d = 1; d <= rule_ders->getMaxDim(); d++, dfac*=d) {
|
||||
if ( KOrder::is_even(d)) {
|
||||
Symmetry sym(0,d,0,0);
|
||||
@<calculate $F_{u'^d}$ via |ZAuxContainer|@>;@q'@>
|
||||
@<multiply with shocks and add to result@>;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<calculate $F_{u'^d}$ via |ZAuxContainer|@>=
|
||||
FGSTensor* ten = new FGSTensor(ypart.ny(), TensorDimens(sym, nvs));
|
||||
ten->zeros();
|
||||
for (int l = 1; l <= d; l++) {
|
||||
const FSSparseTensor* f = model.getModelDerivatives().get(Symmetry(l));
|
||||
zaux.multAndAdd(*f, *ten);
|
||||
}
|
||||
|
||||
@
|
||||
@<multiply with shocks and add to result@>=
|
||||
FGSTensor* tmp = new FGSTensor(ypart.ny(), TensorDimens(Symmetry(0,0,0,0), nvs));
|
||||
tmp->zeros();
|
||||
ten->contractAndAdd(1, *tmp, *(mom.get(Symmetry(d))));
|
||||
|
||||
out.add(pow(at_sigma,d)/dfac, tmp->getData());
|
||||
delete ten;
|
||||
delete tmp;
|
||||
|
||||
|
||||
@ This method calculates and reports
|
||||
$$f(\bar y)+\sum_{d=1}{1\over d!}\sigma^d\left[F_{u'^d}\right]_{\alpha_1\ldots\alpha_d}
|
||||
\Sigma^{\alpha_1\ldots\alpha_d}$$
|
||||
at $\bar y$, zero shocks and $\sigma$. This number should be zero.
|
||||
|
||||
We evaluate the error both at a given $\sigma$ and $\sigma=1.0$.
|
||||
|
||||
@<|Approximation::check| code@>=
|
||||
void Approximation::check(double at_sigma) const
|
||||
{
|
||||
Vector stoch_shift(ypart.ny());
|
||||
Vector system_resid(ypart.ny());
|
||||
Vector xx(model.nexog());
|
||||
xx.zeros();
|
||||
model.evaluateSystem(system_resid, model.getSteady(), xx);
|
||||
calcStochShift(stoch_shift, at_sigma);
|
||||
stoch_shift.add(1.0, system_resid);
|
||||
JournalRecord rec1(journal);
|
||||
rec1 << "Error of current approximation for shocks at sigma " << at_sigma
|
||||
<< " is " << stoch_shift.getMax() << endrec;
|
||||
calcStochShift(stoch_shift, 1.0);
|
||||
stoch_shift.add(1.0, system_resid);
|
||||
JournalRecord rec2(journal);
|
||||
rec2 << "Error of current approximation for full shocks is " << stoch_shift.getMax() << endrec;
|
||||
}
|
||||
|
||||
@ The method returns unconditional variance of endogenous variables
|
||||
based on the first order. The first order approximation looks like
|
||||
$$\hat y_t=g_{y^*}\hat y^*_{t-1}+g_uu_t$$
|
||||
where $\hat y$ denotes a deviation from the steady state. It can be written as
|
||||
$$\hat y_t=\left[0\, g_{y^*}\, 0\right]\hat y_{t-1}+g_uu_t$$
|
||||
which yields unconditional covariance $V$ for which
|
||||
$$V=GVG^T + g_u\Sigma g_u^T,$$
|
||||
where $G=[0\, g_{y^*}\, 0]$ and $\Sigma$ is the covariance of the shocks.
|
||||
|
||||
For solving this Lyapunov equation we use the Sylvester module, which
|
||||
solves equation of the type
|
||||
$$AX+BX(C\otimes\cdots\otimes C)=D$$
|
||||
So we invoke the Sylvester solver for the first dimension with $A=I$,
|
||||
$B=-G$, $C=G^T$ and $D=g_u\Sigma g_u^T$.
|
||||
|
||||
|
||||
@<|Approximation::calcYCov| code@>=
|
||||
// Returns the first-order unconditional covariance V of the endogenous
// variables, solving the Lyapunov equation V = G*V*G^T + g_u*Sigma*g_u^T
// via the Sylvester solver (A=I, B=-G, C=G^T, D=g_u*Sigma*g_u^T).
// The caller takes ownership of the returned matrix.
TwoDMatrix* Approximation::calcYCov() const
{
  const TwoDMatrix& gy = *(rule_ders->get(Symmetry(1,0,0,0)));
  const TwoDMatrix& gu = *(rule_ders->get(Symmetry(0,1,0,0)));
  int n = model.numeq();

  // G = [0 g_y* 0]
  TwoDMatrix G(n, n);
  G.zeros();
  G.place(gy, 0, model.nstat());

  // B = -G
  TwoDMatrix B((const TwoDMatrix&)G);
  B.mult(-1.0);
  // C = G^T
  TwoDMatrix C(G, "transpose");
  // A = I
  TwoDMatrix A(n, n);
  A.zeros();
  for (int d = 0; d < n; d++)
    A.get(d,d) = 1.0;

  // right-hand side D = g_u * Sigma * g_u^T
  TwoDMatrix guSigma(gu, model.getVcov());
  TwoDMatrix guTrans(gu, "transpose");
  TwoDMatrix* X = new TwoDMatrix(guSigma, guTrans);

  GeneralSylvester gs(1, n, n, 0,
                      A.base(), B.base(), C.base(), X->base());
  gs.solve();

  return X;
}
|
||||
|
||||
@ End of {\tt approximation.cpp} file.
|
|
@ -0,0 +1,157 @@
|
|||
@q $Id: approximation.hweb 2352 2009-09-03 19:18:15Z michel $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Approximating model solution. Start of {\tt approximation.h} file.
|
||||
|
||||
The class |Approximation| in this file is a main interface to the
|
||||
algorithms calculating approximations to the decision rule about
|
||||
deterministic and stochastic steady states.
|
||||
|
||||
The approximation about a deterministic steady state is solved by
|
||||
classes |@<|FirstOrder| class declaration@>| and |@<|KOrder| class
|
||||
declaration@>|. The approximation about the stochastic steady state is
|
||||
solved by class |@<|KOrderStoch| class declaration@>| together with a
|
||||
method of |Approximation| class |@<|Approximation::walkStochSteady|
|
||||
code@>|.
|
||||
|
||||
The approximation about the stochastic steady state is done with
|
||||
explicit expression of forward derivatives of $g^{**}$. More formally,
|
||||
we have to solve the decision rule $g$ from the implicit system:
|
||||
$$E_t(f(g^{**}(g^*(y^*,u_t,\sigma),u_{t+1},\sigma),g(y^*,u_t,\sigma),y_t,u_t))=0$$
|
||||
The term within the expectations can be Taylor expanded, and the
|
||||
expectation can be driven into the formula. However, when doing this
|
||||
at $\sigma\not=0$, the term $g^{**}$ at $\sigma\not=0$ is dependent on
|
||||
$u_{t+1}$ and thus the integral of its approximation includes all
|
||||
derivatives wrt. $u$ of $g^{**}$. Note that for $\sigma=0$, the
|
||||
derivatives of $g^{**}$ in this context are constant. This is the main
|
||||
difference between the approximation at deterministic steady
|
||||
($\sigma=0$), and stochastic steady ($\sigma\not=0$). This means that
|
||||
$k$-order derivative of the above equation at $\sigma\not=0$ depends on
|
||||
all derivatives of $g^{**}$ (including those with order greater than
|
||||
$k$).
|
||||
|
||||
The explicit expression of the forward $g^{**}$ means that the
|
||||
derivatives of $g$ are not solved simultaneously, but that the forward
|
||||
derivatives of $g^{**}$ are calculated as an extrapolation based on
|
||||
the approximation at lower $\sigma$. This is exactly what does the
|
||||
|@<|Approximation::walkStochSteady| code@>|. It starts at the
|
||||
deterministic steady state, and in a few steps it adds to $\sigma$
|
||||
explicitly expressing forward $g^{**}$ from a previous step.
|
||||
|
||||
Further details on the both solution methods are given in (todo: put
|
||||
references here when they exist).
|
||||
|
||||
Very important note: all classes here used for calculation of decision
|
||||
rule approximation are folded. For the time being, it seems that faa
|
||||
Di Bruno formula is quicker for folded tensors, and that is why we
|
||||
stick to folded tensors here. However, when the calcs are done, we
|
||||
calculate also its unfolded versions, to be available for simulations
|
||||
and so on.
|
||||
|
||||
@s ZAuxContainer int
|
||||
@s Approximation int
|
||||
@c
|
||||
#ifndef APPROXIMATION_H
|
||||
#define APPROXIMATION_H
|
||||
|
||||
#include "dynamic_model.h"
|
||||
#include "decision_rule.h"
|
||||
#include "korder.h"
|
||||
#include "journal.h"
|
||||
|
||||
@<|ZAuxContainer| class declaration@>;
|
||||
@<|Approximation| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
@ This class is used to calculate derivatives by faa Di Bruno of the
|
||||
$$f(g^{**}(g^*(y^*,u,\sigma),u',\sigma),g(y^*,u,\sigma),y^*,u)$$ with
|
||||
respect $u'$. In order to keep it as simple as possible, the class
|
||||
represents an equivalent (with respect to $u'$) container for
|
||||
$f(g^{**}(y^*,u',\sigma),0,0,0)$. The class is used only for
|
||||
evaluation of approximation error in |Approximation| class, which is
|
||||
calculated in |Approximation::calcStochShift| method.
|
||||
|
||||
Since it is a folded version, we inherit from
|
||||
|StackContainer<FGSTensor>| and |FoldedStackContainer|. To construct
|
||||
it, we need only the $g^{**}$ container and size of stacks.
|
||||
|
||||
@<|ZAuxContainer| class declaration@>=
|
||||
// Folded stack container equivalent (with respect to u') to
// f(g**(y*,u',sigma),0,0,0); used only to evaluate the approximation error
// in Approximation::calcStochShift via faa Di Bruno.
class ZAuxContainer : public StackContainer<FGSTensor>, public FoldedStackContainer {
public:@;
	typedef StackContainer<FGSTensor>::_Ctype _Ctype;
	typedef StackContainer<FGSTensor>::itype itype;
	// gss: container of g** derivatives; ngss, ng, ny, nu: sizes of the stacks.
	ZAuxContainer(const _Ctype* gss, int ngss, int ng, int ny, int nu);
	// Returns the kind of the i-th stack for a given symmetry s.
	itype getType(int i, const Symmetry& s) const;
};
|
||||
|
||||
|
||||
|
||||
@ This class provides an interface to approximation algorithms. The
|
||||
core method is |walkStochSteady| which calculates the approximation
|
||||
about stochastic steady state in a given number of steps. The number
|
||||
is given as a parameter |ns| of the constructor. If the number is
|
||||
equal to zero, the resulted approximation is about the deterministic
|
||||
steady state.
|
||||
|
||||
An object is constructed from the |DynamicModel|, and the number of
|
||||
steps |ns|. Also, we pass a reference to journal. That's all. The
|
||||
result of the core method |walkStochSteady| is a decision rule |dr|
|
||||
and a matrix |ss| whose columns are steady states for increasing
|
||||
$\sigma$ during the walk. Both can be retrieved by public methods. The
|
||||
first column of the matrix is the deterministic steady state, the last
|
||||
is the stochastic steady state for the full size shocks.
|
||||
|
||||
The method |walkStochSteady| calls the following methods:
|
||||
|approxAtSteady| calculates an initial approximation about the
|
||||
deterministic steady, |saveRuleDerivs| saves derivatives of a rule for
|
||||
the following step in |rule_ders| and |rule_ders_ss| (see
|
||||
|@<|Approximation::saveRuleDerivs| code@>| for their description),
|
||||
|check| reports an error of the current approximation and
|
||||
|calcStochShift| (called from |check|) calculates a shift of the
|
||||
system equations due to uncertainty.
|
||||
|
||||
dr_centralize is a new option. dynare++ was automatically expressing
|
||||
results around the fixed point instead of the deterministic steady
|
||||
state. dr_centralize controls this behavior.
|
||||
|
||||
|
||||
@<|Approximation| class declaration@>=
|
||||
// Interface to the approximation algorithms. The core method walkStochSteady
// approximates the decision rule about the stochastic steady state in |steps|
// steps; steps == 0 yields the approximation about the deterministic steady.
class Approximation {
	DynamicModel& model;
	Journal& journal;
	FGSContainer* rule_ders;     // rule derivatives saved from the last step
	FGSContainer* rule_ders_ss;  // g** derivatives saved from the last step
	FoldDecisionRule* fdr;       // folded decision rule (primary result)
	UnfoldDecisionRule* udr;     // unfolded decision rule
	const PartitionY ypart;
	const FNormalMoments mom;
	IntSequence nvs;
	int steps;                   // number of steps of the sigma walk
	bool dr_centralize;          // if true, express results around the fix point
	double qz_criterium;
	TwoDMatrix ss;               // columns: steady states for increasing sigma
public:@;
	Approximation(DynamicModel& m, Journal& j, int ns, bool dr_centr, double qz_crit);
	virtual ~Approximation();

	const FoldDecisionRule& getFoldDecisionRule() const;
	const UnfoldDecisionRule& getUnfoldDecisionRule() const;
	// First column is the deterministic steady state, last the stochastic one.
	const TwoDMatrix& getSS() const
	{@+ return ss;@+}
	const DynamicModel& getModel() const
	{@+ return model;@+}

	void walkStochSteady();
	// Caller owns the returned first-order unconditional covariance matrix.
	TwoDMatrix* calcYCov() const;
protected:@;
	// initial approximation about the deterministic steady state
	void approxAtSteady();
	// shift of the system equations due to uncertainty at the given sigma
	void calcStochShift(Vector& out, double at_sigma) const;
	void saveRuleDerivs(const FGSContainer& g);
	// reports the error of the current approximation to the journal
	void check(double at_sigma) const;
};
|
||||
|
||||
|
||||
@ End of {\tt approximation.h} file.
|
||||
|
|
@ -0,0 +1,690 @@
|
|||
@q $Id: decision_rule.cweb 1896 2008-06-24 04:01:05Z kamenik $ @>
|
||||
@q Copyright 2004, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt decision\_rule.cpp} file.
|
||||
@c
|
||||
|
||||
#include "kord_exception.h"
|
||||
#include "decision_rule.h"
|
||||
#include "dynamic_model.h"
|
||||
|
||||
#include "SymSchurDecomp.h"
|
||||
#include "cpplapack.h"
|
||||
|
||||
#include <limits>
|
||||
|
||||
template <>
|
||||
int DRFixPoint<KOrder::fold>::max_iter = 10000;
|
||||
template <>
|
||||
int DRFixPoint<KOrder::unfold>::max_iter = 10000;
|
||||
template <>
|
||||
double DRFixPoint<KOrder::fold>::tol = 1.e-10;
|
||||
template <>
|
||||
double DRFixPoint<KOrder::unfold>::tol = 1.e-10;
|
||||
template <>
|
||||
int DRFixPoint<KOrder::fold>::max_newton_iter = 50;
|
||||
template <>
|
||||
int DRFixPoint<KOrder::unfold>::max_newton_iter = 50;
|
||||
template <>
|
||||
int DRFixPoint<KOrder::fold>::newton_pause = 100;
|
||||
template <>
|
||||
int DRFixPoint<KOrder::unfold>::newton_pause = 100;
|
||||
@#
|
||||
@<|FoldDecisionRule| conversion from |UnfoldDecisionRule|@>;
|
||||
@<|UnfoldDecisionRule| conversion from |FoldDecisionRule|@>;
|
||||
@<|SimResults| destructor@>;
|
||||
@<|SimResults::simulate| code1@>;
|
||||
@<|SimResults::simulate| code2@>;
|
||||
@<|SimResults::addDataSet| code@>;
|
||||
@<|SimResults::writeMat4| code1@>;
|
||||
@<|SimResults::writeMat4| code2@>;
|
||||
@<|SimResultsStats::simulate| code@>;
|
||||
@<|SimResultsStats::writeMat4| code@>;
|
||||
@<|SimResultsStats::calcMean| code@>;
|
||||
@<|SimResultsStats::calcVcov| code@>;
|
||||
@<|SimResultsDynamicStats::simulate| code@>;
|
||||
@<|SimResultsDynamicStats::writeMat4| code@>;
|
||||
@<|SimResultsDynamicStats::calcMean| code@>;
|
||||
@<|SimResultsDynamicStats::calcVariance| code@>;
|
||||
@<|SimResultsIRF::simulate| code1@>;
|
||||
@<|SimResultsIRF::simulate| code2@>;
|
||||
@<|SimResultsIRF::calcMeans| code@>;
|
||||
@<|SimResultsIRF::calcVariances| code@>;
|
||||
@<|SimResultsIRF::writeMat4| code@>;
|
||||
@<|RTSimResultsStats::simulate| code1@>;
|
||||
@<|RTSimResultsStats::simulate| code2@>;
|
||||
@<|RTSimResultsStats::writeMat4| code@>;
|
||||
@<|IRFResults| constructor@>;
|
||||
@<|IRFResults| destructor@>;
|
||||
@<|IRFResults::writeMat4| code@>;
|
||||
@<|SimulationWorker::operator()()| code@>;
|
||||
@<|SimulationIRFWorker::operator()()| code@>;
|
||||
@<|RTSimulationWorker::operator()()| code@>;
|
||||
@<|RandomShockRealization::choleskyFactor| code@>;
|
||||
@<|RandomShockRealization::schurFactor| code@>;
|
||||
@<|RandomShockRealization::get| code@>;
|
||||
@<|ExplicitShockRealization| constructor code@>;
|
||||
@<|ExplicitShockRealization::get| code@>;
|
||||
@<|ExplicitShockRealization::addToShock| code@>;
|
||||
@<|GenShockRealization::get| code@>;
|
||||
|
||||
@
|
||||
@<|FoldDecisionRule| conversion from |UnfoldDecisionRule|@>=
|
||||
// Constructs a folded decision rule by folding every tensor of the given
// unfolded rule; the partitioning, number of shocks and steady state carry over.
FoldDecisionRule::FoldDecisionRule(const UnfoldDecisionRule& udr)
  : DecisionRuleImpl<KOrder::fold>(ctraits<KOrder::fold>::Tpol(udr.nrows(), udr.nvars()),
                                   udr.ypart, udr.nu, udr.ysteady)
{
  ctraits<KOrder::unfold>::Tpol::const_iterator it = udr.begin();
  while (it != udr.end()) {
    insert(new ctraits<KOrder::fold>::Ttensym(*((*it).second)));
    ++it;
  }
}
|
||||
|
||||
@
|
||||
@<|UnfoldDecisionRule| conversion from |FoldDecisionRule|@>=
|
||||
// Constructs an unfolded decision rule by unfolding every tensor of the given
// folded rule; the partitioning, number of shocks and steady state carry over.
UnfoldDecisionRule::UnfoldDecisionRule(const FoldDecisionRule& fdr)
  : DecisionRuleImpl<KOrder::unfold>(ctraits<KOrder::unfold>::Tpol(fdr.nrows(), fdr.nvars()),
                                     fdr.ypart, fdr.nu, fdr.ysteady)
{
  ctraits<KOrder::fold>::Tpol::const_iterator it = fdr.begin();
  while (it != fdr.end()) {
    insert(new ctraits<KOrder::unfold>::Ttensym(*((*it).second)));
    ++it;
  }
}
|
||||
|
||||
@
|
||||
@<|SimResults| destructor@>=
|
||||
// Frees the owned simulation data sets and their shock realizations,
// which are stored pairwise.
SimResults::~SimResults()
{
  int nsets = getNumSets();
  for (int i = 0; i < nsets; i++) {
    delete shocks[i];
    delete data[i];
  }
}
|
||||
|
||||
@ This runs simulations with an output to journal file. Note that we
|
||||
report how many simulations had to be thrown out due to Nan or Inf.
|
||||
|
||||
@<|SimResults::simulate| code1@>=
|
||||
void SimResults::simulate(int num_sim, const DecisionRule& dr, const Vector& start,
|
||||
const TwoDMatrix& vcov, Journal& journal)
|
||||
{
|
||||
JournalRecordPair paa(journal);
|
||||
paa << "Performing " << num_sim << " stochastic simulations for "
|
||||
<< num_per << " periods" << endrec;
|
||||
simulate(num_sim, dr, start, vcov);
|
||||
int thrown = num_sim - data.size();
|
||||
if (thrown > 0) {
|
||||
JournalRecord rec(journal);
|
||||
rec << "I had to throw " << thrown << " simulations away due to Nan or Inf" << endrec;
|
||||
}
|
||||
}
|
||||
|
||||
@ This runs a given number of simulations by creating
|
||||
|SimulationWorker| for each simulation and inserting them to the
|
||||
thread group.
|
||||
|
||||
@<|SimResults::simulate| code2@>=
|
||||
void SimResults::simulate(int num_sim, const DecisionRule& dr, const Vector& start,
|
||||
const TwoDMatrix& vcov)
|
||||
{
|
||||
std::vector<RandomShockRealization> rsrs;
|
||||
rsrs.reserve(num_sim);
|
||||
|
||||
THREAD_GROUP gr;
|
||||
for (int i = 0; i < num_sim; i++) {
|
||||
RandomShockRealization sr(vcov, system_random_generator.int_uniform());
|
||||
rsrs.push_back(sr);
|
||||
THREAD* worker = new
|
||||
SimulationWorker(*this, dr, DecisionRule::horner,
|
||||
num_per, start, rsrs.back());
|
||||
gr.insert(worker);
|
||||
}
|
||||
gr.run();
|
||||
}
|
||||
|
||||
@ This adds the data with the realized shocks. If the data is not
|
||||
finite, both the data and shocks are thrown away.
|
||||
|
||||
@<|SimResults::addDataSet| code@>=
|
||||
// Takes ownership of a simulated data set and its realized shocks. If the
// data is not finite, both are deleted and false is returned.
bool SimResults::addDataSet(TwoDMatrix* d, ExplicitShockRealization* sr)
{
  KORD_RAISE_IF(d->nrows() != num_y,
                "Incompatible number of rows for SimResults::addDataSets");
  KORD_RAISE_IF(d->ncols() != num_per,
                "Incompatible number of cols for SimResults::addDataSets");
  if (!d->isFinite()) {
    // throw away simulations containing NaN or Inf
    delete d;
    delete sr;
    return false;
  }
  data.push_back(d);
  shocks.push_back(sr);
  return true;
}
|
||||
|
||||
@
|
||||
@<|SimResults::writeMat4| code1@>=
|
||||
void SimResults::writeMat4(const char* base, const char* lname) const
|
||||
{
|
||||
char matfile_name[100];
|
||||
sprintf(matfile_name, "%s.mat", base);
|
||||
FILE* out;
|
||||
if (NULL != (out=fopen(matfile_name, "wb"))) {
|
||||
writeMat4(out, lname);
|
||||
fclose(out);
|
||||
}
|
||||
}
|
||||
|
||||
@ This saves the results as matrices with given prefix and with index
|
||||
appended. If there is only one matrix, the index is not appended.
|
||||
|
||||
@<|SimResults::writeMat4| code2@>=
|
||||
void SimResults::writeMat4(FILE* fd, const char* lname) const
|
||||
{
|
||||
char tmp[100];
|
||||
for (int i = 0; i < getNumSets(); i++) {
|
||||
if (getNumSets() > 1)
|
||||
sprintf(tmp, "%s_data%d", lname, i+1);
|
||||
else
|
||||
sprintf(tmp, "%s_data", lname);
|
||||
ConstTwoDMatrix m(*(data[i]));
|
||||
m.writeMat4(fd, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsStats::simulate| code@>=
|
||||
void SimResultsStats::simulate(int num_sim, const DecisionRule& dr,
|
||||
const Vector& start,
|
||||
const TwoDMatrix& vcov, Journal& journal)
|
||||
{
|
||||
SimResults::simulate(num_sim, dr, start, vcov, journal);
|
||||
{
|
||||
JournalRecordPair paa(journal);
|
||||
paa << "Calculating means from the simulations." << endrec;
|
||||
calcMean();
|
||||
}
|
||||
{
|
||||
JournalRecordPair paa(journal);
|
||||
paa << "Calculating covariances from the simulations." << endrec;
|
||||
calcVcov();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ Here we do not save the data itself, we save only mean and vcov.
|
||||
@<|SimResultsStats::writeMat4| code@>=
|
||||
void SimResultsStats::writeMat4(FILE* fd, const char* lname) const
|
||||
{
|
||||
char tmp[100];
|
||||
sprintf(tmp, "%s_mean", lname);
|
||||
ConstTwoDMatrix m(num_y, 1, mean.base());
|
||||
m.writeMat4(fd, tmp);
|
||||
sprintf(tmp, "%s_vcov", lname);
|
||||
ConstTwoDMatrix(vcov).writeMat4(fd, tmp);
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsStats::calcMean| code@>=
|
||||
void SimResultsStats::calcMean()
|
||||
{
|
||||
mean.zeros();
|
||||
if (data.size()*num_per > 0) {
|
||||
double mult = 1.0/data.size()/num_per;
|
||||
for (unsigned int i = 0; i < data.size(); i++) {
|
||||
for (int j = 0; j < num_per; j++) {
|
||||
ConstVector col(*data[i], j);
|
||||
mean.add(mult, col);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsStats::calcVcov| code@>=
|
||||
void SimResultsStats::calcVcov()
|
||||
{
|
||||
if (data.size()*num_per > 1) {
|
||||
vcov.zeros();
|
||||
double mult = 1.0/(data.size()*num_per - 1);
|
||||
for (unsigned int i = 0; i < data.size(); i++) {
|
||||
const TwoDMatrix& d = *(data[i]);
|
||||
for (int j = 0; j < num_per; j++) {
|
||||
for (int m = 0; m < num_y; m++) {
|
||||
for (int n = m; n < num_y; n++) {
|
||||
double s = (d.get(m,j)-mean[m])*(d.get(n,j)-mean[n]);
|
||||
vcov.get(m,n) += mult*s;
|
||||
if (m != n)
|
||||
vcov.get(n,m) += mult*s;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
vcov.infs();
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsDynamicStats::simulate| code@>=
|
||||
void SimResultsDynamicStats::simulate(int num_sim, const DecisionRule& dr,
|
||||
const Vector& start,
|
||||
const TwoDMatrix& vcov, Journal& journal)
|
||||
{
|
||||
SimResults::simulate(num_sim, dr, start, vcov, journal);
|
||||
{
|
||||
JournalRecordPair paa(journal);
|
||||
paa << "Calculating means of the conditional simulations." << endrec;
|
||||
calcMean();
|
||||
}
|
||||
{
|
||||
JournalRecordPair paa(journal);
|
||||
paa << "Calculating variances of the conditional simulations." << endrec;
|
||||
calcVariance();
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsDynamicStats::writeMat4| code@>=
|
||||
void SimResultsDynamicStats::writeMat4(FILE* fd, const char* lname) const
|
||||
{
|
||||
char tmp[100];
|
||||
sprintf(tmp, "%s_cond_mean", lname);
|
||||
ConstTwoDMatrix(mean).writeMat4(fd, tmp);
|
||||
sprintf(tmp, "%s_cond_variance", lname);
|
||||
ConstTwoDMatrix(variance).writeMat4(fd, tmp);
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsDynamicStats::calcMean| code@>=
|
||||
void SimResultsDynamicStats::calcMean()
|
||||
{
|
||||
mean.zeros();
|
||||
if (data.size() > 0) {
|
||||
double mult = 1.0/data.size();
|
||||
for (int j = 0; j < num_per; j++) {
|
||||
Vector meanj(mean, j);
|
||||
for (unsigned int i = 0; i < data.size(); i++) {
|
||||
ConstVector col(*data[i], j);
|
||||
meanj.add(mult, col);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsDynamicStats::calcVariance| code@>=
|
||||
void SimResultsDynamicStats::calcVariance()
|
||||
{
|
||||
if (data.size() > 1) {
|
||||
variance.zeros();
|
||||
double mult = 1.0/(data.size()-1);
|
||||
for (int j = 0; j < num_per; j++) {
|
||||
ConstVector meanj(mean, j);
|
||||
Vector varj(variance, j);
|
||||
for (int i = 0; i < (int)data.size(); i++) {
|
||||
Vector col(ConstVector((*data[i]), j));
|
||||
col.add(-1.0, meanj);
|
||||
for (int k = 0; k < col.length(); k++)
|
||||
col[k] = col[k]*col[k];
|
||||
varj.add(mult, col);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
variance.infs();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@
|
||||
@<|SimResultsIRF::simulate| code1@>=
|
||||
void SimResultsIRF::simulate(const DecisionRule& dr, const Vector& start,
|
||||
Journal& journal)
|
||||
{
|
||||
JournalRecordPair paa(journal);
|
||||
paa << "Performing " << control.getNumSets() << " IRF simulations for "
|
||||
<< num_per << " periods; shock=" << ishock << ", impulse=" << imp << endrec;
|
||||
simulate(dr, start);
|
||||
int thrown = control.getNumSets() - data.size();
|
||||
if (thrown > 0) {
|
||||
JournalRecord rec(journal);
|
||||
rec << "I had to throw " << thrown
|
||||
<< " simulations away due to Nan or Inf" << endrec;
|
||||
}
|
||||
calcMeans();
|
||||
calcVariances();
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsIRF::simulate| code2@>=
|
||||
void SimResultsIRF::simulate(const DecisionRule& dr, const Vector& start)
|
||||
{
|
||||
THREAD_GROUP gr;
|
||||
for (int idata = 0; idata < control.getNumSets(); idata++) {
|
||||
THREAD* worker = new
|
||||
SimulationIRFWorker(*this, dr, DecisionRule::horner,
|
||||
num_per, start, idata, ishock, imp);
|
||||
gr.insert(worker);
|
||||
}
|
||||
gr.run();
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsIRF::calcMeans| code@>=
|
||||
void SimResultsIRF::calcMeans()
|
||||
{
|
||||
means.zeros();
|
||||
if (data.size() > 0) {
|
||||
for (unsigned int i = 0; i < data.size(); i++)
|
||||
means.add(1.0, *(data[i]));
|
||||
means.mult(1.0/data.size());
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsIRF::calcVariances| code@>=
|
||||
void SimResultsIRF::calcVariances()
|
||||
{
|
||||
if (data.size() > 1) {
|
||||
variances.zeros();
|
||||
for (unsigned int i = 0; i < data.size(); i++) {
|
||||
TwoDMatrix d((const TwoDMatrix&)(*(data[i])));
|
||||
d.add(-1.0, means);
|
||||
for (int j = 0; j < d.nrows(); j++)
|
||||
for (int k = 0; k < d.ncols(); k++)
|
||||
variances.get(j,k) += d.get(j,k)*d.get(j,k);
|
||||
d.mult(1.0/(data.size()-1));
|
||||
}
|
||||
} else {
|
||||
variances.infs();
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimResultsIRF::writeMat4| code@>=
|
||||
void SimResultsIRF::writeMat4(FILE* fd, const char* lname) const
|
||||
{
|
||||
char tmp[100];
|
||||
sprintf(tmp, "%s_mean", lname);
|
||||
means.writeMat4(fd, tmp);
|
||||
sprintf(tmp, "%s_var", lname);
|
||||
variances.writeMat4(fd, tmp);
|
||||
}
|
||||
|
||||
@
|
||||
@<|RTSimResultsStats::simulate| code1@>=
|
||||
void RTSimResultsStats::simulate(int num_sim, const DecisionRule& dr, const Vector& start,
|
||||
const TwoDMatrix& v, Journal& journal)
|
||||
{
|
||||
JournalRecordPair paa(journal);
|
||||
paa << "Performing " << num_sim << " real-time stochastic simulations for "
|
||||
<< num_per << " periods" << endrec;
|
||||
simulate(num_sim, dr, start, v);
|
||||
mean = nc.getMean();
|
||||
mean.add(1.0, dr.getSteady());
|
||||
nc.getVariance(vcov);
|
||||
if (thrown_periods > 0) {
|
||||
JournalRecord rec(journal);
|
||||
rec << "I had to throw " << thrown_periods << " periods away due to Nan or Inf" << endrec;
|
||||
JournalRecord rec1(journal);
|
||||
rec1 << "This affected " << incomplete_simulations << " out of "
|
||||
<< num_sim << " simulations" << endrec;
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|RTSimResultsStats::simulate| code2@>=
|
||||
void RTSimResultsStats::simulate(int num_sim, const DecisionRule& dr, const Vector& start,
|
||||
const TwoDMatrix& vcov)
|
||||
{
|
||||
std::vector<RandomShockRealization> rsrs;
|
||||
rsrs.reserve(num_sim);
|
||||
|
||||
THREAD_GROUP gr;
|
||||
for (int i = 0; i < num_sim; i++) {
|
||||
RandomShockRealization sr(vcov, system_random_generator.int_uniform());
|
||||
rsrs.push_back(sr);
|
||||
THREAD* worker = new
|
||||
RTSimulationWorker(*this, dr, DecisionRule::horner,
|
||||
num_per, start, rsrs.back());
|
||||
gr.insert(worker);
|
||||
}
|
||||
gr.run();
|
||||
}
|
||||
|
||||
@
|
||||
@<|RTSimResultsStats::writeMat4| code@>=
|
||||
void RTSimResultsStats::writeMat4(FILE* fd, const char* lname)
|
||||
{
|
||||
char tmp[100];
|
||||
sprintf(tmp, "%s_rt_mean", lname);
|
||||
ConstTwoDMatrix m(nc.getDim(), 1, mean.base());
|
||||
m.writeMat4(fd, tmp);
|
||||
sprintf(tmp, "%s_rt_vcov", lname);
|
||||
ConstTwoDMatrix(vcov).writeMat4(fd, tmp);
|
||||
}
|
||||
|
||||
@
|
||||
@<|IRFResults| constructor@>=
|
||||
// For each requested shock, builds a pair of IRF result sets (+1 and -1
// standard-error impulses) against the control simulations, then runs them.
IRFResults::IRFResults(const DynamicModel& mod, const DecisionRule& dr,
                       const SimResults& control, const vector<int>& ili,
                       Journal& journal)
  : model(mod), irf_list_ind(ili)
{
  int num_per = control.getNumPer();
  JournalRecordPair pa(journal);
  pa << "Calculating IRFs against control for " << (int)irf_list_ind.size() << " shocks and for "
     << num_per << " periods" << endrec;
  const TwoDMatrix& vcov = mod.getVcov();

  // even slots: positive impulse; odd slots: negative impulse
  for (unsigned int ii = 0; ii < irf_list_ind.size(); ii++) {
    int ishock = irf_list_ind[ii];
    double sd = sqrt(vcov.get(ishock,ishock));
    irf_res.push_back(new SimResultsIRF(control, model.numeq(), num_per,
                                        ishock, sd));
    irf_res.push_back(new SimResultsIRF(control, model.numeq(), num_per,
                                        ishock, -sd));
  }

  for (unsigned int ii = 0; ii < irf_list_ind.size(); ii++) {
    irf_res[2*ii]->simulate(dr, model.getSteady(), journal);
    irf_res[2*ii+1]->simulate(dr, model.getSteady(), journal);
  }
}
|
||||
|
||||
@
|
||||
@<|IRFResults| destructor@>=
|
||||
// Frees the owned IRF result sets.
IRFResults::~IRFResults()
{
  while (!irf_res.empty()) {
    delete irf_res.back();
    irf_res.pop_back();
  }
}
|
||||
|
||||
@
|
||||
@<|IRFResults::writeMat4| code@>=
|
||||
void IRFResults::writeMat4(FILE* fd, const char* prefix) const
|
||||
{
|
||||
for (unsigned int i = 0; i < irf_list_ind.size(); i++) {
|
||||
char tmp[100];
|
||||
int ishock = irf_list_ind[i];
|
||||
const char* shockname = model.getExogNames().getName(ishock);
|
||||
sprintf(tmp, "%s_irfp_%s", prefix, shockname);
|
||||
irf_res[2*i]->writeMat4(fd, tmp);
|
||||
sprintf(tmp, "%s_irfm_%s", prefix, shockname);
|
||||
irf_res[2*i+1]->writeMat4(fd, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|SimulationWorker::operator()()| code@>=
|
||||
void SimulationWorker::operator()()
|
||||
{
|
||||
ExplicitShockRealization* esr = new ExplicitShockRealization(sr, np);
|
||||
TwoDMatrix* m = dr.simulate(em, np, st, *esr);
|
||||
{
|
||||
SYNCHRO syn(&res, "simulation");
|
||||
res.addDataSet(m, esr);
|
||||
}
|
||||
}
|
||||
|
||||
@ Here we create a new instance of |ExplicitShockRealization| of the
|
||||
corresponding control, add the impulse, and simulate.
|
||||
|
||||
@<|SimulationIRFWorker::operator()()| code@>=
|
||||
void SimulationIRFWorker::operator()()
|
||||
{
|
||||
ExplicitShockRealization* esr =
|
||||
new ExplicitShockRealization(res.control.getShocks(idata));
|
||||
esr->addToShock(ishock, 0, imp);
|
||||
TwoDMatrix* m = dr.simulate(em, np, st, *esr);
|
||||
m->add(-1.0, res.control.getData(idata));
|
||||
{
|
||||
SYNCHRO syn(&res, "simulation");
|
||||
res.addDataSet(m, esr);
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|RTSimulationWorker::operator()()| code@>=
|
||||
// Runs one real-time simulation: evaluates the rule period by period,
// feeding each resulting state into a local NormalConj accumulator, then
// merges it into the shared results under a lock.
void RTSimulationWorker::operator()()
{
	NormalConj nc(res.nc.getDim());
	const PartitionY& ypart = dr.getYPart();
	int nu = dr.nexog();
	const Vector& ysteady = dr.getSteady();

	@<initialize vectors and subvectors for simulation@>;
	@<simulate the first real-time period@>;
	@<simulate other real-time periods@>;
	{
		SYNCHRO syn(&res, "rtsimulation");
		res.nc.update(nc);
		// record periods lost to NaN/Inf (ip is the last completed period)
		if (res.num_per-ip > 0) {
			res.incomplete_simulations++;
			res.thrown_periods += res.num_per-ip;
		}
	}
}
|
||||
|
||||
@
|
||||
@<initialize vectors and subvectors for simulation@>=
|
||||
Vector dyu(ypart.nys()+nu);
|
||||
ConstVector ystart_pred(ystart, ypart.nstat, ypart.nys());
|
||||
ConstVector ysteady_pred(ysteady, ypart.nstat, ypart.nys());
|
||||
Vector dy(dyu, 0, ypart.nys());
|
||||
Vector u(dyu, ypart.nys(), nu);
|
||||
Vector y(nc.getDim());
|
||||
ConstVector ypred(y, ypart.nstat, ypart.nys());
|
||||
|
||||
@
|
||||
@<simulate the first real-time period@>=
|
||||
int ip = 0;
|
||||
dy = ystart_pred;
|
||||
dy.add(-1.0, ysteady_pred);
|
||||
sr.get(ip, u);
|
||||
dr.eval(em, y, dyu);
|
||||
nc.update(y);
|
||||
|
||||
@
|
||||
@<simulate other real-time periods@>=
|
||||
while (y.isFinite() && ip < res.num_per) {
|
||||
ip++;
|
||||
dy = ypred;
|
||||
sr.get(ip, u);
|
||||
dr.eval(em, y, dyu);
|
||||
nc.update(y);
|
||||
}
|
||||
|
||||
@ This calculates factorization $FF^T=V$ in the Cholesky way. It does
|
||||
not work for semidefinite matrices.
|
||||
|
||||
@<|RandomShockRealization::choleskyFactor| code@>=
|
||||
void RandomShockRealization::choleskyFactor(const TwoDMatrix& v)
|
||||
{
|
||||
factor = v;
|
||||
int rows = factor.nrows();
|
||||
for (int i = 0; i < rows; i++)
|
||||
for (int j = i+1; j < rows; j++)
|
||||
factor.get(i,j) = 0.0;
|
||||
int info;
|
||||
|
||||
LAPACK_dpotrf("L", &rows, factor.base(), &rows, &info);
|
||||
KORD_RAISE_IF(info != 0,
|
||||
"Info!=0 in RandomShockRealization::choleskyFactor");
|
||||
}
|
||||
|
||||
@ This calculates $FF^T=V$ factorization by symmetric Schur
|
||||
decomposition. It works for semidefinite matrices.
|
||||
|
||||
@<|RandomShockRealization::schurFactor| code@>=
|
||||
void RandomShockRealization::schurFactor(const TwoDMatrix& v)
|
||||
{
|
||||
SymSchurDecomp ssd(v);
|
||||
ssd.getFactor(factor);
|
||||
}
|
||||
|
||||
@
|
||||
@<|RandomShockRealization::get| code@>=
|
||||
void RandomShockRealization::get(int n, Vector& out)
|
||||
{
|
||||
KORD_RAISE_IF(out.length() != numShocks(),
|
||||
"Wrong length of out vector in RandomShockRealization::get");
|
||||
Vector d(out.length());
|
||||
for (int i = 0; i < d.length(); i++) {
|
||||
d[i] = mtwister.normal();
|
||||
}
|
||||
out.zeros();
|
||||
factor.multaVec(out, ConstVector(d));
|
||||
}
|
||||
|
||||
@
|
||||
@<|ExplicitShockRealization| constructor code@>=
|
||||
// Materializes num_per periods of the given realization into an explicit
// matrix of shocks, one column per period.
ExplicitShockRealization::ExplicitShockRealization(ShockRealization& sr,
                                                   int num_per)
  : shocks(sr.numShocks(), num_per)
{
  for (int t = 0; t < num_per; t++) {
    Vector col(shocks, t);
    sr.get(t, col);
  }
}
|
||||
|
||||
@
|
||||
@<|ExplicitShockRealization::get| code@>=
|
||||
void ExplicitShockRealization::get(int n, Vector& out)
|
||||
{
|
||||
KORD_RAISE_IF(out.length() != numShocks(),
|
||||
"Wrong length of out vector in ExplicitShockRealization::get");
|
||||
int i = n % shocks.ncols();
|
||||
ConstVector icol(shocks, i);
|
||||
out = icol;
|
||||
}
|
||||
|
||||
@
|
||||
@<|ExplicitShockRealization::addToShock| code@>=
|
||||
void ExplicitShockRealization::addToShock(int ishock, int iper, double val)
|
||||
{
|
||||
KORD_RAISE_IF(ishock < 0 || ishock > numShocks(),
|
||||
"Wrong index of shock in ExplicitShockRealization::addToShock");
|
||||
int j = iper % shocks.ncols();
|
||||
shocks.get(ishock, j) += val;
|
||||
}
|
||||
|
||||
|
||||
@
|
||||
@<|GenShockRealization::get| code@>=
|
||||
void GenShockRealization::get(int n, Vector& out)
|
||||
{
|
||||
KORD_RAISE_IF(out.length() != numShocks(),
|
||||
"Wrong length of out vector in GenShockRealization::get");
|
||||
ExplicitShockRealization::get(n, out);
|
||||
Vector r(numShocks());
|
||||
RandomShockRealization::get(n, r);
|
||||
for (int j = 0; j < numShocks(); j++)
|
||||
if (! isfinite(out[j]))
|
||||
out[j] = r[j];
|
||||
}
|
||||
|
||||
|
||||
@ End of {\tt decision\_rule.cpp} file.
|
|
@ -0,0 +1,988 @@
|
|||
@q $Id: decision_rule.hweb 2336 2009-01-14 10:37:02Z kamenik $ @>
|
||||
@q Copyright 2004, Ondra Kamenik @>
|
||||
|
||||
@*2 Decision rule and simulation. Start of {\tt decision\_rule.h} file.
|
||||
|
||||
The main purpose of this file is a decision rule representation which
|
||||
can run a simulation. So we define an interface for classes providing
|
||||
realizations of random shocks, and define the class
|
||||
|DecisionRule|. The latter basically takes tensor container of
|
||||
derivatives of policy rules, and adds them up with respect to
|
||||
$\sigma$. The class allows to specify the $\sigma$ different from $1$.
|
||||
|
||||
In addition, we provide classes for running simulations and storing
|
||||
the results, calculating some statistics and generating IRF. The class
|
||||
|DRFixPoint| allows for calculation of the fix point of a given
|
||||
decision rule.
|
||||
|
||||
@s DecisionRule int
|
||||
@s DecisionRuleImpl int
|
||||
@s FoldDecisionRule int
|
||||
@s UnfoldDecisionRule int
|
||||
@s ShockRealization int
|
||||
@s DRFixPoint int
|
||||
@s SimResults int
|
||||
@s SimResultsStats int
|
||||
@s SimResultsDynamicStats int
|
||||
@s RTSimResultsStats int
|
||||
@s SimResultsIRF int
|
||||
@s IRFResults int
|
||||
@s SimulationWorker int
|
||||
@s RTSimulationWorker int
|
||||
@s SimulationIRFWorker int
|
||||
@s RandomShockRealization int
|
||||
@s ExplicitShockRealization int
|
||||
@s GenShockRealization int
|
||||
@s IRFShockRealization int
|
||||
|
||||
@c
|
||||
#ifndef DECISION_RULE_H
|
||||
#define DECISION_RULE_H
|
||||
|
||||
#include <cstdio>

#include "kord_exception.h"
#include "korder.h"
#include "normal_conjugate.h"
#include "mersenne_twister.h"
|
||||
|
||||
@<|ShockRealization| class declaration@>;
|
||||
@<|DecisionRule| class declaration@>;
|
||||
@<|DecisionRuleImpl| class declaration@>;
|
||||
@<|FoldDecisionRule| class declaration@>;
|
||||
@<|UnfoldDecisionRule| class declaration@>;
|
||||
@<|DRFixPoint| class declaration@>;
|
||||
@<|SimResults| class declaration@>;
|
||||
@<|SimResultsStats| class declaration@>;
|
||||
@<|SimResultsDynamicStats| class declaration@>;
|
||||
@<|SimResultsIRF| class declaration@>;
|
||||
@<|RTSimResultsStats| class declaration@>;
|
||||
@<|IRFResults| class declaration@>;
|
||||
@<|SimulationWorker| class declaration@>;
|
||||
@<|SimulationIRFWorker| class declaration@>;
|
||||
@<|RTSimulationWorker| class declaration@>;
|
||||
@<|RandomShockRealization| class declaration@>;
|
||||
@<|ExplicitShockRealization| class declaration@>;
|
||||
@<|GenShockRealization| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ This is a general interface to a shock realizations. The interface
|
||||
has only one method returning the shock realizations at the given
|
||||
time. This method is not constant, since it may change a state of the
|
||||
object.
|
||||
|
||||
@<|ShockRealization| class declaration@>=
class ShockRealization {
public:@;
	virtual ~ShockRealization()@+ {}
	// Fill |out| with the shock realization for period |n|; non-const
	// because an implementation may advance internal state (e.g. an RNG).
	virtual void get(int n, Vector& out) =0;
	// number of exogenous shocks, i.e. the length |get| expects of |out|
	virtual int numShocks() const =0;
};
|
||||
|
||||
@ This class is an abstract interface to decision rule. Its main
|
||||
purpose is to define a common interface for simulation of a decision
|
||||
rule. We need only a simulate, evaluate, centralized clone and output
|
||||
method. The |simulate| method simulates the rule for a given
|
||||
realization of the shocks. |eval| is a primitive evaluation (it takes
|
||||
a vector of state variables (predetermined, both and shocks) and
|
||||
returns the next period variables. Both input and output are in
|
||||
deviations from the rule's steady. |evaluate| method makes only one
|
||||
step of simulation (in terms of absolute values, not
|
||||
deviations). |centralizedClone| returns a new copy of the decision
|
||||
rule, which is centralized about provided fix-point. And finally
|
||||
|writeMat4| writes the decision rule to the Matlab MAT-4 file.
|
||||
|
||||
@<|DecisionRule| class declaration@>=
class DecisionRule {
public:@;
	// polynomial evaluation method: Horner scheme or traditional expansion
	enum emethod {@+ horner, trad @+};
	virtual ~DecisionRule()@+ {}
	// simulate |np| periods from |ystart| with shocks drawn from |sr|;
	// caller owns the returned matrix
	virtual TwoDMatrix* simulate(emethod em, int np, const Vector& ystart,
								 ShockRealization& sr) const =0;
	// raw one-step evaluation; |v| and |out| are in deviations from the steady
	virtual void eval(emethod em, Vector& out, const ConstVector& v) const =0;
	// one simulation step in absolute values (levels), not deviations
	virtual void evaluate(emethod em, Vector& out, const ConstVector& ys,
						  const ConstVector& u) const =0;
	// dump the rule to a Matlab MAT-4 file
	virtual void writeMat4(FILE* fd, const char* prefix) const =0;
	// new copy of the rule centralized about the provided fix-point
	virtual DecisionRule* centralizedClone(const Vector& fixpoint) const =0;
	virtual const Vector& getSteady() const =0;
	// number of exogenous shocks
	virtual int nexog() const =0;
	virtual const PartitionY& getYPart() const =0;
};
|
||||
|
||||
@ The main purpose of this class is to implement |DecisionRule|
|
||||
interface, which is a simulation. To be able to do this we have to
|
||||
know the partitioning of state vector $y$ since we will need to pick
|
||||
only predetermined part $y^*$. Also, we need to know the steady state.
|
||||
|
||||
The decision rule will take the form: $$y_t-\bar
|
||||
y=\sum_{i=0}^n\left[g_{(yu)^i}\right]_{\alpha_1\ldots\alpha_i}\prod_{m=1}^i
|
||||
\left[\matrix{y^*_{t-1}-\bar y^*\cr u_t}\right]^{\alpha_m},$$ where
|
||||
the tensors $\left[g_{(yu)^i}\right]$ are tensors of the constructed
|
||||
container, and $\bar y$ is the steady state.
|
||||
|
||||
If we know the fix point of the rule (conditional zero shocks)
|
||||
$\tilde y$, the rule can be transformed to so called ``centralized''
|
||||
form. This is very similar to the form above but the zero dimensional
|
||||
tensor is zero:
|
||||
$$y_t-\tilde y=\sum_{i=1}^n
|
||||
\left[\tilde g_{(yu)^i}\right]_{\alpha_1\ldots\alpha_i}\prod_{m=1}^i
|
||||
\left[\matrix{y^*_{t-1}-\tilde y^*\cr u_t}\right]^{\alpha_m}.$$
|
||||
We provide a method and a constructor to transform a rule to the centralized form.
|
||||
|
||||
The class is templated, the template argument is either |KOrder::fold|
|
||||
or |KOrder::unfold|. So, there are two implementations of |DecisionRule| interface.
|
||||
|
||||
@<|DecisionRuleImpl| class declaration@>=
template <int t>
class DecisionRuleImpl : public ctraits<t>::Tpol, public DecisionRule {
protected:@;
	typedef typename ctraits<t>::Tpol _Tparent;
	// steady state $\bar y$ about which the polynomial is expressed
	const Vector ysteady;
	// partitioning of the state vector (static/pred/both/forward)
	const PartitionY ypart;
	// number of exogenous shocks
	const int nu;
public:@;
	// wrap an existing polynomial (copied)
	DecisionRuleImpl(const _Tparent& pol, const PartitionY& yp, int nuu,
					 const Vector& ys)
		: ctraits<t>::Tpol(pol), ysteady(ys), ypart(yp), nu(nuu)@+ {}
	// wrap an existing polynomial (contents taken over)
	DecisionRuleImpl(_Tparent& pol, const PartitionY& yp, int nuu,
					 const Vector& ys)
		: ctraits<t>::Tpol(0, yp.ny(), pol), ysteady(ys), ypart(yp),
		  nu(nuu)@+ {}
	// build the polynomial from the derivative container |g| at a given sigma
	DecisionRuleImpl(const _Tg& g, const PartitionY& yp, int nuu,
					 const Vector& ys, double sigma)
		: ctraits<t>::Tpol(yp.ny(), yp.nys()+nuu), ysteady(ys), ypart(yp), nu(nuu)
		{@+ fillTensors(g, sigma);@+}
	// centralize an existing rule about the given fix-point
	DecisionRuleImpl(const DecisionRuleImpl<t>& dr, const ConstVector& fixpoint)
		: ctraits<t>::Tpol(dr.ypart.ny(), dr.ypart.nys()+dr.nu),
		  ysteady(fixpoint), ypart(dr.ypart), nu(dr.nu)
		{@+ centralize(dr);@+}
	const Vector& getSteady() const
		{@+ return ysteady;@+}
	@<|DecisionRuleImpl::simulate| code@>;
	@<|DecisionRuleImpl::evaluate| code@>;
	@<|DecisionRuleImpl::centralizedClone| code@>;
	@<|DecisionRuleImpl::writeMat4| code@>;
	int nexog() const
		{@+ return nu;@+}
	const PartitionY& getYPart() const
		{@+ return ypart;}
protected:@;
	@<|DecisionRuleImpl::fillTensors| code@>;
	@<|DecisionRuleImpl::centralize| code@>;
	@<|DecisionRuleImpl::eval| code@>;
};
|
||||
|
||||
@ Here we have to fill the tensor polynomial. This involves two
|
||||
separated actions. First is to evaluate the approximation at a given
|
||||
$\sigma$, the second is to compile the tensors $[g_{{(yu)}^{i+j}}]$ from
|
||||
$[g_{y^iu^j}]$. The first action is done here, the second is done by
|
||||
method |addSubTensor| of a full symmetry tensor.
|
||||
|
||||
The way how the evaluation is done is described here:
|
||||
|
||||
The $q-$order approximation to the solution can be written as:
|
||||
|
||||
$$
|
||||
\eqalign{
|
||||
y_t-\bar y &= \sum_{l=1}^q{1\over l!}\left[\sum_{i+j+k=l}
|
||||
\left(\matrix{l\cr i,j,k}\right)\left[g_{y^iu^j\sigma^k}\right]
|
||||
_{\alpha_1\ldots\alpha_j\beta_1\ldots\beta_j}
|
||||
\prod_{m=1}^i[y^*_{t-1}-\bar y^*]^{\alpha_m}
|
||||
\prod_{n=1}^j[u_t]^{\beta_m}\sigma^k\right]\cr
|
||||
&= \sum_{l=1}^q\left[\sum_{i+j\leq l}\left(\matrix{i+j\cr i}\right)
|
||||
\left[\sum_{k=0}^{l-i-j}{1\over l!}
|
||||
\left(\matrix{l\cr k}\right)\left[g_{y^iu^j\sigma^k}\right]\sigma^k\right]
|
||||
\prod_{m=1}^i[y^*_{t-1}-\bar y^*]^{\alpha_m}
|
||||
\prod_{n=1}^j[u_t]^{\beta_m}\sigma^k\right]
|
||||
}
|
||||
$$
|
||||
|
||||
This means that for each $i+j+k=l$ we have to add
|
||||
$${1\over l!}\left(\matrix{l\cr
|
||||
k}\right)\left[g_{y^iu^j\sigma^k}\right]\cdot\sigma^k=
|
||||
{1\over (i+j)!k!}\left[g_{y^iu^j\sigma^k}\right]\cdot\sigma^k$$ to
|
||||
$g_{(yu)^{i+j}}$. In addition, note that the multiplier
|
||||
$\left(\matrix{i+j\cr i}\right)$ is applied when the fully symmetric
|
||||
tensor $[g_{(yu)^{i+j}}]$ is evaluated.
|
||||
|
||||
So we go through $i+j=d=0\ldots q$ and in each loop we form the fully
|
||||
symmetric tensor $[g_{(yu)^l}]$ and insert it to the container.
|
||||
|
||||
@<|DecisionRuleImpl::fillTensors| code@>=
void fillTensors(const _Tg& g, double sigma)
{
	// dimensions of the stacked (y*,u) variable
	IntSequence tns(2);
	tns[0] = ypart.nys(); tns[1] = nu;
	// dfact tracks d! across iterations (d++ runs before dfact*=d)
	int dfact = 1;
	for (int d = 0; d <= g.getMaxDim(); d++, dfact*=d) {
		// fully symmetric tensor g_{(yu)^d}, filled from g_{y^i u^j sigma^k}
		_Ttensym* g_yud = new _Ttensym(ypart.ny(), ypart.nys()+nu, d);
		g_yud->zeros();
		@<fill tensor of |g_yud| of dimension |d|@>;
		insert(g_yud);
	}
}
|
||||
|
||||
@ Here we have to fill the tensor $\left[g_{(yu)^d}\right]$. So we go
|
||||
through all pairs $(i,j)$ giving $i+j=d$, and through all $k$ from
|
||||
zero up to maximal dimension minus $d$. In this way we go through all
|
||||
symmetries of $g_{y^iu^j\sigma^k}$ which will be added to $g_{(yu)^d}$.
|
||||
|
||||
Note that at the beginning, |dfact| is a factorial of |d|. We
|
||||
maintain |kfact| so that it equals $k!$. As indicated in
|
||||
|@<|DecisionRuleImpl::fillTensors| code@>|, the added tensor is thus
|
||||
multiplied with ${1\over d!k!}\sigma^k$.
|
||||
|
||||
@<fill tensor of |g_yud| of dimension |d|@>=
for (int i = 0; i <= d; i++) {
	// split dimension d into i powers of y and j powers of u
	int j = d-i;
	// kfact tracks k! (k++ runs before kfact*=k in the loop below)
	int kfact = 1;
	_Ttensor tmp(ypart.ny(),
				 TensorDimens(Symmetry(i,j), tns));
	tmp.zeros();
	for (int k = 0; k+d <= g.getMaxDim(); k++, kfact*=k) {
		Symmetry sym(i,j,0,k);
		if (g.check(sym)) {
			// weight sigma^k / (d! k!) per the expansion derivation above
			double mult = pow(sigma,k)/dfact/kfact;
			tmp.add(mult,*(g.get(sym)));
		}
	}
	g_yud->addSubTensor(tmp);
}
|
||||
|
||||
@ The centralization is straightforward. We suppose here that the
|
||||
object's steady state is the fix point $\tilde y$. It is clear that
|
||||
the new derivatives $\left[\tilde g_{(yu)^i}\right]$ will be equal to
|
||||
the derivatives of the original decision rule |dr| at the new steady
|
||||
state $\tilde y$. So, the new derivatives are obtained by derivating the
|
||||
given decision rule $dr$ and evaluating its polynomial at
|
||||
$$dstate=\left[\matrix{\tilde y^*-\bar y^*\cr 0}\right],$$
|
||||
where $\bar y$ is the steady state of the original rule |dr|.
|
||||
|
||||
@<|DecisionRuleImpl::centralize| code@>=
void centralize(const DecisionRuleImpl& dr)
{
	// dstate = ((new steady)* - (old steady)*, 0) stacked with zero shocks
	Vector dstate(ypart.nys() + nu);
	dstate.zeros();
	Vector dstate_star(dstate, 0, ypart.nys());
	ConstVector newsteady_star(ysteady, ypart.nstat, ypart.nys());
	ConstVector oldsteady_star(dr.ysteady, ypart.nstat, ypart.nys());
	dstate_star.add(1.0, newsteady_star);
	dstate_star.add(-1.0, oldsteady_star);

	// repeatedly derivate |dr|'s polynomial and evaluate at dstate;
	// dfac keeps d! so each inserted tensor is scaled by 1/d!
	_Tpol pol(dr);
	int dfac = 1;
	for (int d = 1; d <= dr.getMaxDim(); d++, dfac *= d) {
		pol.derivative(d-1);
		_Ttensym* der = pol.evalPartially(d, dstate);
		der->mult(1.0/dfac);
		insert(der);
	}
}
|
||||
|
||||
@ Here we evaluate repeatedly the polynomial storing results in the
|
||||
created matrix. For exogenous shocks, we use |ShockRealization|
|
||||
class, for predetermined variables, we use |ystart| as the first
|
||||
state. The |ystart| vector is required to be all state variables
|
||||
|ypart.ny()|, although only the predetermined part of |ystart| is
|
||||
used.
|
||||
|
||||
We simulate in terms of $\Delta y$, this is, at the beginning the
|
||||
|ysteady| is canceled from |ystart|, we simulate, and at the end
|
||||
|ysteady| is added to all columns of the result.
|
||||
|
||||
@<|DecisionRuleImpl::simulate| code@>=
TwoDMatrix* simulate(emethod em, int np, const Vector& ystart,
					 ShockRealization& sr) const
{
	// |ystart| must carry all state variables even though only the
	// predetermined part is used below
	KORD_RAISE_IF(ysteady.length() != ystart.length(),
				  "Start and steady lengths differ in DecisionRuleImpl::simulate");
	// one column per simulated period; ownership passes to the caller
	TwoDMatrix* res = new TwoDMatrix(ypart.ny(), np);

	// simulate in deviations, then add the steady state back at the end
	@<initialize vectors and subvectors for simulation@>;
	@<perform the first step of simulation@>;
	@<perform all other steps of simulations@>;
	@<add the steady state to all columns of |res|@>;
	return res;
}
|
||||
|
||||
@ Here allocate the stack vector $(\Delta y^*, u)$, define the
|
||||
subvectors |dy|, and |u|, then we pickup predetermined parts of
|
||||
|ystart| and |ysteady|.
|
||||
|
||||
@<initialize vectors and subvectors for simulation@>=
// stacked state (dy*, u); |dy| and |u| are views into |dyu|
Vector dyu(ypart.nys()+nu);
// predetermined parts of the start and steady vectors
ConstVector ystart_pred(ystart, ypart.nstat, ypart.nys());
ConstVector ysteady_pred(ysteady, ypart.nstat, ypart.nys());
Vector dy(dyu, 0, ypart.nys());
Vector u(dyu, ypart.nys(), nu);
|
||||
|
||||
|
||||
@ We cancel |ysteady| from |ystart|, get realization to |u|, and
|
||||
evaluate the polynomial.
|
||||
|
||||
@<perform the first step of simulation@>=
// deviation of the initial predetermined state from the steady
dy = ystart_pred;
dy.add(-1.0, ysteady_pred);
// shock realization for period 0
sr.get(0, u);
Vector out(*res, 0);
eval(em, out, dyu);
|
||||
|
||||
@ Also clear. If the result at some period is not finite, we pad the
|
||||
rest of the matrix with zeros and return immediately.
|
||||
|
||||
@<perform all other steps of simulations@>=
for (int i = 1; i < np; i++) {
	// previous column is still in deviations (steady added only at the end)
	ConstVector ym(*res, i-1);
	ConstVector dym(ym, ypart.nstat, ypart.nys());
	dy = dym;
	sr.get(i, u);
	Vector out(*res, i);
	eval(em, out, dyu);
	// on divergence, zero the remaining columns and stop early
	if (! out.isFinite()) {
		if (i+1 < np) {
			TwoDMatrix rest(*res, i+1, np-i-1);
			rest.zeros();
		}
		return res;
	}
}
|
||||
|
||||
@ Even clearer.
|
||||
@<add the steady state to all columns of |res|@>=
// convert the whole simulation from deviations back to levels
for (int i = 0; i < res->ncols(); i++) {
	Vector col(*res, i);
	col.add(1.0, ysteady);
}
|
||||
|
||||
|
||||
@ This is one period evaluation of the decision rule. The simulation
|
||||
is a sequence of repeated one period evaluations with a difference,
|
||||
that the steady state (fix point) is cancelled and added once. Hence
|
||||
we have two special methods.
|
||||
|
||||
@<|DecisionRuleImpl::evaluate| code@>=
void evaluate(emethod em, Vector& out, const ConstVector& ys,
			  const ConstVector& u) const
{
	KORD_RAISE_IF(ys.length() != ypart.nys() || u.length() != nu,
				  "Wrong dimensions of input vectors in DecisionRuleImpl::evaluate");
	KORD_RAISE_IF(out.length() != ypart.ny(),
				  "Wrong dimension of output vector in DecisionRuleImpl::evaluate");
	// stack (y* - ysteady*, u) into one input vector for the polynomial
	Vector dyu(ypart.nys()+nu);
	Vector dy(dyu, 0, ypart.nys());
	Vector du(dyu, ypart.nys(), nu);
	ConstVector steady_pred(ysteady, ypart.nstat, ypart.nys());
	dy = ys;
	dy.add(-1.0, steady_pred);
	du = u;
	// evaluate in deviations, then shift the result back to levels
	eval(em, out, dyu);
	out.add(1.0, ysteady);
}
|
||||
|
||||
@ This is easy. We just return the newly created copy using the
|
||||
centralized constructor.
|
||||
|
||||
@<|DecisionRuleImpl::centralizedClone| code@>=
DecisionRule* centralizedClone(const Vector& fixpoint) const
{
	// delegate to the centralizing constructor; caller owns the result
	return new DecisionRuleImpl<t>(*this, fixpoint);
}
|
||||
|
||||
@ Here we only encapsulate two implementations to one, deciding
|
||||
according to the parameter.
|
||||
|
||||
@<|DecisionRuleImpl::eval| code@>=
void eval(emethod em, Vector& out, const ConstVector& v) const
{
	// dispatch between the two equivalent evaluation schemes
	if (em == DecisionRule::trad)
		_Tparent::evalTrad(out, v);
	else
		_Tparent::evalHorner(out, v);
}
|
||||
|
||||
@ Write the decision rule and steady state to the MAT--4 file.
|
||||
@<|DecisionRuleImpl::writeMat4| code@>=
void writeMat4(FILE* fd, const char* prefix) const
{
	// write the polynomial tensors under the given prefix
	ctraits<t>::Tpol::writeMat4(fd, prefix);
	// then the steady state as a column vector named "<prefix>_ss"
	TwoDMatrix dum(ysteady.length(), 1);
	dum.getData() = ysteady;
	char tmp[100];
	// snprintf bounds the write; sprintf would overflow for long prefixes
	snprintf(tmp, sizeof(tmp), "%s_ss", prefix);
	ConstTwoDMatrix(dum).writeMat4(fd, tmp);
}
|
||||
|
||||
@ This is exactly the same as |DecisionRuleImpl<KOrder::fold>|. The
|
||||
only difference is that we have a conversion from
|
||||
|UnfoldDecisionRule|, which is exactly
|
||||
|DecisionRuleImpl<KOrder::unfold>|.
|
||||
|
||||
@<|FoldDecisionRule| class declaration@>=
class UnfoldDecisionRule;
// Thin typed wrapper over DecisionRuleImpl<KOrder::fold>; it only forwards
// constructors and adds a conversion from the unfolded variant.
class FoldDecisionRule : public DecisionRuleImpl<KOrder::fold> {
	friend class UnfoldDecisionRule;
public:@;
	FoldDecisionRule(const ctraits<KOrder::fold>::Tpol& pol, const PartitionY& yp, int nuu,
					 const Vector& ys)
		: DecisionRuleImpl<KOrder::fold>(pol, yp, nuu, ys) {}
	FoldDecisionRule(ctraits<KOrder::fold>::Tpol& pol, const PartitionY& yp, int nuu,
					 const Vector& ys)
		: DecisionRuleImpl<KOrder::fold>(pol, yp, nuu, ys) {}
	FoldDecisionRule(const ctraits<KOrder::fold>::Tg& g, const PartitionY& yp, int nuu,
					 const Vector& ys, double sigma)
		: DecisionRuleImpl<KOrder::fold>(g, yp, nuu, ys, sigma) {}
	FoldDecisionRule(const DecisionRuleImpl<KOrder::fold>& dr, const ConstVector& fixpoint)
		: DecisionRuleImpl<KOrder::fold>(dr, fixpoint) {}
	// conversion from the unfolded representation (defined elsewhere)
	FoldDecisionRule(const UnfoldDecisionRule& udr);
};
|
||||
|
||||
@ This is exactly the same as |DecisionRuleImpl<KOrder::unfold>|, but
|
||||
with a conversion from |FoldDecisionRule|, which is exactly
|
||||
|DecisionRuleImpl<KOrder::fold>|.
|
||||
|
||||
@<|UnfoldDecisionRule| class declaration@>=
// Thin typed wrapper over DecisionRuleImpl<KOrder::unfold>; it only forwards
// constructors and adds a conversion from the folded variant.
class UnfoldDecisionRule : public DecisionRuleImpl<KOrder::unfold> {
	friend class FoldDecisionRule;
public:@;
	UnfoldDecisionRule(const ctraits<KOrder::unfold>::Tpol& pol, const PartitionY& yp, int nuu,
					   const Vector& ys)
		: DecisionRuleImpl<KOrder::unfold>(pol, yp, nuu, ys) {}
	UnfoldDecisionRule(ctraits<KOrder::unfold>::Tpol& pol, const PartitionY& yp, int nuu,
					   const Vector& ys)
		: DecisionRuleImpl<KOrder::unfold>(pol, yp, nuu, ys) {}
	UnfoldDecisionRule(const ctraits<KOrder::unfold>::Tg& g, const PartitionY& yp, int nuu,
					   const Vector& ys, double sigma)
		: DecisionRuleImpl<KOrder::unfold>(g, yp, nuu, ys, sigma) {}
	UnfoldDecisionRule(const DecisionRuleImpl<KOrder::unfold>& dr, const ConstVector& fixpoint)
		: DecisionRuleImpl<KOrder::unfold>(dr, fixpoint) {}
	// conversion from the folded representation (defined elsewhere)
	UnfoldDecisionRule(const FoldDecisionRule& udr);
};
|
||||
|
||||
|
||||
@ This class serves for calculation of the fix point of the decision
|
||||
rule given that the shocks are zero. The class is very similar to the
|
||||
|DecisionRuleImpl|. Besides the calculation of the fix point, the only
|
||||
difference between |DRFixPoint| and |DecisionRuleImpl| is that the
|
||||
derivatives wrt. shocks are ignored (since shocks are zero during the
|
||||
calculations). That is why we have a different |fillTensors| method.
|
||||
|
||||
The solution algorithm is Newton and is described in
|
||||
|@<|DRFixPoint::solveNewton| code@>|. It solves $F(y)=0$, where
|
||||
$F=g(y,0)-y$. The function $F$ is given by its derivatives |bigf|. The
|
||||
Jacobian of the solved system is given by derivatives stored in
|
||||
|bigfder|.
|
||||
|
||||
@<|DRFixPoint| class declaration@>=
template <int t>
class DRFixPoint : public ctraits<t>::Tpol {
	typedef typename ctraits<t>::Tpol _Tparent;
	// iteration limits and tolerance (defined at file scope)
	static int max_iter;
	static int max_newton_iter;
	// a Newton attempt is made every |newton_pause| dull iterations
	static int newton_pause;
	static double tol;
	const Vector ysteady;
	const PartitionY ypart;
	// F(y) = g(y,0) - y, and its derivative, both as polynomials
	_Tparent* bigf;
	_Tparent* bigfder;
public:@;
	typedef typename DecisionRule::emethod emethod;
	@<|DRFixPoint| constructor code@>;
	@<|DRFixPoint| destructor code@>;
	@<|DRFixPoint::calcFixPoint| code@>;
	// iteration counters filled by |calcFixPoint|
	int getNumIter() const
		{@+ return iter;@+}
	int getNewtonLastIter() const
		{@+ return newton_iter_last;@+}
	int getNewtonTotalIter() const
		{@+ return newton_iter_total;@+}
protected:@;
	@<|DRFixPoint::fillTensors| code@>;
	@<|DRFixPoint::solveNewton| code@>;
private:@;
	int iter;
	int newton_iter_last;
	int newton_iter_total;
};
|
||||
|
||||
|
||||
@ Here we have to setup the function $F=g(y,0)-y$ and ${\partial
|
||||
F\over\partial y}$. The former is taken from the given derivatives of
|
||||
$g$ where a unit matrix is subtracted from the first derivative
|
||||
(|Symmetry(1)|). Then the derivative of the $F$ polynomial is
|
||||
calculated.
|
||||
|
||||
@<|DRFixPoint| constructor code@>=
DRFixPoint(const _Tg& g, const PartitionY& yp,
		   const Vector& ys, double sigma)
	: ctraits<t>::Tpol(yp.ny(), yp.nys()),
	  ysteady(ys), ypart(yp), bigf(NULL), bigfder(NULL)
{
	// build g(y,0) ignoring shock derivatives
	fillTensors(g, sigma);
	// restrict to the predetermined part and form F = g(y,0) - y
	_Tparent yspol(ypart.nstat, ypart.nys(), *this);
	bigf = new _Tparent((const _Tparent&) yspol);
	_Ttensym* frst = bigf->get(Symmetry(1));
	// subtract the unit matrix from the first derivative
	for (int i = 0; i < ypart.nys(); i++)
		frst->get(i,i) = frst->get(i,i) - 1;
	// derivative of F, used as the Newton Jacobian
	bigfder = new _Tparent(*bigf, 0);
}
|
||||
|
||||
@
|
||||
@<|DRFixPoint| destructor code@>=
virtual ~DRFixPoint()
{
	// deleting a null pointer is a no-op, so no guards are needed
	delete bigf;
	delete bigfder;
}
|
||||
|
||||
@ Here we fill the tensors for the |DRFixPoint| class. We ignore the
|
||||
derivatives $g_{y^iu^j\sigma^k}$ for which $j>0$. So we go through all
|
||||
dimensions |d|, and all |k| such that |d+k| is between the maximum
|
||||
dimension and |d|, and add ${\sigma^k\over d!k!}g_{y^d\sigma^k}$ to
|
||||
the tensor $g_{(y)^d}$.
|
||||
|
||||
@<|DRFixPoint::fillTensors| code@>=
void fillTensors(const _Tg& g, double sigma)
{
	// dfact tracks d! (d++ runs before dfact*=d)
	int dfact = 1;
	for (int d = 0; d <= g.getMaxDim(); d++, dfact*=d) {
		// tensor g_{y^d} of the no-shock rule
		_Ttensym* g_yd = new _Ttensym(ypart.ny(), ypart.nys(), d);
		g_yd->zeros();
		// kfact tracks k!; shock derivatives (j>0) are ignored entirely
		int kfact = 1;
		for (int k = 0; d+k <= g.getMaxDim(); k++, kfact*=k) {
			if (g.check(Symmetry(d,0,0,k))) {
				const _Ttensor* ten = g.get(Symmetry(d,0,0,k));
				// weight sigma^k / (d! k!)
				double mult = pow(sigma,k)/dfact/kfact;
				g_yd->add(mult, *ten);
			}
		}
		insert(g_yd);
	}
}
|
||||
|
||||
@ This tries to solve polynomial equation $F(y)=0$, where $F$
|
||||
polynomial is |bigf| and its derivative is in |bigfder|. It returns
|
||||
true if the Newton converged. The method takes the given vector as
|
||||
initial guess, and rewrites it with a solution. The method guarantees
|
||||
to return the vector, which has smaller norm of the residual. That is
|
||||
why the input/output vector |y| is always changed.
|
||||
|
||||
The method proceeds with a Newton step, if the Newton step improves
|
||||
the residual error. So we track residual errors in |flastnorm| and
|
||||
|fnorm| (former and current). In addition, at each step we search for
|
||||
an underrelaxation parameter |urelax|, which improves the residual. If
|
||||
|urelax| is less that |urelax_threshold|, we stop searching and stop
|
||||
the Newton.
|
||||
|
||||
@<|DRFixPoint::solveNewton| code@>=
bool solveNewton(Vector& y)
{
	const double urelax_threshold = 1.e-5;
	// work on a copy; |y| is rewritten only at the end
	Vector sol((const Vector&) y);
	Vector delta(y.length());
	newton_iter_last = 0;
	bool delta_finite = true;
	double flastnorm = 0.0;
	double fnorm = 0.0;
	bool converged = false;
	double urelax = 1.0;

	do {
		// Jacobian of F at the current iterate
		_Ttensym* jacob = bigfder->evalPartially(1, sol);
		// residual F(sol)
		bigf->evalHorner(delta, sol);
		if (newton_iter_last == 0)
			flastnorm = delta.getNorm();
		delta_finite = delta.isFinite();
		if (delta_finite) {
			// Newton direction: delta := J^{-1} F(sol)
			ConstTwoDMatrix(*jacob).multInvLeft(delta);
			// choose an underrelaxation that does not worsen the residual
			@<find |urelax| improving residual@>;
			sol.add(-urelax, delta);
			delta_finite = delta.isFinite();
		}
		delete jacob;
		newton_iter_last++;
		// |fnorm| was set by the urelax search above
		converged = delta_finite && fnorm < tol;
		flastnorm = fnorm;
	} while (!converged && newton_iter_last < max_newton_iter
			 && urelax > urelax_threshold);

	newton_iter_total += newton_iter_last;
	if (! converged)
		newton_iter_last = 0;
	// return the last iterate even on failure (it has a smaller residual)
	y = (const Vector&)sol;
	return converged;
}
|
||||
|
||||
@ Here we find the |urelax|. We cycle as long as the new residual size
|
||||
|fnorm| is greater than last residual size |flastnorm|. If the urelax
|
||||
is less than |urelax_threshold| we give up. The |urelax| is damped by
|
||||
the ratio of |flastnorm| and |fnorm|. If the ratio is close to one, we
|
||||
damp by one half.
|
||||
|
||||
@<find |urelax| improving residual@>=
bool urelax_found = false;
urelax = 1.0;
// shrink urelax until the damped step does not increase the residual,
// or the step becomes negligible (urelax <= urelax_threshold)
while (!urelax_found && urelax > urelax_threshold) {
	Vector soltmp((const Vector&)sol);
	soltmp.add(-urelax, delta);
	Vector f(sol.length());
	bigf->evalHorner(f, soltmp);
	fnorm = f.getNorm();
	if (fnorm <= flastnorm)
		urelax_found = true;
	else
		// damp by the norm ratio, but by at least one half
		urelax *= std::min(0.5, flastnorm/fnorm);
}
|
||||
|
||||
|
||||
@ This method solves the fix point of the no-shocks rule
|
||||
$y_{t+1}=f(y_t)$. It combines dull steps with Newton attempts. The
|
||||
dull steps correspond to evaluations setting $y_{t+1}=f(y_t)$. For
|
||||
reasonable models the dull steps converge to the fix-point but very
|
||||
slowly. That is why we make Newton attempt from time to time. The
|
||||
frequency of the Newton attempts is given by |newton_pause|. We
|
||||
perform the calculations in deviations from the steady state. So, at
|
||||
the end, we have to add the steady state.
|
||||
|
||||
The method also sets the members |iter|, |newton_iter_last| and
|
||||
|newton_iter_total|. These numbers can be examined later.
|
||||
|
||||
The |out| vector is not touched if the algorithm has not converged.
|
||||
|
||||
@<|DRFixPoint::calcFixPoint| code@>=
bool calcFixPoint(emethod em, Vector& out)
{
	KORD_RAISE_IF(out.length() != ypart.ny(),
				  "Wrong length of out in DRFixPoint::calcFixPoint");

	// iterate in deviations from the steady state, starting at zero
	Vector delta(ypart.nys());
	Vector ystar(ypart.nys());
	ystar.zeros();

	iter = 0;
	newton_iter_last = 0;
	newton_iter_total = 0;
	bool converged = false;
	do {
		// every |newton_pause|-th iteration (incl. the first) try Newton
		if ((iter/newton_pause)*newton_pause == iter)
			converged = solveNewton(ystar);
		if (! converged) {
			// dull step: ystar += F(ystar), i.e. ystar := g(ystar,0)
			bigf->evalHorner(delta, ystar);
			KORD_RAISE_IF_X(! delta.isFinite(),
							"NaN or Inf asserted in DRFixPoint::calcFixPoint",
							KORD_FP_NOT_FINITE);
			ystar.add(1.0, delta);
			converged = delta.getNorm() < tol;
		}
		iter++;
	} while (iter < max_iter && ! converged);

	// |out| is written only on convergence, in levels (steady added back)
	if (converged) {
		_Tparent::evalHorner(out, ystar);
		out.add(1.0, ysteady);
	}

	return converged;
}
|
||||
|
||||
|
||||
@ This is a basically a number of matrices of the same dimensions,
|
||||
which can be obtained as simulation results from a given decision rule
|
||||
and shock realizations. We also store the realizations of shocks.
|
||||
|
||||
@<|SimResults| class declaration@>=
class ExplicitShockRealization;
// Container of simulation results: one |num_y| x |num_per| matrix per
// simulation run, together with the shock realization that produced it.
class SimResults {
protected:@;
	int num_y;
	int num_per;
	// owned data sets and matching shock realizations (see destructor)
	vector<TwoDMatrix*> data;
	vector<ExplicitShockRealization*> shocks;
public:@;
	SimResults(int ny, int nper)
		: num_y(ny), num_per(nper)@+ {}
	virtual ~SimResults();
	// run |num_sim| simulations of |dr| from |start| with shock vcov |vcov|
	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
				  const TwoDMatrix& vcov, Journal& journal);
	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
				  const TwoDMatrix& vcov);
	int getNumPer() const
		{@+ return num_per;@+}
	int getNumSets() const
		{@+ return (int)data.size();@+}
	const TwoDMatrix& getData(int i) const
		{@+ return *(data[i]);@+}
	const ExplicitShockRealization& getShocks(int i) const
		{ @+ return *(shocks[i]);@+}
	// takes ownership of |d| and |sr|
	bool addDataSet(TwoDMatrix* d, ExplicitShockRealization* sr);
	void writeMat4(const char* base, const char* lname) const;
	void writeMat4(FILE* fd, const char* lname) const;
};
|
||||
|
||||
@ This does the same as |SimResults| plus it calculates means and
|
||||
covariances of the simulated data.
|
||||
|
||||
@<|SimResultsStats| class declaration@>=
// |SimResults| plus means and covariances computed over the simulated data.
class SimResultsStats : public SimResults {
protected:@;
	Vector mean;
	TwoDMatrix vcov;
public:@;
	SimResultsStats(int ny, int nper)
		: SimResults(ny, nper), mean(ny), vcov(ny,ny)@+ {}
	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
				  const TwoDMatrix& vcov, Journal& journal);
	void writeMat4(FILE* fd, const char* lname) const;
protected:@;
	void calcMean();
	void calcVcov();
};
|
||||
|
||||
@ This does the similar thing as |SimResultsStats| but the statistics are
|
||||
not calculated over all periods but only within each period. Then we
|
||||
do not calculate covariances with periods but only variances.
|
||||
|
||||
@<|SimResultsDynamicStats| class declaration@>=
// Like |SimResultsStats| but statistics are per period (across runs),
// so only variances are kept, not cross-period covariances.
class SimResultsDynamicStats : public SimResults {
protected:@;
	TwoDMatrix mean;
	TwoDMatrix variance;
public:@;
	SimResultsDynamicStats(int ny, int nper)
		: SimResults(ny, nper), mean(ny,nper), variance(ny,nper)@+ {}
	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
				  const TwoDMatrix& vcov, Journal& journal);
	void writeMat4(FILE* fd, const char* lname) const;
protected:@;
	void calcMean();
	void calcVariance();
};
|
||||
|
||||
|
||||
@ This goes through control simulation results, and for each control
|
||||
it adds a given impulse to a given shock and runs a simulation. The
|
||||
control simulation is then cancelled and the result is stored. After
|
||||
that these results are averaged with variances calculated.
|
||||
|
||||
The means and the variances are then written to the MAT-4 file.
|
||||
|
||||
@<|SimResultsIRF| class declaration@>=
class SimulationIRFWorker;
// IRF results: re-simulates each control run with impulse |imp| added to
// shock |ishock|, subtracts the control, and averages across runs.
class SimResultsIRF : public SimResults {
	friend class SimulationIRFWorker;
protected:@;
	const SimResults& control;
	int ishock;
	double imp;
	TwoDMatrix means;
	TwoDMatrix variances;
public:@;
	SimResultsIRF(const SimResults& cntl, int ny, int nper, int i, double impulse)
		: SimResults(ny, nper), control(cntl),
		  ishock(i), imp(impulse),
		  means(ny, nper), variances(ny, nper)@+ {}
	void simulate(const DecisionRule& dr, const Vector& start,
				  Journal& journal);
	void simulate(const DecisionRule& dr, const Vector& start);
	void writeMat4(FILE* fd, const char* lname) const;
protected:@;
	void calcMeans();
	void calcVariances();
};
|
||||
|
||||
@ This simulates and gathers all statistics from the real time
|
||||
simulations. In the |simulate| method, it runs |RTSimulationWorker|s
|
||||
which accumulate information from their own estimates. The estimation
|
||||
is done by means of |NormalConj| class, which is a conjugate family of
|
||||
densities for normal distributions.
|
||||
|
||||
@<|RTSimResultsStats| class declaration@>=
class RTSimulationWorker;
// Statistics from real-time simulations; workers feed their estimates
// into the |NormalConj| conjugate-family estimator.
class RTSimResultsStats {
	friend class RTSimulationWorker;
protected:@;
	Vector mean;
	TwoDMatrix vcov;
	int num_per;
	NormalConj nc;
	// bookkeeping of simulations cut short by non-finite results
	int incomplete_simulations;
	int thrown_periods;
public:@;
	RTSimResultsStats(int ny, int nper)
		: mean(ny), vcov(ny, ny),
		  num_per(nper), nc(ny),
		  incomplete_simulations(0), thrown_periods(0)@+ {}
	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
				  const TwoDMatrix& vcov, Journal& journal);
	void simulate(int num_sim, const DecisionRule& dr, const Vector& start,
				  const TwoDMatrix& vcov);
	void writeMat4(FILE* fd, const char* lname);
};
|
||||
|
||||
@ For each shock, this simulates plus and minus impulse. The class
|
||||
maintains a vector of simulation results, each gets a particular shock
|
||||
and sign (positive/negative). The results of type |SimResultsIRF| are
|
||||
stored in a vector so that even ones are positive, odd ones are
|
||||
negative.
|
||||
|
||||
The constructor takes a reference to the control simulations, which
|
||||
must be finished before the constructor is called. The control
|
||||
simulations are passed to all |SimResultsIRF|s.
|
||||
|
||||
The constructor also takes the vector of indices of exogenous
|
||||
variables (|ili|) for which the IRFs are generated. The list is kept
|
||||
(as |irf_list_ind|) for other methods.
|
||||
|
||||
@<|IRFResults| class declaration@>=
class DynamicModel;
// Holds IRF simulation results for selected shocks; |irf_res| stores one
// |SimResultsIRF| per impulse, even indices positive, odd indices negative
// (see the section text above).
class IRFResults {
// Owned results; released in the destructor.
vector<SimResultsIRF*> irf_res;
const DynamicModel& model;
// Indices of exogenous variables for which IRFs are generated.
vector<int> irf_list_ind;
public:@;
// |control| must be a finished control simulation.
IRFResults(const DynamicModel& mod, const DecisionRule& dr,
const SimResults& control, const vector<int>& ili,
Journal& journal);
~IRFResults();
void writeMat4(FILE* fd, const char* prefix) const;
};
|
||||
|
||||
@ This worker simulates the given decision rule and inserts the result
|
||||
to |SimResults|.
|
||||
|
||||
@<|SimulationWorker| class declaration@>=
// Thread worker: simulates the decision rule once and inserts the
// resulting data into the shared |SimResults|.
class SimulationWorker : public THREAD {
protected:@;
// Destination for the finished simulation.
SimResults& res;
const DecisionRule& dr;
// Evaluation method of the decision rule.
DecisionRule::emethod em;
// Number of periods to simulate.
int np;
// Starting point of the simulation.
const Vector& st;
// Source of shock draws for the simulation.
ShockRealization& sr;
public:@;
SimulationWorker(SimResults& sim_res,
const DecisionRule& dec_rule,
DecisionRule::emethod emet, int num_per,
const Vector& start, ShockRealization& shock_r)
: res(sim_res), dr(dec_rule), em(emet), np(num_per), st(start), sr(shock_r) {}
// Thread entry point.
void operator()();
};
|
||||
|
||||
@ This worker simulates a given impulse |imp| to a given shock
|
||||
|ishock| based on a given control simulation with index |idata|. The
|
||||
control simulations are contained in |SimResultsIRF| which is passed
|
||||
to the constructor.
|
||||
|
||||
@<|SimulationIRFWorker| class declaration@>=
// Thread worker: applies impulse |imp| to shock |ishock| on top of the
// control simulation with index |idata| (the controls live in the
// |SimResultsIRF| passed to the constructor).
class SimulationIRFWorker : public THREAD {
// Destination (also provides the control simulations).
SimResultsIRF& res;
const DecisionRule& dr;
DecisionRule::emethod em;
// Number of periods to simulate.
int np;
// Starting point of the simulation.
const Vector& st;
// Index of the control simulation this worker perturbs.
int idata;
// Shock index and impulse size.
int ishock;
double imp;
public:@;
SimulationIRFWorker(SimResultsIRF& sim_res,
const DecisionRule& dec_rule,
DecisionRule::emethod emet, int num_per,
const Vector& start, int id,
int ishck, double impulse)
: res(sim_res), dr(dec_rule), em(emet), np(num_per), st(start),
idata(id), ishock(ishck), imp(impulse)@+ {}
// Thread entry point.
void operator()();
};
|
||||
|
||||
@ This class does the real time simulation job for
|
||||
|RTSimResultsStats|. It simulates the model period by period. It
|
||||
accumulates the information in the |RTSimResultsStats::nc|. If NaN or
|
||||
Inf is observed, it ends the simulation and adds to the
|
||||
|thrown_periods| of |RTSimResultsStats|.
|
||||
|
||||
@<|RTSimulationWorker| class declaration@>=
// Thread worker for |RTSimResultsStats|: simulates period by period,
// accumulating into |RTSimResultsStats::nc|; on NaN/Inf the simulation
// stops and the discarded periods are added to |thrown_periods|.
class RTSimulationWorker : public THREAD {
protected:@;
// Shared accumulator of real-time statistics.
RTSimResultsStats& res;
const DecisionRule& dr;
DecisionRule::emethod em;
// Number of periods to simulate.
int np;
// Starting point of the simulation.
const Vector& ystart;
// Source of shock draws.
ShockRealization& sr;
public:@;
RTSimulationWorker(RTSimResultsStats& sim_res,
const DecisionRule& dec_rule,
DecisionRule::emethod emet, int num_per,
const Vector& start, ShockRealization& shock_r)
: res(sim_res), dr(dec_rule), em(emet), np(num_per), ystart(start), sr(shock_r) {}
// Thread entry point.
void operator()();
};
|
||||
|
||||
@ This class generates draws from Gaussian distribution with zero mean
|
||||
and the given variance-covariance matrix. It stores the factor of vcov
|
||||
$V$ matrix, yielding $FF^T = V$.
|
||||
|
||||
@<|RandomShockRealization| class declaration@>=
// Draws from a zero-mean Gaussian with the given vcov matrix $V$;
// stores a factor $F$ with $FF^T=V$ (Schur factor by default).
class RandomShockRealization : virtual public ShockRealization {
protected:@;
// Random number generator (seeded in the constructor).
MersenneTwister mtwister;
// Factor $F$ of the vcov matrix, $FF^T=V$.
TwoDMatrix factor;
public:@;
RandomShockRealization(const TwoDMatrix& v, unsigned int iseed)
: mtwister(iseed), factor(v.nrows(),v.nrows())
{@+schurFactor(v);@+}
RandomShockRealization(const RandomShockRealization& sr)
: mtwister(sr.mtwister), factor(sr.factor)@+ {}
virtual ~RandomShockRealization() @+{}
// Fills |out| with the draw for period |n|.
void get(int n, Vector& out);
int numShocks() const
{@+ return factor.nrows();@+}
protected:@;
// Two alternative factorizations of |v| into |factor|.
void choleskyFactor(const TwoDMatrix& v);
void schurFactor(const TwoDMatrix& v);
};
|
||||
|
||||
@ This is just a matrix of finite numbers. It can be constructed from
|
||||
any |ShockRealization| with a given number of periods.
|
||||
|
||||
@<|ExplicitShockRealization| class declaration@>=
// A shock realization given explicitly as a finite matrix
// (rows are shocks, columns are periods).
class ExplicitShockRealization : virtual public ShockRealization {
// Stored shock values.
TwoDMatrix shocks;
public:@;
ExplicitShockRealization(const TwoDMatrix& sh)
: shocks(sh)@+ {}
ExplicitShockRealization(const ExplicitShockRealization& sr)
: shocks(sr.shocks)@+ {}
// Materializes |num_per| periods drawn from an arbitrary realization.
ExplicitShockRealization(ShockRealization& sr, int num_per);
void get(int n, Vector& out);
int numShocks() const
{@+ return shocks.nrows();@+}
// Adds |val| to shock |ishock| at period |iper|.
void addToShock(int ishock, int iper, double val);
void print() const
{@+ shocks.print();@+}
};
|
||||
|
||||
@ This represents a user given shock realization. The first matrix of
|
||||
the constructor is a covariance matrix of shocks, the second matrix is
|
||||
a rectangular matrix, where columns correspond to periods, rows to
|
||||
shocks. If an element of the matrix is {\tt NaN}, or {\tt Inf}, or
|
||||
{\tt -Inf}, then the random shock is taken instead of that element.
|
||||
|
||||
In this way it is a generalization of both |RandomShockRealization|
|
||||
and |ExplicitShockRealization|.
|
||||
|
||||
@<|GenShockRealization| class declaration@>=
// User-given realization generalizing both parents: |sh| entries that are
// NaN/Inf are replaced by random draws from vcov |v| (see section text).
class GenShockRealization : public RandomShockRealization, public ExplicitShockRealization {
public:@;
// |v| is the shocks' vcov matrix, |sh| the explicit shock matrix
// (rows = shocks, columns = periods); dimensions must agree.
GenShockRealization(const TwoDMatrix& v, const TwoDMatrix& sh, int seed)
: RandomShockRealization(v, seed), ExplicitShockRealization(sh)@+
{
KORD_RAISE_IF(sh.nrows() != v.nrows() || v.nrows() != v.ncols(),
"Wrong dimension of input matrix in GenShockRealization constructor");
}
void get(int n, Vector& out);
int numShocks() const
{@+ return RandomShockRealization::numShocks();@+}
};
|
||||
|
||||
@ End of {\tt decision\_rule.h} file.
|
|
@ -0,0 +1,59 @@
|
|||
@q $Id: dynamic_model.cweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt dynamic\_model.cpp} file.
|
||||
|
||||
@c
|
||||
#include "dynamic_model.h"
|
||||
|
||||
@<|NameList::print| code@>;
|
||||
@<|NameList::writeMat4| code@>;
|
||||
@<|NameList::writeMat4Indices| code@>;
|
||||
|
||||
@
|
||||
@<|NameList::print| code@>=
// Dump all names to standard output, one per line.
void NameList::print() const
{
	const int nnames = getNum();
	for (int idx = 0; idx < nnames; idx++)
		printf("%s\n", getName(idx));
}
|
||||
|
||||
@
|
||||
@<|NameList::writeMat4| code@>=
// Writes the names as a MAT-4 "text matrix": one row per name, columns are
// character codes stored as doubles, right-padded with spaces to the
// longest name.  Does nothing when all names are empty.
void NameList::writeMat4(FILE* fd, const char* vname) const
{
	int maxlen = 0;
	for (int i = 0; i < getNum(); i++)
		if (maxlen < (int)strlen(getName(i)))
			maxlen = (int)strlen(getName(i));

	if (maxlen == 0)
		return;

	TwoDMatrix m(getNum(), maxlen);
	for (int i = 0; i < getNum(); i++) {
		// Hoist the name and its length; the original recomputed
		// strlen(getName(i)) once per column of the row.
		const char* name = getName(i);
		int len = (int)strlen(name);
		for (int j = 0; j < maxlen; j++)
			m.get(i,j) = (double)((j < len) ? name[j] : ' ');
	}

	Mat4Header header(m, vname, "text matrix");
	header.write(fd);
	fwrite(m.getData().base(), sizeof(double), m.nrows()*m.ncols(), fd);
}
|
||||
|
||||
@
|
||||
@<|NameList::writeMat4Indices| code@>=
// For each name writes a 1x1 MAT-4 matrix "<prefix>_i_<name>" holding the
// 1-based index of the name.
void NameList::writeMat4Indices(FILE* fd, const char* prefix) const
{
	char tmp[100];
	TwoDMatrix aux(1,1);
	for (int i = 0; i < getNum(); i++) {
		// snprintf guards the fixed buffer; the original sprintf could
		// overflow for long prefix/name combinations.
		snprintf(tmp, sizeof tmp, "%s_i_%s", prefix, getName(i));
		aux.get(0,0) = i+1;
		aux.writeMat4(fd, tmp);
	}
}
|
||||
|
||||
@ End of {\tt dynamic\_model.cpp} file.
|
|
@ -0,0 +1,120 @@
|
|||
@q $Id: dynamic_model.hweb 378 2005-07-21 15:50:20Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Dynamic model abstraction. Start of {\tt dynamic\_model.h} file.
|
||||
|
||||
This file only defines a generic interface to an SDGE model. The model
|
||||
takes the form:
|
||||
$$E_t\left[f(g^{**}(g^*(y,u_t),u_{t+1}),g(y,u),y,u_t)\right]=0$$
|
||||
The interface is defined via pure virtual class |DynamicModel|.
|
||||
|
||||
@s NameList int
|
||||
@s DynamicModel int
|
||||
@c
|
||||
#ifndef DYNAMIC_MODEL_H
|
||||
#define DYNAMIC_MODEL_H
|
||||
|
||||
#include "t_container.h"
|
||||
#include "sparse_tensor.h"
|
||||
|
||||
#include "Vector.h"
|
||||
|
||||
@<|NameList| class declaration@>;
|
||||
@<|DynamicModel| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ The class is a virtual pure class which provides an access to names
|
||||
of the variables.
|
||||
@<|NameList| class declaration@>=
// Pure virtual interface giving access to a list of variable names.
class NameList {
public:@;
virtual ~NameList() {}
// Number of names in the list.
virtual int getNum() const =0;
// i-th name (0-based).
virtual const char* getName(int i) const=0;
void print() const;
// Writes the names as a MAT-4 text matrix named |vname|.
void writeMat4(FILE* fd, const char* vname) const;
// Writes one 1x1 index matrix per name, prefixed by |prefix|.
void writeMat4Indices(FILE* fd, const char* prefix) const;
};
|
||||
|
||||
@ This is the interface to an information on a generic SDGE
|
||||
model. It is sufficient for calculations of policy rule Taylor
|
||||
approximations at some (not necessarily deterministic) steady state.
|
||||
|
||||
We need to know a partitioning of endogenous variables $y$. We suppose
|
||||
that $y$ is partitioned as
|
||||
$$y=\left[\matrix{\hbox{static}\cr\hbox{pred}\cr\hbox{both}\cr\hbox{forward}}\right]$$
|
||||
of which we define
|
||||
$$y^*=\left[\matrix{\hbox{pred}\cr\hbox{both}}\right]\quad
|
||||
y^{**}=\left[\matrix{\hbox{both}\cr\hbox{forward}}\right]$$
|
||||
where ``static'' are meant those variables, which appear only at time
|
||||
$t$; ``pred'' are meant those variables, which appear only at $t$ and
|
||||
$t-1$; ``both'' are meant those variables, which appear at least at
|
||||
$t-1$ and $t+1$; and ``forward'' are meant those variables, which
|
||||
appear only at $t$ and $t+1$. This partitioning is given by methods
|
||||
|nstat()|, |npred()|, |nboth()|, and |nforw()|. The number of
|
||||
equations |numeq()| must be the same as a number of endogenous
|
||||
variables.
|
||||
|
||||
In order to complete description, we need to know a number of
|
||||
exogenous variables, which is a size of $u$, hence |nexog()| method.
|
||||
|
||||
The model contains an information about names of variables, the
|
||||
variance-covariance matrix of the shocks, the derivatives of equations
|
||||
of $f$ at some steady state, and the steady state. These can be
|
||||
retrieved by the corresponding methods.
|
||||
|
||||
The derivatives of the system are calculated with respect to stacked
|
||||
variables, the stack looks as:
|
||||
$$\left[\matrix{y^{**}_{t+1}\cr y_t\cr y^*_{t-1}\cr u_t}\right].$$
|
||||
|
||||
There are only three operations. The first
|
||||
|solveDeterministicSteady()| solves the deterministic steady steate
|
||||
which can be retrieved by |getSteady()| later. The method
|
||||
|evaluateSystem| calculates $f(y^{**},y,y^*,u)$, where $y$ and $u$ are
|
||||
passed, or $f(y^{**}_{t+1}, y_t, y^*_{t-1}, u)$, where $y^{**}_{t+1}$,
|
||||
$y_t$, $y^*_{t-1}$, $u$ are passed. Finally, the method
|
||||
|calcDerivativesAtSteady()| calculates derivatives of $f$ at the
|
||||
current steady state, and zero shocks. The derivatives can be
|
||||
retrieved with |getModelDerivatives()|. All the derivatives are done
|
||||
up to a given order in the model, which can be retrieved by |order()|.
|
||||
|
||||
The model initialization is done in a constructor of the implementing
|
||||
class. The constructor usually calls a parser, which parses a given
|
||||
file (usually a text file), and retrieves all necessary information
|
||||
about the model, including variables, partitioning, variance-covariance
|
||||
matrix, information helpful for calculation of the deterministic
|
||||
steady state, and so on.
|
||||
|
||||
@<|DynamicModel| class declaration@>=
// Pure virtual interface to an SDGE model; see the section text above for
// the partitioning of $y$ and the stacking order of derivatives.
class DynamicModel {
public:@;
virtual DynamicModel* clone() const =0;
virtual ~DynamicModel() {}

// Partitioning of endogenous variables: static, both, predetermined,
// forward-looking; and the number of exogenous variables.
virtual int nstat() const =0;
virtual int nboth() const =0;
virtual int npred() const =0;
virtual int nforw() const =0;
virtual int nexog() const =0;
// Order up to which model derivatives are calculated.
virtual int order() const =0;
// Number of equations; equals the number of endogenous variables.
int numeq() const
{@+ return nstat()+nboth()+npred()+nforw(); @+}

virtual const NameList& getAllEndoNames() const =0;
virtual const NameList& getStateNames() const =0;
virtual const NameList& getExogNames() const =0;
// Variance-covariance matrix of the shocks.
virtual const TwoDMatrix& getVcov() const =0;
// Derivatives of $f$ at the current steady state.
virtual const TensorContainer<FSSparseTensor>& getModelDerivatives() const =0;
virtual const Vector& getSteady() const =0;
virtual Vector& getSteady() =0;

// Solves the deterministic steady state (retrieve with |getSteady()|).
virtual void solveDeterministicSteady() =0;
// Evaluates $f$ at |yy| (all times equal) and shocks |xx|.
virtual void evaluateSystem(Vector& out, const Vector& yy, const Vector& xx) =0;
// Evaluates $f$ at $y^*_{t-1}$=|yym|, $y_t$=|yy|, $y^{**}_{t+1}$=|yyp|.
virtual void evaluateSystem(Vector& out, const Vector& yym, const Vector& yy,
const Vector& yyp, const Vector& xx) =0;
// Calculates derivatives of $f$ at the current steady state, zero shocks.
virtual void calcDerivativesAtSteady() =0;
};
|
||||
|
||||
|
||||
@ End of {\tt dynamic\_model.h} file.
|
|
@ -0,0 +1,158 @@
|
|||
@q $Id: faa_di_bruno.cweb 744 2006-05-09 13:16:07Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt faa\_di\_bruno.cpp} file.
|
||||
|
||||
@s FoldedFineContainer int
|
||||
@s UnfoldedFineContainer int
|
||||
|
||||
@c
|
||||
#include "faa_di_bruno.h"
|
||||
#include "fine_container.h"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
double FaaDiBruno::magic_mult = 1.5;
|
||||
@<|FaaDiBruno::calculate| folded sparse code@>;
|
||||
@<|FaaDiBruno::calculate| folded dense code@>;
|
||||
@<|FaaDiBruno::calculate| unfolded sparse code@>;
|
||||
@<|FaaDiBruno::calculate| unfolded dense code@>;
|
||||
@<|FaaDiBruno::estimRefinment| code@>;
|
||||
|
||||
@ We take an opportunity to refine the stack container to avoid
|
||||
allocation of more memory than available.
|
||||
|
||||
@<|FaaDiBruno::calculate| folded sparse code@>=
// Folded sparse variant: before each dimension's |multAndAdd| the stack
// container is refined so the expected memory use fits available memory
// (see |estimRefinment|); refinement statistics go to the journal.
void FaaDiBruno::calculate(const StackContainer<FGSTensor>& cont,
const TensorContainer<FSSparseTensor>& f,
FGSTensor& out)
{
out.zeros();
for (int l = 1; l <= out.dimen(); l++) {
int mem_mb, p_size_mb;
// Maximum rows per refined stack for dimension |l|.
int max = estimRefinment(out.getDims(), out.nrows(), l, mem_mb, p_size_mb);
FoldedFineContainer fine_cont(cont, max);
fine_cont.multAndAdd(l, f, out);
JournalRecord recc(journal);
recc << "dim=" << l << " avmem=" << mem_mb << " tmpmem=" << p_size_mb << " max=" << max
<< " stacks=" << cont.numStacks() << "->" << fine_cont.numStacks() << endrec;
}
}
|
||||
|
||||
@ Here we just simply evaluate |multAndAdd| for the dense
|
||||
container. There is no opportunity for tuning.
|
||||
|
||||
@<|FaaDiBruno::calculate| folded dense code@>=
// Folded dense variant: plain |multAndAdd| per dimension, no refinement
// opportunity; only available memory is journaled.
void FaaDiBruno::calculate(const FoldedStackContainer& cont, const FGSContainer& g,
FGSTensor& out)
{
out.zeros();
for (int l = 1; l <= out.dimen(); l++) {
// Snapshot of available memory before the work, for the journal.
long int mem = SystemResources::availableMemory();
cont.multAndAdd(l, g, out);
JournalRecord rec(journal);
int mem_mb = mem/1024/1024;
rec << "dim=" << l << " avmem=" << mem_mb << endrec;
}
}
|
||||
|
||||
@ This is the same as |@<|FaaDiBruno::calculate| folded sparse
|
||||
code@>|. The only difference is that we construct unfolded fine
|
||||
container.
|
||||
|
||||
@<|FaaDiBruno::calculate| unfolded sparse code@>=
// Unfolded sparse variant: identical to the folded sparse code except an
// |UnfoldedFineContainer| is constructed for the refinement.
void FaaDiBruno::calculate(const StackContainer<UGSTensor>& cont,
const TensorContainer<FSSparseTensor>& f,
UGSTensor& out)
{
out.zeros();
for (int l = 1; l <= out.dimen(); l++) {
int mem_mb, p_size_mb;
int max = estimRefinment(out.getDims(), out.nrows(), l, mem_mb, p_size_mb);
UnfoldedFineContainer fine_cont(cont, max);
fine_cont.multAndAdd(l, f, out);
JournalRecord recc(journal);
recc << "dim=" << l << " avmem=" << mem_mb << " tmpmem=" << p_size_mb << " max=" << max
<< " stacks=" << cont.numStacks() << "->" << fine_cont.numStacks() << endrec;
}
}
|
||||
|
||||
@ Again, no tuning opportunity here.
|
||||
@<|FaaDiBruno::calculate| unfolded dense code@>=
// Unfolded dense variant: plain |multAndAdd| per dimension, no tuning.
void FaaDiBruno::calculate(const UnfoldedStackContainer& cont, const UGSContainer& g,
UGSTensor& out)
{
out.zeros();
for (int l = 1; l <= out.dimen(); l++) {
// Snapshot of available memory before the work, for the journal.
long int mem = SystemResources::availableMemory();
cont.multAndAdd(l, g, out);
JournalRecord rec(journal);
int mem_mb = mem/1024/1024;
rec << "dim=" << l << " avmem=" << mem_mb << endrec;
}
}
|
||||
|
||||
@ This function returns a number of maximum rows used for refinement of
|
||||
the stacked container. We want to set the maximum so that the expected
|
||||
memory consumption for the number of parallel threads would be less
|
||||
than available memory. On the other hand we do not want to be too
|
||||
pessimistic since a very fine refinement can be very slow.
|
||||
|
||||
Besides memory needed for a dense unfolded slice of a tensor from
|
||||
|f|, each thread needs |magic_mult*per_size| bytes of memory. In the
|
||||
worst case, |magic_mult| will be equal to 2, this means memory
|
||||
|per_size| for target temporary (permuted symmetry) tensor plus one
|
||||
copy for intermediate result. However, this shows to be too
|
||||
pessimistic, so we set |magic_mult| to 1.5. The memory for permuted
|
||||
symmetry temporary tensor |per_size| is estimated as a weighted
|
||||
average of unfolded memory of the |out| tensor and unfolded memory of
|
||||
a symmetric tensor with the largest coordinate size. Some experiments
|
||||
showed that the best combination of the two is to take 100\% of the
|
||||
latter, so we set |lambda| to zero.
|
||||
|
||||
The |max| number of rows in the refined |cont| must be such that each
|
||||
slice fits to remaining memory. Number of columns of the slice are
|
||||
never greater than $max^l$. (This is not true, since stacks corresponding to
|
||||
unit/zero matrices cannot be further refined). We get an equation:
|
||||
|
||||
$$nthreads\cdot max^l\cdot 8\cdot r = mem -
|
||||
magic\_mult\cdot nthreads\cdot per\_size\cdot 8\cdot r,$$
|
||||
where |mem| is available memory in bytes, |nthreads| is a number of
|
||||
threads, $r$ is a number of rows, and $8$ is |sizeof(double)|.
|
||||
|
||||
If the right hand side is less than zero, we set |max| to 10, just to
|
||||
let it do something.
|
||||
|
||||
@<|FaaDiBruno::estimRefinment| code@>=
// Returns the maximum number of rows for stack-container refinement so the
// threads' expected memory fits available memory (derivation in the section
// text above).  Outputs: |avmem_mb| = available memory in MB, |tmpmem_mb| =
// estimated temporary memory for all threads in MB.
int FaaDiBruno::estimRefinment(const TensorDimens& tdims, int nr, int l,
int& avmem_mb, int& tmpmem_mb)
{
int nthreads = THREAD_GROUP::max_parallel_threads;
// Two estimates of per-thread temporary size: unfolded |out| memory ...
long int per_size1 = tdims.calcUnfoldMaxOffset();
// ... and unfolded memory of a symmetric tensor with the largest
// coordinate size.
long int per_size2 = (long int)pow((double)tdims.getNVS().getMax(), l);
// Weight of |per_size1| in the average; experiments favor 0 (all weight
// on |per_size2|, see section text).
double lambda = 0.0;
long int per_size = sizeof(double)*nr
*(long int)(lambda*per_size1+(1-lambda)*per_size2);
long int mem = SystemResources::availableMemory();
int max = 0;
// Solve nthreads*max^l*8*nr = mem - magic_mult*nthreads*per_size for max.
double num_cols = ((double)(mem-magic_mult*nthreads*per_size))
/nthreads/sizeof(double)/nr;
if (num_cols > 0) {
double maxd = pow(num_cols, ((double)1)/l);
max = (int)floor(maxd);
}
if (max == 0) {
// Right hand side was non-positive (or rounded to zero): impose a
// small fallback so the computation can still proceed.
max = 10;
JournalRecord rec(journal);
rec << "dim=" << l << " run out of memory, imposing max=" << max;
if (nthreads > 1)
rec << " (decrease number of threads)";
rec << endrec;
}
avmem_mb = mem/1024/1024;
tmpmem_mb = (nthreads*per_size)/1024/1024;
return max;
}
|
||||
|
||||
|
||||
@ End of {\tt faa\_di\_bruno.cpp} file.
|
|
@ -0,0 +1,49 @@
|
|||
@q $Id: faa_di_bruno.hweb 744 2006-05-09 13:16:07Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Faa Di Bruno evaluator. Start of {\tt faa\_di\_bruno.h} file.
|
||||
|
||||
This defines a class which implements Faa Di Bruno Formula
|
||||
$$\left[B_{s^k}\right]_{\alpha_1\ldots\alpha_l}=\left[f_{z^l}\right]_{\beta_1\ldots\beta_l}
|
||||
\sum_{c\in M_{l,k}}\prod_{m=1}^l\left[z_{s^k(c_m)}\right]^{\beta_m}_{c_m(\alpha)}$$
|
||||
where $s^k$ is a general symmetry of dimension $k$ and $z$ is a stack of functions.
|
||||
|
||||
@s FaaDiBruno int
|
||||
|
||||
@c
|
||||
#ifndef FAA_DI_BRUNO_H
|
||||
#define FAA_DI_BRUNO_H
|
||||
|
||||
#include "journal.h"
|
||||
#include "stack_container.h"
|
||||
#include "t_container.h"
|
||||
#include "sparse_tensor.h"
|
||||
#include "gs_tensor.h"
|
||||
|
||||
@<|FaaDiBruno| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ Nothing special here. See |@<|FaaDiBruno::calculate| folded sparse
|
||||
code@>| for reason of having |magic_mult|.
|
||||
|
||||
@<|FaaDiBruno| class declaration@>=
// Evaluator of the Faa Di Bruno formula (see the section text); folded and
// unfolded, sparse and dense variants.  |magic_mult| tunes the memory
// heuristic in |estimRefinment| (see the folded sparse code section).
class FaaDiBruno {
// Journal for refinement/memory statistics.
Journal& journal;
public:@;
FaaDiBruno(Journal& jr)
: journal(jr)@+ {}
void calculate(const StackContainer<FGSTensor>& cont, const TensorContainer<FSSparseTensor>& f,
FGSTensor& out);
void calculate(const FoldedStackContainer& cont, const FGSContainer& g,
FGSTensor& out);
void calculate(const StackContainer<UGSTensor>& cont, const TensorContainer<FSSparseTensor>& f,
UGSTensor& out);
void calculate(const UnfoldedStackContainer& cont, const UGSContainer& g,
UGSTensor& out);
protected:@;
int estimRefinment(const TensorDimens& tdims, int nr, int l, int& avmem_mb, int& tmpmem_mb);
// Multiplier on per-thread temporary memory; 1.5 (2 would be the
// worst case, see section text).
static double magic_mult;
};
|
||||
|
||||
@ End of {\tt faa\_di\_bruno.h} file.
|
|
@ -0,0 +1,302 @@
|
|||
@q $Id: first_order.cweb 2351 2009-09-03 14:58:03Z kamenik $ @>
|
||||
@q Copyright 2004, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt first\_order.cpp} file.
|
||||
|
||||
@c
|
||||
|
||||
#include "kord_exception.h"
|
||||
#include "first_order.h"
|
||||
#include "cpplapack.h"
|
||||
|
||||
double qz_criterium = 1.000001;
|
||||
@<|order_eigs| function code@>;
|
||||
@<|FirstOrder::solve| code@>;
|
||||
@<|FirstOrder::journalEigs| code@>;
|
||||
|
||||
@ This is a function which selects the eigenvalues pair used by
|
||||
|LAPACK_dgges|. See documentation to DGGES for details. Here we want
|
||||
to select (return true) the pairs for which $\alpha<\beta$.
|
||||
|
||||
@<|order_eigs| function code@>=
// Eigenvalue selector for DGGES: accept (return nonzero) the pairs whose
// generalized eigenvalue modulus |alpha/beta| is below the QZ criterium.
int order_eigs(const double* alphar, const double* alphai, const double* beta)
{
	const double alpha_mod2 = *alphar * *alphar + *alphai * *alphai;
	const double bound2 = *beta * *beta * qz_criterium;
	return (alpha_mod2 < bound2) ? 1 : 0;
}
|
||||
|
||||
|
||||
@ Here we solve the linear approximation. The result are the matrices
|
||||
$g_{y^*}$ and $g_u$. The method solves the first derivatives of $g$ so
|
||||
that the following equation would be true:
|
||||
$$E_t[F(y^*_{t-1},u_t,u_{t+1},\sigma)] =
|
||||
E_t[f(g^{**}(g^*(y_{t-1}^*,u_t,\sigma), u_{t+1}, \sigma), g(y_{t-1}^*,u_t,\sigma),
|
||||
y^*_{t-1},u_t)]=0$$
|
||||
where $f$ is a given system of equations.
|
||||
|
||||
It is known that $g_{y^*}$ is given by $F_{y^*}=0$, $g_u$ is given by
|
||||
$F_u=0$, and $g_\sigma$ is zero. The only input to the method are the
|
||||
derivatives |fd| of the system $f$, and partitioning of the vector $y$
|
||||
(from object).
|
||||
|
||||
@<|FirstOrder::solve| code@>=
// Solves the linear approximation: computes |gy| ($g_{y^*}$) from
// $F_{y^*}=0$ and |gu| ($g_u$) from $F_u=0$, given the first derivatives
// |fd| of the system (see the section text).  Throws on NaN/Inf results.
void FirstOrder::solve(const TwoDMatrix& fd)
{
JournalRecordPair pa(journal);
pa << "Recovering first order derivatives " << endrec;

// Publish the instance threshold to the file-level global read by the
// DGGES selector |order_eigs|.
::qz_criterium = FirstOrder::qz_criterium;

@<solve derivatives |gy|@>;
@<solve derivatives |gu|@>;
journalEigs();

if (! gy.isFinite() || ! gu.isFinite()) {
throw KordException(__FILE__, __LINE__,
"NaN or Inf asserted in first order derivatives in FirstOrder::solve");
}
}
|
||||
|
||||
@ The derivatives $g_{y^*}$ are retrieved from the equation
|
||||
$F_{y^*}=0$. The calculation proceeds as follows:
|
||||
|
||||
\orderedlist
|
||||
|
||||
\li For each variable appearing at both $t-1$ and $t+1$ we add a dummy
|
||||
variable, so that the predetermined variables and forward looking would
|
||||
be disjoint. This is, the matrix of the first derivatives of the
|
||||
system written as:
|
||||
$$\left[\matrix{f_{y^{**}_+}&f_{ys}&f_{yp}&f_{yb}&f_{yf}&f_{y^*_-}}\right],$$
|
||||
where $f_{ys}$, $f_{yp}$, $f_{yb}$, and $f_{yf}$ are derivatives wrt
|
||||
static, predetermined, both, forward looking at time $t$, is rewritten
|
||||
to the matrix:
|
||||
$$\left[
|
||||
\matrix{f_{y^{**}_+}&f_{ys}&f_{yp}&f_{yb}&0&f_{yf}&f_{y^*_-}\cr
|
||||
0 &0 &0 &I &-I&0 &0}
|
||||
\right],$$
|
||||
where the second line has number of rows equal to the number of both variables.
|
||||
|
||||
\li Next, provided that forward looking and predetermined are
|
||||
disjoint, the equation $F_{y^*}=0$ is written as:
|
||||
$$\left[f_+{y^{**}_+}\right]\left[g_{y^*}^{**}\right]\left[g_{y^*}^*\right]
|
||||
+\left[f_{ys}\right]\left[g^s_{y^*}\right]
|
||||
+\left[f_{y^*}\right]\left[g^*_{y^*}\right]
|
||||
+\left[f_{y^{**}}\right]\left[g^{**}_{y^*}\right]+\left[f_{y^*_-}\right]=0$$
|
||||
This is rewritten as
|
||||
$$\left[\matrix{f_{y^*}&0&f_{y^{**}_+}}\right]
|
||||
\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]\left[g_{y^*}^*\right]+
|
||||
\left[\matrix{f_{y^*_-}&f_{ys}&f_{y^{**}}}\right]
|
||||
\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]=0
|
||||
$$
|
||||
Now, in the above equation, there are the auxiliary variables standing
|
||||
for copies of both variables at time $t+1$. This equation is then
|
||||
rewritten as:
|
||||
$$
|
||||
\left[\matrix{f_{yp}&f_{yb}&0&f_{y^{**}_+}\cr 0&I&0&0}\right]
|
||||
\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]\left[g_{y^*}^*\right]+
|
||||
\left[\matrix{f_{y^*_-}&f_{ys}&0&f_{yf}\cr 0&0&-I&0}\right]
|
||||
\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]=0
|
||||
$$
|
||||
The two matrices are denoted as $D$ and $-E$, so the equation takes the form:
|
||||
$$D\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]\left[g_{y^*}^*\right]=
|
||||
E\left[\matrix{I\cr g^s_{y^*}\cr g^{**}_{y^*}}\right]$$
|
||||
|
||||
\li Next we solve the equation by Generalized Schur decomposition:
|
||||
$$
|
||||
\left[\matrix{T_{11}&T_{12}\cr 0&T_{22}}\right]
|
||||
\left[\matrix{Z_{11}^T&Z_{21}^T\cr Z_{12}^T&Z_{22}^T}\right]
|
||||
\left[\matrix{I\cr X}\right]\left[g_{y^*}^*\right]=
|
||||
\left[\matrix{S_{11}&S_{12}\cr 0&S_{22}}\right]
|
||||
\left[\matrix{Z_{11}^T&Z_{21}^T\cr Z_{12}^T&Z_{22}^T}\right]
|
||||
\left[\matrix{I\cr X}\right]
|
||||
$$
|
||||
We reorder the eigenvalue pair so that $S_{ii}/T_{ii}$ with modulus
|
||||
less than one would be in the left-upper part.
|
||||
|
||||
\li The Blanchard--Kahn stability argument implies that the pairs
|
||||
with modulus less than one will be in and only in $S_{11}/T_{11}$.
|
||||
The exploding paths will be then eliminated when
|
||||
$$
|
||||
\left[\matrix{Z_{11}^T&Z_{21}^T\cr Z_{12}^T&Z_{22}^T}\right]
|
||||
\left[\matrix{I\cr X}\right]\left[g_{y^*}^*\right]=
|
||||
\left[\matrix{Y\cr 0}\right]
|
||||
$$
|
||||
From this we have, $Y=Z_{11}^{-1}$, and $X=Z_{21}Y$, or equivalently
|
||||
$X=-Z_{22}^{-T}Z_{12}^T$. From the equation, we get
|
||||
$\left[g_{y^*}^*\right]=Y^{-1}T_{11}^{-1}S_{11}Y$, which is
|
||||
$Z_{11}T_{11}^{-1}S_{11}Z_{11}^{-1}$.
|
||||
|
||||
\li We then copy the derivatives to storage |gy|. Note that the
|
||||
derivatives of both variables are in $X$ and in
|
||||
$\left[g_{y^*}^*\right]$, so we check whether the two submatrices are
|
||||
the same. The difference is only numerical error.
|
||||
|
||||
\endorderedlist
|
||||
|
||||
@<solve derivatives |gy|@>=
// Steps follow the enumerated derivation above: build D and E, run the
// generalized Schur (QZ) decomposition, recover the derivatives, and
// cross-check the "both" variables which appear in two places.
@<setup submatrices of |f|@>;
@<form matrix $D$@>;
@<form matrix $E$@>;
@<solve generalized Schur@>;
@<make submatrices of right space@>;
@<calculate derivatives of static and forward@>;
@<calculate derivatives of predetermined@>;
@<copy derivatives to |gy|@>;
@<check difference for derivatives of both@>;
|
||||
|
||||
|
||||
@ Here we setup submatrices of the derivatives |fd|.
|
||||
@<setup submatrices of |f|@>=
// Column-wise views into |fd| following the stacking order
// [y**_{t+1}, y_t (static, pred, both, forward), y*_{t-1}, u_t].
int off = 0;
ConstTwoDMatrix fyplus(fd, off, ypart.nyss());
off += ypart.nyss();
ConstTwoDMatrix fyszero(fd, off, ypart.nstat);
off += ypart.nstat;
ConstTwoDMatrix fypzero(fd, off, ypart.npred);
off += ypart.npred;
ConstTwoDMatrix fybzero(fd, off, ypart.nboth);
off += ypart.nboth;
ConstTwoDMatrix fyfzero(fd, off, ypart.nforw);
off += ypart.nforw;
ConstTwoDMatrix fymins(fd, off, ypart.nys());
off += ypart.nys();
ConstTwoDMatrix fuzero(fd, off, nu);
off += nu;
|
||||
|
||||
@
|
||||
@<form matrix $D$@>=
// D = [f_{yp} f_{yb} 0 f_{y**_+}; 0 I 0 0], extra rows/identity block for
// the dummy copies of the "both" variables (see the derivation above).
int n = ypart.ny()+ypart.nboth;
TwoDMatrix matD(n, n);
matD.zeros();
matD.place(fypzero, 0, 0);
matD.place(fybzero, 0, ypart.npred);
matD.place(fyplus, 0, ypart.nys()+ypart.nstat);
for (int i = 0; i < ypart.nboth; i++)
matD.get(ypart.ny()+i, ypart.npred+i) = 1.0;
|
||||
|
||||
@
|
||||
@<form matrix $E$@>=
// E = -[f_{y*_-} f_{ys} 0 f_{yf}; 0 0 -I 0]; the final mult(-1.0)
// applies the minus sign from the derivation above.
TwoDMatrix matE(n, n);
matE.zeros();
matE.place(fymins, 0, 0);
matE.place(fyszero, 0, ypart.nys());
matE.place(fyfzero, 0, ypart.nys()+ypart.nstat+ypart.nboth);
for (int i = 0; i < ypart.nboth; i++)
matE.get(ypart.ny()+i, ypart.nys()+ypart.nstat+i) = -1.0;
matE.mult(-1.0);
|
||||
|
||||
@
|
||||
@<solve generalized Schur@>=
// QZ decomposition of the pencil (E, D) with eigenvalue ordering by
// |order_eigs| (stable pairs first).  |sdim|, |alphar|, |alphai|, |beta|
// and |bk_cond| are presumably members of |FirstOrder| -- not declared in
// this section.
TwoDMatrix vsl(n, n);
TwoDMatrix vsr(n, n);
// Workspace size; 100*n+16 exceeds DGGES's documented minimum.
int lwork = 100*n+16;
Vector work(lwork);
IntSequence bwork(n);
int info;
LAPACK_dgges("N", "V", "S", order_eigs, &n, matE.getData().base(), &n,
matD.getData().base(), &n, &sdim, alphar.base(), alphai.base(),
beta.base(), vsl.getData().base(), &n, vsr.getData().base(), &n,
work.base(), &lwork, &(bwork[0]), &info);
// Blanchard-Kahn condition: number of stable pairs equals the number of
// state variables.
bk_cond = (sdim == ypart.nys());
|
||||
|
||||
|
||||
@ Here we setup submatrices of the matrix $Z$.
|
||||
@<make submatrices of right space@>=
// 2x2 block views of the right Schur vectors Z (stored in |vsr|),
// partitioned at ypart.nys() rows/columns.
ConstGeneralMatrix z11(vsr, 0, 0, ypart.nys(), ypart.nys());
ConstGeneralMatrix z12(vsr, 0, ypart.nys(), ypart.nys(), n-ypart.nys());
ConstGeneralMatrix z21(vsr, ypart.nys(), 0, n-ypart.nys(), ypart.nys());
ConstGeneralMatrix z22(vsr, ypart.nys(), ypart.nys(), n-ypart.nys(), n-ypart.nys());
|
||||
|
||||
@ Here we calculate $X=-Z_{22}^{-T}Z_{12}^T$, where $X$ is |sfder| in the code.
|
||||
@<calculate derivatives of static and forward@>=
// X = -Z22^{-T} Z12^T; |sfder| holds the derivatives of static and
// forward-looking variables w.r.t. the states.
GeneralMatrix sfder(z12, "transpose");
z22.multInvLeftTrans(sfder);
sfder.mult(-1);
|
||||
|
||||
@ Here we calculate
|
||||
$g_{y^*}^*=Z_{11}T^{-1}_{11}S_{11}Z_{11}^{-1}
|
||||
=Z_{11}T^{-1}_{11}(Z_{11}^{-T}S^T_{11})^T$.
|
||||
|
||||
@<calculate derivatives of predetermined@>=
// g*_{y*} = Z11 T11^{-1} S11 Z11^{-1}, computed (per the formula in the
// web text) as Z11 T11^{-1} (Z11^{-T} S11^T)^T so only triangular-ish
// left solves are needed.  After dgges, |matE| holds S and |matD| holds T.
ConstGeneralMatrix s11(matE, 0, 0, ypart.nys(), ypart.nys());
ConstGeneralMatrix t11(matD, 0, 0, ypart.nys(), ypart.nys());
GeneralMatrix dumm(s11, "transpose");
z11.multInvLeftTrans(dumm);   // dumm = Z11^{-T} S11^T
GeneralMatrix preder(dumm, "transpose");  // preder = S11 Z11^{-1}
t11.multInvLeft(preder);      // preder = T11^{-1} S11 Z11^{-1}
preder.multLeft(z11);         // preder = Z11 T11^{-1} S11 Z11^{-1}
|
||||
|
||||
@
|
||||
@<copy derivatives to |gy|@>=
// Scatter the computed rows into |gy| in model variable order:
// static rows first, then predetermined, then forward-looking.
gy.place(preder, ypart.nstat, 0);
GeneralMatrix sder(sfder, 0, 0, ypart.nstat, ypart.nys());
gy.place(sder, 0, 0);
GeneralMatrix fder(sfder, ypart.nstat+ypart.nboth, 0, ypart.nforw, ypart.nys());
gy.place(fder, ypart.nstat+ypart.nys(), 0);
|
||||
|
||||
@
|
||||
@<check difference for derivatives of both@>=
// Derivatives of the "both" variables are obtained twice (inside |sfder|
// and inside |preder|); their discrepancy measures the numerical error.
GeneralMatrix bder((const GeneralMatrix&)sfder, ypart.nstat, 0, ypart.nboth, ypart.nys());
GeneralMatrix bder2(preder, ypart.npred, 0, ypart.nboth, ypart.nys());
bder.add(-1, bder2);
// presumably the maximum absolute element -- TODO confirm getMax semantics
b_error = bder.getData().getMax();
|
||||
|
||||
@ The equation $F_u=0$ can be written as
|
||||
$$
|
||||
\left[f_{y^{**}_+}\right]\left[g^{**}_{y^*}\right]\left[g_u^*\right]+
|
||||
\left[f_y\right]\left[g_u\right]+\left[f_u\right]=0
|
||||
$$
|
||||
and rewritten as
|
||||
$$
|
||||
\left[f_y +
|
||||
\left[\matrix{0&f_{y^{**}_+}g^{**}_{y^*}&0}\right]\right]g_u=f_u
|
||||
$$
|
||||
This is exactly done here. The matrix
|
||||
$\left[f_y +\left[\matrix{0&f_{y^{**}_+}g^{**}_{y^*}&0}\right]\right]$ is |matA|
|
||||
in the code.
|
||||
|
||||
@<solve derivatives |gu|@>=
// Solve [f_y + [0  f_{y+} g**_{y*}  0]] g_u = -f_u  (see the web text).
GeneralMatrix matA(ypart.ny(), ypart.ny());
matA.zeros();
// g**_{y*}: rows of |gy| for variables occurring with a lead
ConstGeneralMatrix gss(gy, ypart.nstat+ypart.npred, 0, ypart.nyss(), ypart.nys());
GeneralMatrix aux(fyplus, gss);   // aux = f_{y+} * g**_{y*}
matA.place(aux, 0, ypart.nstat);
ConstGeneralMatrix fyzero(fd, 0, ypart.nyss(), ypart.ny(), ypart.ny());
matA.add(1.0, fyzero);
// right-hand side -f_u, then solve in place: gu = matA^{-1} (-f_u)
gu.zeros();
gu.add(-1.0, fuzero);
ConstGeneralMatrix(matA).multInvLeft(gu);
|
||||
|
||||
@
|
||||
@<|FirstOrder::journalEigs| code@>=
// Report the outcome of the Blanchard-Kahn check to the journal and, on
// failure, list all generalized eigenvalues (alphar,alphai)/beta with
// their moduli, marking where |sdim| and |npred| cut the sorted spectrum
// so the user can see which root crossed the boundary.
void FirstOrder::journalEigs()
{
	if (bk_cond) {
		JournalRecord jr(journal);
		jr << "Blanchard-Kahn condition satisfied, model stable" << endrec;
	} else {
		JournalRecord jr(journal);
		jr << "Blanchard-Kahn condition not satisfied, model not stable: sdim=" << sdim
		   << " " << "npred=" << ypart.nys() << endrec;
	}
	if (!bk_cond) {
		for (int i = 0; i < alphar.length(); i++) {
			// separator line where the stable count or the state count ends
			if (i == sdim || i == ypart.nys()) {
				JournalRecord jr(journal);
				jr << "---------------------------------------------------- ";
				if (i == sdim)
					jr << "sdim";
				else
					jr << "npred";
				jr << endrec;
			}
			JournalRecord jr(journal);
			// modulus of alpha/beta; |beta| is rounded to 5 decimal digits
			// so tiny numerical denominators do not produce absurd moduli
			double mod = sqrt(alphar[i]*alphar[i]+alphai[i]*alphai[i]);
			mod = mod/round(100000*std::abs(beta[i]))*100000;
			jr << i << "\t(" << alphar[i] << "," << alphai[i] << ") / " << beta[i]
			   << " \t" << mod << endrec;
		}
	}
}
|
||||
|
||||
|
||||
@ End of {\tt first\_order.cpp} file.
|
|
@ -0,0 +1,85 @@
|
|||
@q $Id: first_order.hweb 2345 2009-03-24 11:50:48Z kamenik $ @>
|
||||
@q Copyright 2004, Ondra Kamenik @>
|
||||
|
||||
@*2 First order at deterministic steady. Start of {\tt first\_order.h} file.
|
||||
|
||||
@s GeneralMatrix int
|
||||
@s ConstGeneralMatrix int
|
||||
@s FirstOrder int
|
||||
@s FirstOrderDerivs int
|
||||
@c
|
||||
|
||||
#ifndef FIRST_ORDER_H
|
||||
#define FIRST_ORDER_H
|
||||
|
||||
#include "korder.h"
|
||||
|
||||
@<|FirstOrder| class declaration@>;
|
||||
@<|FirstOrderDerivs| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@
|
||||
@<|FirstOrder| class declaration@>=
template<int> class FirstOrderDerivs;
// First order approximation at the deterministic steady state.  The
// constructor immediately runs the QZ-based |solve|; results are |gy|
// (derivatives w.r.t. predetermined states) and |gu| (w.r.t. shocks).
class FirstOrder {
template <int> friend class FirstOrderDerivs;
PartitionY ypart;   // partitioning of endogenous variables
int nu;             // number of exogenous shocks
TwoDMatrix gy;      // g_{y*}, ny x nys
TwoDMatrix gu;      // g_u, ny x nu
bool bk_cond;       // Blanchard-Kahn condition satisfied?
double b_error;     // discrepancy of doubly-computed "both" derivative rows
int sdim;           // number of selected (stable) generalized eigenvalues
Vector alphar;      // eigenvalue numerators, real parts
Vector alphai;      // eigenvalue numerators, imaginary parts
Vector beta;        // eigenvalue denominators
double qz_criterium; // stability threshold for root selection -- used by solve (defined elsewhere)
Journal& journal;
public:@;
// Sizes: static/predetermined/both/forward variable counts, number of
// shocks; |f| holds the first derivatives of the model equations.
FirstOrder(int num_stat, int num_pred, int num_both, int num_forw,
int num_u, const FSSparseTensor& f, Journal& jr, double qz_crit)
: ypart(num_stat, num_pred, num_both, num_forw),
nu(num_u),
gy(ypart.ny(), ypart.nys()),
gu(ypart.ny(), nu),
alphar(ypart.ny()+ypart.nboth),
alphai(ypart.ny()+ypart.nboth),
beta(ypart.ny()+ypart.nboth),
qz_criterium(qz_crit),
journal(jr)
{@+ solve(FFSTensor(f)); @+}
// true iff the Blanchard-Kahn condition held in |solve|
bool isStable() const
{@+ return bk_cond;@+}
const TwoDMatrix& getGy() const
{@+ return gy;@+}
const TwoDMatrix& getGu() const
{@+ return gu;@+}
protected:@;
void solve(const TwoDMatrix& f);
void journalEigs();
};
|
||||
|
||||
@ This class only converts the derivatives $g_{y^*}$ and $g_u$ to a
|
||||
folded or unfolded container.
|
||||
|
||||
@<|FirstOrderDerivs| class declaration@>=
// Converts the first order derivatives g_{y*} and g_u to a (folded or
// unfolded, according to |t|) tensor container: g_{y*} is inserted with
// symmetry (1,0,0,0) and g_u with symmetry (0,1,0,0).
template <int t>
class FirstOrderDerivs : public ctraits<t>::Tg {
public:@;
FirstOrderDerivs(const FirstOrder& fo)
: ctraits<t>::Tg(4)
{
// variable sizes per dimension: y*, u, and two more of sizes nu and 1
// (presumably u' and sigma -- confirm against ctraits conventions)
IntSequence nvs(4);
nvs[0] = fo.ypart.nys(); nvs[1] = fo.nu; nvs[2] = fo.nu; nvs[3] = 1;
_Ttensor* ten = new _Ttensor(fo.ypart.ny(), TensorDimens(Symmetry(1,0,0,0),nvs));
ten->zeros(); ten->add(1.0, fo.gy);
insert(ten);  // container takes ownership
ten = new _Ttensor(fo.ypart.ny(), TensorDimens(Symmetry(0,1,0,0), nvs));
ten->zeros(); ten->add(1.0, fo.gu);
insert(ten);
}
};
|
||||
|
||||
|
||||
@ End of {\tt first\_order.h} file.
|
|
@ -0,0 +1,443 @@
|
|||
@q $Id: global_check.cweb 1830 2008-05-18 20:06:40Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt global\_check.cpp} file.
|
||||
|
||||
@c
|
||||
#include "SymSchurDecomp.h"
|
||||
|
||||
#include "global_check.h"
|
||||
|
||||
#include "smolyak.h"
|
||||
#include "product.h"
|
||||
#include "quasi_mcarlo.h"
|
||||
|
||||
#include "cpplapack.h"
|
||||
|
||||
@<|ResidFunction| constructor code@>;
|
||||
@<|ResidFunction| copy constructor code@>;
|
||||
@<|ResidFunction| destructor code@>;
|
||||
@<|ResidFunction::setYU| code@>;
|
||||
@<|ResidFunction::eval| code@>;
|
||||
@<|GlobalChecker::check| vector code@>;
|
||||
@<|GlobalChecker::check| matrix code@>;
|
||||
@<|GlobalChecker::checkAlongShocksAndSave| code@>;
|
||||
@<|GlobalChecker::checkOnEllipseAndSave| code@>;
|
||||
@<|GlobalChecker::checkAlongSimulationAndSave| code@>;
|
||||
|
||||
@ Here we just set a reference to the approximation, and create a new
|
||||
|DynamicModel|.
|
||||
|
||||
@<|ResidFunction| constructor code@>=
// Keep a reference to the approximation and clone its model; the
// (y*,u)-dependent members remain null until |setYU| is called.
ResidFunction::ResidFunction(const Approximation& app)
: VectorFunction(app.getModel().nexog(), app.getModel().numeq()), approx(app),
model(app.getModel().clone()),
yplus(NULL), ystar(NULL), u(NULL), hss(NULL)
{
}
|
||||
|
||||
@
|
||||
@<|ResidFunction| copy constructor code@>=
// Deep copy: clone the model and duplicate whatever (y*,u)-dependent
// state the source object has (it may be in either of its two states,
// before or after |setYU|).
ResidFunction::ResidFunction(const ResidFunction& rf)
: VectorFunction(rf), approx(rf.approx), model(rf.model->clone()),
yplus(NULL), ystar(NULL), u(NULL), hss(NULL)
{
if (rf.yplus)
yplus = new Vector(*(rf.yplus));
if (rf.ystar)
ystar = new Vector(*(rf.ystar));
if (rf.u)
u = new Vector(*(rf.u));
if (rf.hss)
hss = new FTensorPolynomial(*(rf.hss));
}
|
||||
|
||||
|
||||
@
|
||||
@<|ResidFunction| destructor code@>=
// Release the cloned model and any (y*,u)-dependent data.
ResidFunction::~ResidFunction()
{
delete model;
@<delete |y| and |u| dependent data@>;
}
|
||||
|
||||
@
|
||||
@<delete |y| and |u| dependent data@>=
|
||||
if (yplus)
|
||||
delete yplus;
|
||||
if (ystar)
|
||||
delete ystar;
|
||||
if (u)
|
||||
delete u;
|
||||
if (hss)
|
||||
delete hss;
|
||||
|
||||
|
||||
@ This sets $y^*$ and $u$. We have to create |ystar|, |u|, |yplus| and
|
||||
|hss|.
|
||||
|
||||
@<|ResidFunction::setYU| code@>=
// Fix the conditioning point (y*, u): store copies, evaluate the folded
// decision rule to get y+ = g(y*,u), and build |hss|, the polynomial of
// the forward-looking part partially evaluated at the realized state
// (with the steady state added back; see the chunks below).
void ResidFunction::setYU(const Vector& ys, const Vector& xx)
{
@<delete |y| and |u| dependent data@>;

ystar = new Vector(ys);
u = new Vector(xx);
yplus = new Vector(model->numeq());
approx.getFoldDecisionRule().evaluate(DecisionRule::horner,
*yplus, *ystar, *u);

@<make a tensor polynomial of in-place subtensors from decision rule@>;
@<make |ytmp_star| be a difference of |yplus| from steady@>;
@<make |hss| and add steady to it@>;
}
|
||||
|
||||
@ Here we use a dirty trick of converting |const| to non-|const| to
|
||||
obtain a polynomial of subtensor corresponding to non-predetermined
|
||||
variables. However, this new non-|const| polynomial will be used for a
|
||||
construction of |hss| and will be used in |const| context. So this
|
||||
dirty thing is safe.
|
||||
|
||||
Note, that there is always a folded decision rule in |Approximation|.
|
||||
|
||||
@<make a tensor polynomial of in-place subtensors from decision rule@>=
// Strip constness via a union (see the web text above: the non-const
// pointer is only ever used in const context, so this is safe here).
union {const FoldDecisionRule* c; FoldDecisionRule* n;} dr;
dr.c = &(approx.getFoldDecisionRule());
// polynomial of the rows for non-predetermined (both+forward) variables
FTensorPolynomial dr_ss(model->nstat()+model->npred(), model->nboth()+model->nforw(),
*(dr.n));
|
||||
|
||||
@
|
||||
@<make |ytmp_star| be a difference of |yplus| from steady@>=
// ytmp_star = state part of y+ minus state part of the steady state
Vector ytmp_star(ConstVector(*yplus, model->nstat(), model->npred()+model->nboth()));
ConstVector ysteady_star(dr.c->getSteady(), model->nstat(),
model->npred()+model->nboth());
ytmp_star.add(-1.0, ysteady_star);
|
||||
|
||||
@ Here is the |const| context of |dr_ss|.
|
||||
@<make |hss| and add steady to it@>=
// Partially evaluate |dr_ss| at |ytmp_star|, then shift the constant
// term by the forward-looking part of the steady state -- inserting a
// degree-0 tensor if the polynomial has none.
hss = new FTensorPolynomial(dr_ss, ytmp_star);
ConstVector ysteady_ss(dr.c->getSteady(), model->nstat()+model->npred(),
model->nboth()+model->nforw());
if (hss->check(Symmetry(0))) {
hss->get(Symmetry(0))->getData().add(1.0, ysteady_ss);
} else {
FFSTensor* ten = new FFSTensor(hss->nrows(), hss->nvars(), 0);
ten->getData() = ysteady_ss;
hss->insert(ten);  // polynomial takes ownership
}
|
||||
|
||||
@ Here we evaluate the residual $F(y^*,u,u')$. We have to evaluate |hss|
|
||||
for $u'=$|point| and then we evaluate the system $f$.
|
||||
|
||||
@<|ResidFunction::eval| code@>=
// Residual F(y*,u,u') for u' = |point|: evaluate y** = hss(point), then
// the model equations at (y*, y+, y**, u).  Requires a prior |setYU|.
void ResidFunction::eval(const Vector& point, const ParameterSignal& sig, Vector& out)
{
KORD_RAISE_IF(point.length() != hss->nvars(),
"Wrong dimension of input vector in ResidFunction::eval");
KORD_RAISE_IF(out.length() != model->numeq(),
"Wrong dimension of output vector in ResidFunction::eval");
Vector yss(hss->nrows());
hss->evalHorner(yss, point);
model->evaluateSystem(out, *ystar, *yplus, yss, *u);
}
|
||||
|
||||
@ This checks the $E[F(y^*,u,u')]$ for a given $y^*$ and $u$ by
|
||||
integrating with a given quadrature. Note that the input |ys| is $y^*$
|
||||
not whole $y$.
|
||||
|
||||
@<|GlobalChecker::check| vector code@>=
// E[F(y*,u,u') | y*,u] for one point: condition every worker function in
// the set on (y*,u), then integrate over u' with the given quadrature.
void GlobalChecker::check(const Quadrature& quad, int level,
const ConstVector& ys, const ConstVector& x, Vector& out)
{
for (int ifunc = 0; ifunc < vfs.getNum(); ifunc++)
((GResidFunction&)(vfs.getFunc(ifunc))).setYU(ys, x);
quad.integrate(vfs, level, out);
}
|
||||
|
||||
@ This method is a bulk version of |@<|GlobalChecker::check| vector
|
||||
code@>|. It decides between Smolyak and product quadrature according
|
||||
to |max_evals| constraint.
|
||||
|
||||
Note that |y| can be either full (all endogenous variables including
|
||||
static and forward looking), or just $y^*$ (state variables). The
|
||||
method is able to recognize it.
|
||||
|
||||
@<|GlobalChecker::check| matrix code@>=
// Bulk version: one column of |y|/|x| per checked point.  Chooses
// between Smolyak and product quadrature under the |max_evals| budget
// (see the chunks below), then checks every column.
void GlobalChecker::check(int max_evals, const ConstTwoDMatrix& y,
const ConstTwoDMatrix& x, TwoDMatrix& out)
{
JournalRecordPair pa(journal);
pa << "Checking approximation error for " << y.ncols()
<< " states with at most " << max_evals << " evaluations" << endrec;

@<decide about type of quadrature@>;
Quadrature* quad;
int lev;
@<create the quadrature and report the decision@>;
@<check all column of |y| and |x|@>;
delete quad;
}
|
||||
|
||||
@
|
||||
@<decide about type of quadrature@>=
// Design both candidate quadratures for the |max_evals| budget and pick
// Smolyak when it is cheaper without losing more than one level.
GaussHermite gh;

SmolyakQuadrature dummy_sq(model.nexog(), 1, gh);
int smol_evals;
int smol_level;
dummy_sq.designLevelForEvals(max_evals, smol_level, smol_evals);

ProductQuadrature dummy_pq(model.nexog(), gh);
int prod_evals;
int prod_level;
dummy_pq.designLevelForEvals(max_evals, prod_level, prod_evals);

bool take_smolyak = (smol_evals < prod_evals) && (smol_level >= prod_level-1);
|
||||
|
||||
@
|
||||
@<create the quadrature and report the decision@>=
// Instantiate the chosen quadrature (freed by the caller) and journal
// both candidates' (level, evaluations) so the choice is auditable.
if (take_smolyak) {
quad = new SmolyakQuadrature(model.nexog(), smol_level, gh);
lev = smol_level;
JournalRecord rec(journal);
rec << "Selected Smolyak (level,evals)=(" << smol_level << ","
<< smol_evals << ") over product (" << prod_level << ","
<< prod_evals << ")" << endrec;
} else {
quad = new ProductQuadrature(model.nexog(), gh);
lev = prod_level;
JournalRecord rec(journal);
rec << "Selected product (level,evals)=(" << prod_level << ","
<< prod_evals << ") over Smolyak (" << smol_level << ","
<< smol_evals << ")" << endrec;
}
|
||||
|
||||
@
|
||||
@<check all column of |y| and |x|@>=
// |y| may hold full endogenous vectors or just states; if full (row
// count equals number of equations), skip the static rows.
int first_row = (y.nrows() == model.numeq())? model.nstat() : 0;
ConstTwoDMatrix ysmat(y, first_row, 0, model.npred()+model.nboth(), y.ncols());
for (int j = 0; j < y.ncols(); j++) {
ConstVector yj(ysmat, j);
ConstVector xj(x, j);
Vector outj(out, j);
check(*quad, lev, yj, xj, outj);
}
|
||||
|
||||
|
||||
|
||||
@ This method checks an error of the approximation by evaluating
|
||||
residual $E[F(y^*,u,u')\vert y^*,u]$ for $y^*$ being the steady state, and
|
||||
changing $u$. We go through all elements of $u$ and vary them from
|
||||
$-mult\cdot\sigma$ to $mult\cdot\sigma$ in |m| steps.
|
||||
|
||||
@<|GlobalChecker::checkAlongShocksAndSave| code@>=
// Errors along each shock: y* fixed at steady state, each element of u
// varied through 2m steps in [-mult*sigma, mult*sigma] (column 0 is the
// all-zero shock).  Results are journaled and written to the MAT file.
void GlobalChecker::checkAlongShocksAndSave(FILE* fd, const char* prefix,
int m, double mult, int max_evals)
{
JournalRecordPair pa(journal);
pa << "Calculating errors along shocks +/- "
<< mult << " std errors, granularity " << m << endrec;
@<setup |y_mat| of steady states for checking@>;
@<setup |exo_mat| for checking@>;

TwoDMatrix errors(model.numeq(), 2*m*model.nexog()+1);
check(max_evals, y_mat, exo_mat, errors);

@<report errors along shock and save them@>;
}
|
||||
|
||||
@
|
||||
@<setup |y_mat| of steady states for checking@>=
// Every checked point conditions on the steady state: one identical
// column per point (2m per shock, plus the zero-shock column).
TwoDMatrix y_mat(model.numeq(), 2*m*model.nexog()+1);
for (int j = 0; j < 2*m*model.nexog()+1; j++) {
Vector yj(y_mat, j);
yj = (const Vector&)model.getSteady();
}
|
||||
|
||||
@
|
||||
@<setup |exo_mat| for checking@>=
// Column 0 stays zero; columns 1+2m*ishock .. 2m*(ishock+1) vary shock
// |ishock| through the nonzero multiples -m..-1,1..m of mult*sigma/m.
TwoDMatrix exo_mat(model.nexog(), 2*m*model.nexog()+1);
exo_mat.zeros();
for (int ishock = 0; ishock < model.nexog(); ishock++) {
double max_sigma = sqrt(model.getVcov().get(ishock,ishock));
for (int j = 0; j < 2*m; j++) {
// map j = 0..2m-1 onto the nonzero multipliers, skipping 0
int jmult = (j < m)? j-m: j-m+1;
exo_mat.get(ishock, 1+2*m*ishock+j) =
mult*jmult*max_sigma/m;
}
}
|
||||
|
||||
@
|
||||
@<report errors along shock and save them@>=
// Journal the max error per checked shock value and write one
// numeq x (2m+1) matrix per shock to the MAT-4 file.  The middle column
// (j==m) is the shared zero-shock column 0 of |errors|.
// NOTE(review): |res| appears unused in this chunk -- leftover?
TwoDMatrix res(model.nexog(), 2*m+1);
JournalRecord rec(journal);
rec << "Shock value error" << endrec;
ConstVector err0(errors,0);
// NOTE(review): fixed-size sprintf buffers; |shock| holds 8 chars + NUL,
// so an exogenous name longer than 8 characters would overflow -- confirm
// name lengths are bounded upstream.
char shock[9];
char erbuf[17];
for (int ishock = 0; ishock < model.nexog(); ishock++) {
TwoDMatrix err_out(model.numeq(), 2*m+1);
sprintf(shock, "%-8s", model.getExogNames().getName(ishock));
for (int j = 0; j < 2*m+1; j++) {
int jj;
Vector error(err_out, j);
if (j != m) {
// map output column j to the column of |errors| for this shock
if (j < m)
jj = 1 + 2*m*ishock+j;
else
jj = 1 + 2*m*ishock+j-1;
ConstVector coljj(errors,jj);
error = coljj;
} else {
jj = 0;
error = err0;
}
JournalRecord rec1(journal);
sprintf(erbuf,"%12.7g ", error.getMax());
rec1 << shock << " " << exo_mat.get(ishock,jj)
<< "\t" << erbuf << endrec;
}
char tmp[100];
sprintf(tmp, "%s_shock_%s_errors", prefix, model.getExogNames().getName(ishock));
err_out.writeMat4(fd, tmp);
}
|
||||
|
||||
|
||||
@ This method checks errors on ellipse of endogenous states
|
||||
(predetermined variables). The ellipse is shaped according to
|
||||
covariance matrix of endogenous variables based on the first order
|
||||
approximation and scaled by |mult|. The points on the
|
||||
ellipse are chosen as polar images of the low discrepancy grid in a
|
||||
cube.
|
||||
|
||||
The method works as follows: First we calculate symmetric Schur factor of
|
||||
covariance matrix of the states. Second we generate low discrepancy
|
||||
points on the unit sphere. Third we transform the sphere with the
|
||||
variance-covariance matrix factor and multiplier |mult| and initialize
|
||||
matrix of $u_t$ to zeros. Fourth we run the |check| method and save
|
||||
the results.
|
||||
|
||||
@<|GlobalChecker::checkOnEllipseAndSave| code@>=
// Errors on an ellipse of states shaped by the (first order) state
// covariance, scaled by |mult|; points are polar images of a low
// discrepancy grid, shocks are zero.  See the chunks below.
void GlobalChecker::checkOnEllipseAndSave(FILE* fd, const char* prefix,
int m, double mult, int max_evals)
{
JournalRecordPair pa(journal);
pa << "Calculating errors at " << m
<< " ellipse points scaled by " << mult << endrec;
@<make factor of covariance of variables@>;
@<put low discrepancy sphere points to |ymat|@>;
@<transform sphere |ymat| and prepare |umat| for checking@>;
@<check on ellipse and save@>;
}
|
||||
|
||||
|
||||
@ Here we set |ycovfac| to the symmetric Schur decomposition factor of
|
||||
a submatrix of covariances of all endogenous variables. The submatrix
|
||||
corresponds to state variables (predetermined plus both).
|
||||
|
||||
@<make factor of covariance of variables@>=
// Symmetric Schur factor of the state block (pred+both rows/columns) of
// the endogenous covariance; small negative eigenvalues are corrected
// before the positive-semidefiniteness check.
TwoDMatrix* ycov = approx.calcYCov();
TwoDMatrix ycovpred((const TwoDMatrix&)*ycov, model.nstat(), model.nstat(),
model.npred()+model.nboth(), model.npred()+model.nboth());
delete ycov;
SymSchurDecomp ssd(ycovpred);
ssd.correctDefinitness(1.e-05);
TwoDMatrix ycovfac(ycovpred.nrows(), ycovpred.ncols());
KORD_RAISE_IF(! ssd.isPositiveSemidefinite(),
"Covariance matrix of the states not positive \
semidefinite in GlobalChecker::checkOnEllipseAndSave");
ssd.getFactor(ycovfac);
|
||||
|
||||
|
||||
@ Here we first calculate dimension |d| of the sphere, which is a
|
||||
number of state variables minus one. We go through the |d|-dimensional
|
||||
cube $\langle 0,1\rangle^d$ by |QMCarloCubeQuadrature| and make a
|
||||
polar transformation to the sphere. The polar transformation $f^i$ can
|
||||
be written recursively wrt. the dimension $i$ as:
|
||||
$$\eqalign{
|
||||
f^0() &= \left[1\right]\cr
|
||||
f^i(x_1,\ldots,x_i) &=
|
||||
\left[\matrix{cos(2\pi x_i)\cdot f^{i-1}(x_1,\ldots,x_{i-1})\cr sin(2\pi x_i)}\right]
|
||||
}$$
|
||||
|
||||
@<put low discrepancy sphere points to |ymat|@>=
// Fill |ymat| columns with points on the unit sphere: the degenerate
// 1-state case gets the two points +1/-1; otherwise quasi-Monte Carlo
// points in the d-cube are mapped by the recursive polar transform
// described in the web text above.
int d = model.npred()+model.nboth()-1;
TwoDMatrix ymat(model.npred()+model.nboth(), (d==0)? 2:m);
if (d == 0) {
ymat.get(0,0) = 1;
ymat.get(0,1) = -1;
} else {
int icol = 0;
ReversePerScheme ps;
QMCarloCubeQuadrature qmc(d, m, ps);
qmcpit beg = qmc.start(m);
qmcpit end = qmc.end(m);
for (qmcpit run = beg; run != end; ++run, icol++) {
Vector ycol(ymat, icol);
Vector x(run.point());
x.mult(2*M_PI);   // angles in [0, 2*pi)
ycol[0] = 1;
// build the sphere point dimension by dimension: scale the current
// prefix by cos(x[i]) and append sin(x[i])
for (int i = 0; i < d; i++) {
Vector subsphere(ycol, 0, i+1);
subsphere.mult(cos(x[i]));
ycol[i+1] = sin(x[i]);
}
}
}
|
||||
|
||||
@ Here we multiply the sphere points in |ymat| with the Cholesky
|
||||
factor to obtain the ellipse, scale the ellipse by the given |mult|,
|
||||
and initialize matrix of shocks |umat| to zero.
|
||||
|
||||
@<transform sphere |ymat| and prepare |umat| for checking@>=
// Sphere -> ellipse: scale by |mult|, multiply by the covariance factor,
// then translate each column by the steady-state states; shocks are zero.
TwoDMatrix umat(model.nexog(), ymat.ncols());
umat.zeros();
ymat.mult(mult);
ymat.multLeft(ycovfac);
ConstVector ys(model.getSteady(), model.nstat(),
model.npred()+model.nboth());
for (int icol = 0; icol < ymat.ncols(); icol++) {
Vector ycol(ymat, icol);
ycol.add(1.0, ys);
}
|
||||
|
||||
@ Here we check the points and save the results to MAT-4 file.
|
||||
@<check on ellipse and save@>=
// Run the bulk check and write both the points and the errors to the
// MAT-4 file under "<prefix>_ellipse_*" names.
TwoDMatrix out(model.numeq(), ymat.ncols());
check(max_evals, ymat, umat, out);

char tmp[100];
sprintf(tmp, "%s_ellipse_points", prefix);
ymat.writeMat4(fd, tmp);
sprintf(tmp, "%s_ellipse_errors", prefix);
out.writeMat4(fd, tmp);
|
||||
|
||||
@ Here we check the errors along a simulation. We simulate, then set
|
||||
|x| to zeros, check and save results.
|
||||
|
||||
@<|GlobalChecker::checkAlongSimulationAndSave| code@>=
// Errors along a random simulation of length m: simulate the decision
// rule, check each simulated point with zero shocks, and save both the
// points and the errors to the MAT-4 file.
void GlobalChecker::checkAlongSimulationAndSave(FILE* fd, const char* prefix,
int m, int max_evals)
{
JournalRecordPair pa(journal);
pa << "Calculating errors at " << m
<< " simulated points" << endrec;
RandomShockRealization sr(model.getVcov(), system_random_generator.int_uniform());
TwoDMatrix* y = approx.getFoldDecisionRule().simulate(DecisionRule::horner,
m, model.getSteady(), sr);
TwoDMatrix x(model.nexog(), m);
x.zeros();
TwoDMatrix out(model.numeq(), m);
check(max_evals, *y, x, out);

char tmp[100];
sprintf(tmp, "%s_simul_points", prefix);
y->writeMat4(fd, tmp);
sprintf(tmp, "%s_simul_errors", prefix);
out.writeMat4(fd, tmp);

delete y;
}
|
||||
|
||||
|
||||
@ End of {\tt global\_check.cpp} file.
|
|
@ -0,0 +1,167 @@
|
|||
@q $Id: global_check.hweb 431 2005-08-16 15:41:01Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Global check. Start of {\tt global\_check.h} file.
|
||||
|
||||
The purpose of this file is to provide classes for checking error of
|
||||
approximation. If $y_t=g(y^*_{t-1},u)$ is an approximate solution,
|
||||
then we check for the error of residuals of the system equations. Let
|
||||
$F(y^*,u,u')=f(g^{**}(g^*(y^*,u'),u),g(y^*,u),y^*,u)$, then we
|
||||
calculate integral
|
||||
$$E[F(y^*,u,u')]$$@q'@>
|
||||
which we want to be zero for all $y^*$, and $u$.
|
||||
|
||||
There are a few possibilities how and where the integral is
|
||||
evaluated. Currently we offer the following:
|
||||
|
||||
\numberedlist
|
||||
\li Along shocks. The $y^*$ is set to steady state, and $u$ is set to
|
||||
zero but one element is going from minus through plus shocks in few
|
||||
steps. The user gives the scaling factor, for instance interval
|
||||
$\langle -3\sigma,3\sigma\rangle$ (where $\sigma$ is a standard error
|
||||
of the shock), and a number of steps. This is repeated for each shock
|
||||
(element of the $u$ vector).
|
||||
\li Along simulation. Some random simulation is run, and for each
|
||||
realization of $y^*$ and $u$ along the path we evaluate the residual.
|
||||
\li On ellipse. Let $V=AA^T$ be a covariance matrix of the
|
||||
predetermined variables $y^*$ based on linear approximation, then we
|
||||
calculate integral for points on the ellipse $\{Ax\vert\, \Vert
|
||||
x\Vert_2=1\}$. The points are selected by means of low discrepancy
|
||||
method and polar transformation. The shock $u$ are zeros.
|
||||
|
||||
\li Unconditional distribution.
|
||||
|
||||
\endnumberedlist
|
||||
|
||||
|
||||
@s ResidFunction int
|
||||
@s GResidFunction int
|
||||
@s GlobalChecker int
|
||||
@s VectorFunction int
|
||||
@s ResidFunctionSig int
|
||||
@s GaussHermite int
|
||||
@s SmolyakQuadrature int
|
||||
@s ProductQuadrature int
|
||||
@s ParameterSignal int
|
||||
@s Quadrature int
|
||||
@s QMCarloCubeQuadrature int
|
||||
|
||||
@c
|
||||
#ifndef GLOBAL_CHECK_H
|
||||
#define GLOBAL_CHECK_H
|
||||
|
||||
#include "vector_function.h"
|
||||
#include "quadrature.h"
|
||||
|
||||
#include "dynamic_model.h"
|
||||
#include "journal.h"
|
||||
#include "approximation.h"
|
||||
|
||||
@<|ResidFunction| class declaration@>;
|
||||
@<|GResidFunction| class declaration@>;
|
||||
@<|GlobalChecker| class declaration@>;
|
||||
@<|ResidFunctionSig| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ This is a class for implementing |VectorFunction| interface
|
||||
evaluating the residual of equations, this is
|
||||
$$F(y^*,u,u')=f(g^{**}(g^*(y^*,u),u'),y^*,u)$$
|
||||
is written as a function of $u'$.
|
||||
|
||||
When the object is constructed, one has to specify $(y^*,u)$, this is
|
||||
done by |setYU| method. The object has basically two states. One is
|
||||
after construction and before call to |setYU|. The second is after
|
||||
call |setYU|. We distinguish between the two states, an object in the
|
||||
second state contains |yplus|, |ystar|, |u|, and |hss|.
|
||||
|
||||
The vector |yplus| is $g^*(y^*,u)$. |ystar| is $y^*$, and polynomial
|
||||
|hss| is partially evaluated $g^{**}(yplus, u)$.
|
||||
|
||||
The pointer to |DynamicModel| is important, since the |DynamicModel|
|
||||
evaluates the function $f$. When copying the object, we have to make
|
||||
also a copy of |DynamicModel|.
|
||||
|
||||
@<|ResidFunction| class declaration@>=
// VectorFunction evaluating the residual F(y*,u,u') as a function of u'
// (see the web text above for the two-state protocol around |setYU|).
class ResidFunction : public VectorFunction {
protected:@;
const Approximation& approx;
DynamicModel* model;     // owned clone; evaluates the system f
Vector* yplus;           // g*(y*,u); null before |setYU|
Vector* ystar;           // y*; null before |setYU|
Vector* u;               // u; null before |setYU|
FTensorPolynomial* hss;  // partially evaluated forward part; null before |setYU|
public:@;
ResidFunction(const Approximation& app);
ResidFunction(const ResidFunction& rf);
virtual ~ResidFunction();
virtual VectorFunction* clone() const
{@+ return new ResidFunction(*this);@+}
virtual void eval(const Vector& point, const ParameterSignal& sig, Vector& out);
void setYU(const Vector& ys, const Vector& xx);
};
|
||||
|
||||
@ This is a |ResidFunction| wrapped with |GaussConverterFunction|.
|
||||
|
||||
@<|GResidFunction| class declaration@>=
// |ResidFunction| wrapped with |GaussConverterFunction| so it can be
// integrated with Gauss-Hermite quadrature over the shock distribution.
class GResidFunction : public GaussConverterFunction {
public:@;
GResidFunction(const Approximation& app)
: GaussConverterFunction(new ResidFunction(app), app.getModel().getVcov())@+ {}
GResidFunction(const GResidFunction& rf)
: GaussConverterFunction(rf)@+ {}
virtual ~GResidFunction()@+ {}
virtual VectorFunction* clone() const
{@+ return new GResidFunction(*this);@+}
// forward the conditioning point to the wrapped ResidFunction
void setYU(const Vector& ys, const Vector& xx)
{@+ ((ResidFunction*)func)->setYU(ys, xx);}
};
|
||||
|
||||
|
||||
@ This is a class encapsulating checking algorithms. Its core routine
|
||||
is |check|, which calculates integral $E[F(y^*,u,u')\vert y^*,u]$ for
|
||||
given realizations of $y^*$ and $u$. The both are given in
|
||||
matrices. The methods checking along shocks, on ellipse and along a
|
||||
simulation path, just fill the matrices and call the core |check|.
|
||||
|
||||
The method |checkUnconditionalAndSave| evaluates unconditional
|
||||
$E[F(y,u,u')]$.
|
||||
|
||||
The object also maintains a set of |GResidFunction| functions |vfs| in
|
||||
order to save (possibly expensive) copying of |DynamicModel|s.
|
||||
|
||||
@<|GlobalChecker| class declaration@>=
// Checking algorithms around the core |check| routine; keeps |n| copies
// of the (expensive to clone) resid function in |vfs| for reuse.
class GlobalChecker {
const Approximation& approx;
const DynamicModel& model;
Journal& journal;
GResidFunction rf;       // prototype function
VectorFunctionSet vfs;   // n clones for (possibly parallel) integration
public:@;
GlobalChecker(const Approximation& app, int n, Journal& jr)
: approx(app), model(approx.getModel()), journal(jr),
rf(approx), vfs(rf, n)@+ {}
// bulk check: one column of y/x per point, errors to |out|
void check(int max_evals, const ConstTwoDMatrix& y,
const ConstTwoDMatrix& x, TwoDMatrix& out);
void checkAlongShocksAndSave(FILE* fd, const char* prefix,
int m, double mult, int max_evals);
void checkOnEllipseAndSave(FILE* fd, const char* prefix,
int m, double mult, int max_evals);
void checkAlongSimulationAndSave(FILE* fd, const char* prefix,
int m, int max_evals);
void checkUnconditionalAndSave(FILE* fd, const char* prefix,
int m, int max_evals);
protected:@;
// core routine: integral E[F(y*,u,u')|y*,u] for a single point
void check(const Quadrature& quad, int level,
const ConstVector& y, const ConstVector& x, Vector& out);
};
|
||||
|
||||
|
||||
@ Signalled resid function. Not implemented yet. todo:
|
||||
@<|ResidFunctionSig| class declaration@>=
// Signalled resid function -- declared but not implemented yet (todo).
class ResidFunctionSig : public ResidFunction {
public:@;
ResidFunctionSig(const Approximation& app, const Vector& ys, const Vector& xx);
};
|
||||
|
||||
@ End of {\tt global\_check.h} file.
|
|
@ -0,0 +1,333 @@
|
|||
@q $Id: journal.cweb 413 2005-08-16 14:39:55Z kamenik $ @>
|
||||
@q Copyright 2004, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt journal.cpp} file.
|
||||
|
||||
@c
|
||||
#include "journal.h"
|
||||
#include "kord_exception.h"
|
||||
|
||||
#ifndef __MINGW32__
|
||||
# include <sys/resource.h>
|
||||
# include <sys/utsname.h>
|
||||
#endif
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <time.h>
|
||||
|
||||
SystemResources _sysres;
|
||||
#ifdef __MINGW32__
|
||||
@<|gettimeofday| Win32 implementation@>;
|
||||
@<|sysconf| Win32 implementation@>;
|
||||
#endif
|
||||
|
||||
@<|SystemResources| constructor code@>;
|
||||
@<|SystemResources::pageSize| code@>;
|
||||
@<|SystemResources::physicalPages| code@>;
|
||||
@<|SystemResources::onlineProcessors| code@>;
|
||||
@<|SystemResources::availableMemory| code@>;
|
||||
@<|SystemResources::getRUS| code@>;
|
||||
@<|SystemResourcesFlash| constructor code@>;
|
||||
@<|SystemResourcesFlash::diff| code@>;
|
||||
@<|JournalRecord::operator<<| symmetry code@>;
|
||||
@<|JournalRecord::writePrefix| code@>;
|
||||
@<|JournalRecord::writePrefixForEnd| code@>;
|
||||
@<|JournalRecordPair| destructor code@>;
|
||||
@<|endrec| code@>;
|
||||
@<|Journal::printHeader| code@>;
|
||||
|
||||
@
|
||||
@<|SystemResources| constructor code@>=
// Record the process start time; |getRUS| reports elapsed time
// relative to this instant.
SystemResources::SystemResources()
{
gettimeofday(&start, NULL);
}
|
||||
|
||||
|
||||
@
|
||||
@<|SystemResources::pageSize| code@>=
// Memory page size of the host system, in bytes, as reported by sysconf.
long int SystemResources::pageSize()
{
	const long int page_bytes = sysconf(_SC_PAGESIZE);
	return page_bytes;
}
|
||||
|
||||
@
|
||||
@<|SystemResources::physicalPages| code@>=
// Total number of physical memory pages on the host.
long int SystemResources::physicalPages()
{
return sysconf(_SC_PHYS_PAGES);
}
|
||||
|
||||
@
|
||||
@<|SystemResources::onlineProcessors| code@>=
// Number of processors currently online.
long int SystemResources::onlineProcessors()
{
return sysconf(_SC_NPROCESSORS_ONLN);
}
|
||||
|
||||
@
|
||||
@<|SystemResources::availableMemory| code@>=
// Currently available physical memory in bytes (page size times the
// number of available physical pages).
long int SystemResources::availableMemory()
{
return pageSize()*sysconf(_SC_AVPHYS_PAGES);
}
|
||||
|
||||
@ Here we read the current values of resource usage. For MinGW, we
|
||||
implement only the number of available physical memory pages.
|
||||
|
||||
@<|SystemResources::getRUS| code@>=
// Snapshot of current resource usage.
// Outputs: |load_avg| (1-minute load average), |pg_avail| (available
// physical pages), |utime|/|stime| (user/system CPU seconds),
// |elapsed| (wall-clock seconds since construction), |idrss|
// (integral unshared data size, as reported by getrusage), |majflt|
// (major page faults). Under MinGW only |elapsed| and |pg_avail| are
// real; everything else is set to -1.
void SystemResources::getRUS(double& load_avg, long int& pg_avail,
			double& utime, double& stime, double& elapsed,
			long int& idrss, long int& majflt)
{
	struct timeval now;
	gettimeofday(&now, NULL);
	// wall-clock time since the constructor ran, in seconds
	elapsed = now.tv_sec-start.tv_sec + (now.tv_usec-start.tv_usec)*1.0e-6;

#ifndef __MINGW32__
	struct rusage rus;
	getrusage(RUSAGE_SELF, &rus);
	utime = rus.ru_utime.tv_sec+rus.ru_utime.tv_usec*1.0e-6;
	stime = rus.ru_stime.tv_sec+rus.ru_stime.tv_usec*1.0e-6;
	idrss = rus.ru_idrss;
	majflt = rus.ru_majflt;

	getloadavg(&load_avg, 1);
#else
	// not available on MinGW; callers display these as -1
	utime = -1.0;
	stime = -1.0;
	idrss = -1;
	majflt = -1;
	load_avg = -1.0;
#endif
	pg_avail = sysconf(_SC_AVPHYS_PAGES);
}
|
||||
|
||||
@
|
||||
@<|SystemResourcesFlash| constructor code@>=
// Capture a snapshot of all resource-usage figures from the global
// |_sysres| at the moment of construction.
SystemResourcesFlash::SystemResourcesFlash()
{
	_sysres.getRUS(load_avg, pg_avail, utime, stime,
		       elapsed, idrss, majflt);
}
|
||||
|
||||
@
|
||||
@<|SystemResourcesFlash::diff| code@>=
// Turn this snapshot into a delta against an earlier snapshot |pre|.
// Only the cumulative quantities are differenced; |load_avg| and
// |pg_avail| are level (instantaneous) quantities and are kept as-is.
void SystemResourcesFlash::diff(const SystemResourcesFlash& pre)
{
	utime -= pre.utime;
	stime -= pre.stime;
	elapsed -= pre.elapsed;
	idrss -= pre.idrss;
	majflt -= pre.majflt;
}
|
||||
|
||||
@
|
||||
@<|JournalRecord::operator<<| symmetry code@>=
// Append an |IntSequence| to the record's message in the form
// "[a,b,c]" (comma-separated, bracketed), reusing the string and int
// insertion operators.
JournalRecord& JournalRecord::operator<<(const IntSequence& s)
{
	operator<<("[");
	for (int i = 0; i < s.size(); i++) {
		operator<<(s[i]);
		if (i < s.size()-1)
			operator<<(",");
	}
	operator<<("]");
	return *this;
}
|
||||
|
||||
@
|
||||
@<|JournalRecord::writePrefix| code@>=
// Format the fixed-column prefix of a journal line into |prefix|:
// elapsed time, record type char + ordinal, load average, available
// memory in MB, then filler and 2 spaces of indentation per nesting
// depth. The offsets (7,14,18,24,33) are the column layout documented
// by |Journal::printHeader|; each sprintf's terminating NUL is
// overwritten by the next one (or by the space fill at 33).
void JournalRecord::writePrefix(const SystemResourcesFlash& f)
{
	for (int i = 0; i < MAXLEN; i++)
		prefix[i] = ' ';
	double mb = 1024*1024;
	sprintf(prefix, "%07.6g", f.elapsed);
	sprintf(prefix+7, ":%c%05d", recChar, ord);
	sprintf(prefix+14, ":%1.1f", f.load_avg);
	sprintf(prefix+18, ":%05.4g", f.pg_avail*_sysres.pageSize()/mb);
	sprintf(prefix+24, "%s", ": : ");
	// two-space indent per open JournalRecordPair
	for (int i = 0; i < 2*journal.getDepth(); i++)
		prefix[i+33]=' ';
	prefix[2*journal.getDepth()+33]='\0';
}
|
||||
|
||||
@
|
||||
@<|JournalRecord::writePrefixForEnd| code@>=
// Format |prefix_end| for the closing line of a paired record. A fresh
// snapshot is differenced against the opening snapshot |f|, so the
// fields show resource consumption *during* the bracketed section
// (plus, in column 24, major faults converted to MB). Column layout
// mirrors |writePrefix| with 'E' as the record character.
void JournalRecordPair::writePrefixForEnd(const SystemResourcesFlash& f)
{
	for (int i = 0; i < MAXLEN; i++)
		prefix_end[i] = ' ';
	double mb = 1024*1024;
	SystemResourcesFlash difnow;
	difnow.diff(f);
	sprintf(prefix_end, "%07.6g", f.elapsed+difnow.elapsed);
	sprintf(prefix_end+7, ":E%05d", ord);
	sprintf(prefix_end+14, ":%1.1f", difnow.load_avg);
	sprintf(prefix_end+18, ":%05.4g", difnow.pg_avail*_sysres.pageSize()/mb);
	sprintf(prefix_end+24, ":%06.5g", difnow.majflt*_sysres.pageSize()/mb);
	sprintf(prefix_end+31, "%s", ": ");
	for (int i = 0; i < 2*journal.getDepth(); i++)
		prefix_end[i+33]=' ';
	prefix_end[2*journal.getDepth()+33]='\0';
}
|
||||
|
||||
@
|
||||
@<|JournalRecordPair| destructor code@>=
// Close the paired record: pop one nesting level, then emit the "end"
// line (prefix computed from resource usage since the opening
// snapshot |flash|) followed by the message, and flush.
JournalRecordPair::~JournalRecordPair()
{
	journal.decrementDepth();
	writePrefixForEnd(flash);
	journal << prefix_end;
	journal << mes;
	journal << endl;
	journal.flush();
}
|
||||
|
||||
@
|
||||
@<|endrec| code@>=
// Manipulator terminating a journal record (used as |rec << ... <<
// endrec|): writes the prefix and message, flushes, and bumps the
// journal's record ordinal.
JournalRecord& endrec(JournalRecord& rec)
{
	rec.journal << rec.prefix;
	rec.journal << rec.mes;
	rec.journal << endl;
	rec.journal.flush();
	rec.journal.incrementOrd();
	return rec;
}
|
||||
|
||||
@
|
||||
@<|Journal::printHeader| code@>=
// Write the journal banner: copyright notice, system information and
// start time (POSIX only; MinGW prints placeholders), and a legend for
// the fixed columns produced by |JournalRecord::writePrefix|.
// NOTE(review): the column-legend strings below may have had wider
// alignment spacing in the original source — verify against upstream.
void Journal::printHeader()
{
	(*this)<< "This is Dynare++, Copyright (C) 2004,2005 Michel Juillard, Ondra Kamenik\n";
	(*this)<< "Dynare++ comes with ABSOLUTELY NO WARRANTY and is distributed under\n";
	(*this)<< "General Public License, see http://www.gnu.org/license/gpl.html\n";
	(*this)<< "\n\n";

#ifndef __MINGW32__
	utsname info;
	uname(&info);
	(*this)<< "System info: ";
	(*this)<< info.sysname << " " << info.release << " " << info.version << " ";
	(*this)<< info.machine << ", processors online: " << _sysres.onlineProcessors();

	(*this)<< "\n\nStart time: ";
	char ts[100];
	time_t curtime = time(NULL);
	tm loctime;
	// reentrant variants; ts must be at least 26 chars for asctime_r
	localtime_r(&curtime, &loctime);
	asctime_r(&loctime, ts);
	(*this)<< ts << "\n";
#else
	(*this) << "System info: (not implemented for MINGW)\n";
	(*this) << "Start time: (not implemented for MINGW)\n\n";
#endif

	(*this)<< " ------ elapsed time (seconds) \n";
	(*this)<< " | ------ record unique identifier \n";
	(*this)<< " | | ------ load average \n";
	(*this)<< " | | | ------ available memory (MB) \n";
	(*this)<< " | | | | ------ major faults (MB)\n";
	(*this)<< " | | | | | \n";
	(*this)<< " V V V V V \n";
	(*this)<< "\n";
}
|
||||
|
||||
|
||||
@ Taken from list {\tt gdb@@sources.redhat.com}, the author is Danny Smith.
|
||||
|
||||
|_W32_FT_OFFSET| is a time from 1 Jan 1601 to 1 Jan 1970 in 100ns
|
||||
units, and the |filetime| is taken from the {\tt windows.h} file.
|
||||
|
||||
@s filetime int
|
||||
@s __stdcall int
|
||||
@d _W32_FT_OFFSET (116444736000000000LL)
|
||||
@s w32_ftv int
|
||||
|
||||
@<|gettimeofday| Win32 implementation@>=
// Minimal Win32 FILETIME structure (two 32-bit halves of a 64-bit
// count of 100ns intervals since 1 Jan 1601), declared locally to
// avoid pulling in windows.h.
typedef struct _filetime {
	unsigned long dwLowDateTime;
	unsigned long dwHighDateTime;
} filetime;
@#
extern "C" {
	void __stdcall GetSystemTimeAsFileTime(filetime*);
};
@#
// Union used to reinterpret the FILETIME pair as a single 64-bit
// integer without shifting by hand.
typedef union {
	long long ns100; // time since 1 Jan 1601 in 100ns units
	filetime ft;
} w32_ftv;
@#
// gettimeofday replacement: converts the Windows epoch (1601) to the
// Unix epoch (1970) via _W32_FT_OFFSET and 100ns units to seconds and
// microseconds. The |tz| argument is ignored.
void gettimeofday(struct timeval* p, struct timezone* tz)
{
	w32_ftv _now;
	GetSystemTimeAsFileTime( &(_now.ft) );
	p->tv_usec=(long)((_now.ns100 / 10LL) % 1000000LL );
	p->tv_sec= (long)((_now.ns100-_W32_FT_OFFSET)/10000000LL);
	return;
}
|
||||
|
||||
@ Here we implement |sysconf| for MinGW. We implement only page size,
|
||||
number of physical pages, and the number of available physical pages. The
|
||||
pagesize is set to 1024 bytes, real pagesize can differ but it is not
|
||||
important. We can do this since Windows kernel32 |GlobalMemoryStatus|
|
||||
call returns number of bytes.
|
||||
|
||||
Number of online processors is not implemented and returns -1, since
|
||||
Windows kernel32 |GetSystemInfo| call is too complicated.
|
||||
|
||||
@<|sysconf| Win32 implementation@>=
// Local stand-ins for the POSIX sysconf query constants.
#define _SC_PAGESIZE 1
#define _SC_PHYS_PAGES 2
#define _SC_AVPHYS_PAGES 3
#define _SC_NPROCESSORS_ONLN 4
@#
// Mirror of the Win32 MEMORYSTATUS structure, declared locally to
// avoid including windows.h; the constructor fills it in.
// NOTE(review): the byte-count fields are 32-bit here, so machines
// with >4GB RAM would see truncated totals — confirm acceptable.
struct Win32MemoryStatus {
	unsigned long dwLength;
	unsigned long dwMemoryLoad;
	unsigned int dwTotalPhys;
	unsigned int dwAvailPhys;
	unsigned int dwTotalPageFile;
	unsigned int dwAvailPageFile;
	unsigned int dwTotalVirtual;
	unsigned int dwAvailVirtual;
	Win32MemoryStatus();
};
@#
extern "C" {
	void __stdcall GlobalMemoryStatus(Win32MemoryStatus *);
};
@#
// Query the kernel; dwLength must be set before the call, per the
// GlobalMemoryStatus contract.
Win32MemoryStatus::Win32MemoryStatus()
{
	dwLength = sizeof(Win32MemoryStatus);
	GlobalMemoryStatus(this);
}
@#
// sysconf replacement. The page size is fixed at 1024 bytes (real
// page size is irrelevant since GlobalMemoryStatus reports bytes, and
// page counts below are derived by dividing by the same 1024).
// _SC_NPROCESSORS_ONLN is not implemented and returns -1.
long sysconf(int name)
{
	switch (name) {
	case _SC_PAGESIZE:@;
		return 1024;
	case _SC_PHYS_PAGES:@;
		{
			Win32MemoryStatus memstat;
			return memstat.dwTotalPhys/1024;
		}
	case _SC_AVPHYS_PAGES:@;
		{
			Win32MemoryStatus memstat;
			return memstat.dwAvailPhys/1024;
		}
	case _SC_NPROCESSORS_ONLN:@;
		return -1;
	default:@;
		KORD_RAISE("Not implemented in Win32 sysconf.");
		return -1;
	}
}
|
||||
|
||||
@ End of {\tt journal.cpp} file.
|
|
@ -0,0 +1,137 @@
|
|||
@q $Id: journal.hweb 417 2005-08-16 15:04:24Z kamenik $ @>
|
||||
@q Copyright 2004, Ondra Kamenik @>
|
||||
|
||||
@*2 Resource usage journal. Start of {\tt journal.h} file.
|
||||
|
||||
@s timeval int
|
||||
@s rusage int
|
||||
@s SystemResources int
|
||||
@s SystemResourcesFlash int
|
||||
@s Journal int
|
||||
@s JournalRecord int
|
||||
@s JournalRecordPair int
|
||||
|
||||
@c
|
||||
#ifndef JOURNAL_H
|
||||
#define JOURNAL_H
|
||||
|
||||
#include "int_sequence.h"
|
||||
|
||||
#include <sys/time.h>
|
||||
#include <stdio.h>
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
@<|SystemResources| class declaration@>;
|
||||
@<|SystemResourcesFlash| struct declaration@>;
|
||||
@<|Journal| class declaration@>;
|
||||
@<|JournalRecord| class declaration@>;
|
||||
@<|JournalRecordPair| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@
|
||||
@<|SystemResources| class declaration@>=
// Process-wide resource accounting. One global instance (|_sysres|)
// records the start time; static queries wrap sysconf, and |getRUS|
// produces a full usage snapshot (see journal.cpp for field meanings).
class SystemResources {
	timeval start; // wall-clock time of construction
public:@;
	SystemResources();
	static long int pageSize();
	static long int physicalPages();
	static long int onlineProcessors();
	static long int availableMemory();
	void getRUS(double& load_avg, long int& pg_avail, double& utime,
				double& stime, double& elapsed, long int& idrss,
				long int& majflt);
};
|
||||
|
||||
@
|
||||
@<|SystemResourcesFlash| struct declaration@>=
// Plain snapshot of the figures returned by |SystemResources::getRUS|.
// |diff| converts the cumulative members into deltas against an
// earlier snapshot.
struct SystemResourcesFlash {
	double load_avg;   // 1-minute load average
	long int pg_avail; // available physical pages
	double utime;      // user CPU seconds
	double stime;      // system CPU seconds
	double elapsed;    // wall-clock seconds since program start
	long int idrss;    // integral unshared data size (getrusage)
	long int majflt;   // major page faults
	SystemResourcesFlash();
	void diff(const SystemResourcesFlash& pre);
};
|
||||
|
||||
|
||||
@
|
||||
@s stringstream int
|
||||
@d MAXLEN 1000
|
||||
|
||||
@<|JournalRecord| class declaration@>=
class JournalRecord;
JournalRecord& endrec(JournalRecord&);

// One line of the journal: a fixed-column prefix (resource usage,
// computed at construction) plus a message built up with the
// insertion operators and terminated by the |endrec| manipulator.
class JournalRecord {
protected:@;
	char recChar; // record-type character shown in the prefix ('M', 'S', ...)
	int ord;      // record ordinal taken from the journal at construction
public:@;
	Journal& journal;
	char prefix[MAXLEN]; // formatted by writePrefix
	char mes[MAXLEN];    // accumulated message text
	SystemResourcesFlash flash; // usage snapshot at construction
	typedef JournalRecord& (*_Tfunc)(JournalRecord&);

	JournalRecord(Journal& jr, char rc = 'M')
		: recChar(rc), ord(jr.getOrd()), journal(jr)
		{@+ prefix[0]='\0';mes[0]='\0';writePrefix(flash); @+}
	virtual ~JournalRecord() @+{}
	JournalRecord& operator<<(const IntSequence& s);
	// manipulator support (for |endrec|)
	JournalRecord& operator<<(_Tfunc f)
		{@+ (*f)(*this); return *this;@+}
	// note: no bounds check against MAXLEN when appending
	JournalRecord& operator<<(const char* s)
		{@+ strcat(mes, s); return *this; @+}
	JournalRecord& operator<<(int i)
		{@+ sprintf(mes+strlen(mes), "%d", i); return *this;@+}
	JournalRecord& operator<<(double d)
		{@+ sprintf(mes+strlen(mes), "%f", d); return *this;@+}
protected:@;
	void writePrefix(const SystemResourcesFlash& f);
};
|
||||
|
||||
@
|
||||
@<|JournalRecordPair| class declaration@>=
// Scoped (start/end) journal record: construction opens a nesting
// level ('S' record), the destructor emits a matching end line with
// the resources consumed in between (see journal.cpp).
class JournalRecordPair : public JournalRecord {
	char prefix_end[MAXLEN]; // prefix of the closing line
public:@;
	JournalRecordPair(Journal& jr)
		: JournalRecord(jr, 'S')
		{@+ prefix_end[0] = '\0'; journal.incrementDepth(); @+}
	~JournalRecordPair();
private:@;
	void writePrefixForEnd(const SystemResourcesFlash& f);
};
|
||||
|
||||
@
|
||||
@<|Journal| class declaration@>=
// The journal output file. Derives from ofstream so records can
// stream directly into it; tracks a running record ordinal and the
// current nesting depth used for indentation.
class Journal : public ofstream {
	int ord;   // next record unique identifier
	int depth; // current nesting depth of JournalRecordPairs
public:@;
	Journal(const char* fname)
		: ofstream(fname), ord(0), depth(0)
		{@+ printHeader();@+}
	~Journal()
		{@+ flush();@+}
	void printHeader();
	void incrementOrd()
		{@+ ord++; @+}
	int getOrd() const
		{@+ return ord; @+}
	void incrementDepth()
		{@+ depth++; @+}
	void decrementDepth()
		{@+ depth--; @+}
	int getDepth() const
		{return depth;}
};
|
||||
|
||||
|
||||
@ End of {\tt journal.h} file.
|
|
@ -0,0 +1,64 @@
|
|||
@q $Id: kord_exception.hweb 1452 2007-11-21 11:33:30Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Exception. Start of {\tt kord\_exception.h} file.
|
||||
|
||||
This is a simple code defining an exception and two convenience macros.
|
||||
|
||||
@s KordException int
|
||||
@c
|
||||
#ifndef KORD_EXCEPTION_H
|
||||
#define KORD_EXCEPTION_H
|
||||
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#define KORD_RAISE(mes) \
|
||||
throw KordException(__FILE__, __LINE__, mes);
|
||||
|
||||
#define KORD_RAISE_IF(expr, mes) \
|
||||
if (expr) throw KordException(__FILE__, __LINE__, mes);
|
||||
|
||||
#define KORD_RAISE_X(mes, c) \
|
||||
throw KordException(__FILE__, __LINE__, mes, c);
|
||||
|
||||
#define KORD_RAISE_IF_X(expr, mes, c) \
|
||||
if (expr) throw KordException(__FILE__, __LINE__, mes, c);
|
||||
|
||||
@<|KordException| class definition@>;
|
||||
@<|KordException| error code definitions@>;
|
||||
|
||||
#endif
|
||||
|
||||
@
|
||||
@<|KordException| class definition@>=
// Exception thrown by the KORD_RAISE* macros. Carries the throwing
// source file and line, a message, and a numeric error code
// (default 255; specific codes are #defined below). Strings are
// copied into fixed buffers with guaranteed NUL termination.
class KordException {
protected:@;
	char fname[50];    // source file of the throw site (truncated to 49 chars)
	int lnum;          // source line of the throw site
	char message[500]; // human-readable description (truncated to 499 chars)
	int cd;            // error code
public:@;
	KordException(const char* f, int l, const char* mes, int c=255)
	{
		strncpy(fname, f, 50);@+ fname[49] = '\0';
		strncpy(message, mes, 500);@+ message[499] = '\0';
		lnum = l;
		cd = c;
	}
	virtual ~KordException()@+ {}
	virtual void print() const
	{@+ printf("At %s:%d:(%d):%s\n", fname, lnum, cd, message);@+}
	virtual int code() const
	{@+ return cd; @+}
	const char* get_message() const
	{@+ return message; @+}
};
|
||||
|
||||
@
|
||||
@<|KordException| error code definitions@>=
|
||||
#define KORD_FP_NOT_CONV 254
|
||||
#define KORD_FP_NOT_FINITE 253
|
||||
#define KORD_MD_NOT_STABLE 252
|
||||
|
||||
@ End of {\tt kord\_exception.h} file.
|
|
@ -0,0 +1,340 @@
|
|||
@q $Id: korder.cweb 1831 2008-05-18 20:13:42Z kamenik $ @>
|
||||
@q Copyright 2004, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt korder.cpp} file.
|
||||
|
||||
@c
|
||||
|
||||
#include "kord_exception.h"
|
||||
#include "korder.h"
|
||||
|
||||
#include "cpplapack.h"
|
||||
|
||||
@<|PLUMatrix| copy constructor@>;
|
||||
@<|PLUMatrix::calcPLU| code@>;
|
||||
@<|PLUMatrix::multInv| code@>;
|
||||
@<|MatrixA| constructor code@>;
|
||||
@<|MatrixS| constructor code@>;
|
||||
@<|KOrder| member access method specializations@>;
|
||||
@<|KOrder::sylvesterSolve| unfolded specialization@>;
|
||||
@<|KOrder::sylvesterSolve| folded specialization@>;
|
||||
@<|KOrder::switchToFolded| code@>;
|
||||
@<|KOrder| constructor code@>;
|
||||
|
||||
@
|
||||
@<|PLUMatrix| copy constructor@>=
// Deep copy: the matrix data and the LU factorization |inv| are copied
// by the member initializers; the pivot array |ipiv| is heap-allocated
// and copied explicitly.
PLUMatrix::PLUMatrix(const PLUMatrix& plu)
	: TwoDMatrix(plu), inv(plu.inv), ipiv(new int[nrows()])
{
	memcpy(ipiv, plu.ipiv, nrows()*sizeof(int));
}
|
||||
|
||||
|
||||
@ Here we set |ipiv| and |inv| members of the |PLUMatrix| depending on
|
||||
its content. It is assumed that subclasses will call this method at
|
||||
the end of their constructors.
|
||||
|
||||
@<|PLUMatrix::calcPLU| code@>=
// Compute the PLU factorization of the current matrix contents into
// |inv| (a working copy of the data) and |ipiv| via LAPACK dgetrf.
// Subclasses call this at the end of their constructors.
// NOTE(review): |info| from dgetrf is not checked, so a singular
// matrix is silently accepted here and only surfaces later — confirm
// this is intentional.
void PLUMatrix::calcPLU()
{
	int info;
	int rows = nrows();
	inv = (const Vector&)getData();
	LAPACK_dgetrf(&rows, &rows, inv.base(), &rows, ipiv, &info);
}
|
||||
|
||||
@ Here we just call the LAPACK machinery to multiply by the inverse.
|
||||
|
||||
@<|PLUMatrix::multInv| code@>=
// Overwrite |m| with |this|^{-1}*|m| by back-substitution through the
// stored LU factors (LAPACK dgetrs, no transpose). Requires the row
// dimension of |m| to match this (square) matrix.
void PLUMatrix::multInv(TwoDMatrix& m) const
{
	KORD_RAISE_IF(m.nrows() != ncols(),
		      "The matrix is not square in PLUMatrix::multInv");
	int info;
	int mcols = m.ncols();
	int mrows = m.nrows();
	double* mbase = m.getData().base();
	LAPACK_dgetrs("N", &mrows, &mcols, inv.base(), &mrows, ipiv,
		      mbase, &mrows, &info);
	KORD_RAISE_IF(info != 0,
		      "Info!=0 in PLUMatrix::multInv");
}
|
||||
|
||||
@ Here we construct the matrix $A$. Its dimension is |ny|, and it is
|
||||
$$A=\left[f_{y}\right]+
|
||||
\left[0 \left[f_{y^{**}_+}\right]\cdot\left[g^{**}_{y^*}\right] 0\right]$$,
|
||||
where the first zero spans |nstat| columns, and last zero spans
|
||||
|nforw| columns.
|
||||
|
||||
@<|MatrixA| constructor code@>=
// Build A = [f_y] + [0 [f_y**+]*[g**_y*] 0] (see the section comment
// above) and factorize it. |ss| gives the stack sizes of the sparse
// tensor |f|; |c| selects the stack from which the slice is taken
// (index 1 = contemporaneous y, index 0 = forward y**).
MatrixA::MatrixA(const FSSparseTensor& f, const IntSequence& ss,
		 const TwoDMatrix& gy, const PartitionY& ypart)
	: PLUMatrix(ypart.ny())
{
	zeros();

	// the [f_y] term
	IntSequence c(1); c[0] = 1;
	FGSTensor f_y(f, ss, c, TensorDimens(ss, c));
	add(1.0, f_y);

	// the [f_y**+]*[g**_y*] term, added into the middle column block
	// (skipping the first nstat columns)
	ConstTwoDMatrix gss_ys(ypart.nstat+ypart.npred, ypart.nyss(), gy);
	c[0] = 0;
	FGSTensor f_yss(f, ss, c, TensorDimens(ss, c));
	TwoDMatrix sub(*this, ypart.nstat, ypart.nys());
	sub.multAndAdd(ConstTwoDMatrix(f_yss), gss_ys);

	// factorize so multInv is available
	calcPLU();
}
|
||||
|
||||
@ Here we construct the matrix $S$. Its dimension is |ny|, and it is
|
||||
$$S=\left[f_{y}\right]+
|
||||
\left[0\quad\left[f_{y^{**}_+}\right]\cdot\left[g^{**}_{y^*}\right]\quad
|
||||
0\right]+ \left[0\quad 0\quad\left[f_{y^{**}_+}\right]\right]$$
|
||||
It is, in fact, the matrix $A$ plus the third summand. The first zero
|
||||
in the summand spans |nstat| columns, the second zero spans |npred|
|
||||
columns.
|
||||
|
||||
@<|MatrixS| constructor code@>=
// Build S = A + [0 0 [f_y**+]] (see the section comment above): the
// first two summands are constructed exactly as in MatrixA, then
// [f_y**+] is added into the last nyss() column block.
MatrixS::MatrixS(const FSSparseTensor& f, const IntSequence& ss,
		 const TwoDMatrix& gy, const PartitionY& ypart)
	: PLUMatrix(ypart.ny())
{
	zeros();

	// the [f_y] term
	IntSequence c(1); c[0] = 1;
	FGSTensor f_y(f, ss, c, TensorDimens(ss, c));
	add(1.0, f_y);

	// the [f_y**+]*[g**_y*] term
	ConstTwoDMatrix gss_ys(ypart.nstat+ypart.npred, ypart.nyss(), gy);
	c[0] = 0;
	FGSTensor f_yss(f, ss, c, TensorDimens(ss, c));
	TwoDMatrix sub(*this, ypart.nstat, ypart.nys());
	sub.multAndAdd(ConstTwoDMatrix(f_yss), gss_ys);

	// the extra [f_y**+] term distinguishing S from A
	TwoDMatrix sub2(*this, ypart.nstat+ypart.npred, ypart.nyss());
	sub2.add(1.0, f_yss);

	calcPLU();
}
|
||||
|
||||
|
||||
@ Here is the constructor of the |KOrder| class. We pass what we have
|
||||
to. The partitioning of the $y$ vector, a sparse container with model
|
||||
derivatives, then the first order approximation, these are $g_y$ and
|
||||
$g_u$ matrices, and covariance matrix of exogenous shocks |v|.
|
||||
|
||||
We build the members, it is nothing difficult. Note that we do not make
|
||||
a physical copy of sparse tensors, so during running the class, the
|
||||
outer world must not change them.
|
||||
|
||||
In the body, we have to set |nvs| array, and initialize $g$ and $G$
|
||||
containers to comply to preconditions of |performStep|.
|
||||
|
||||
@<|KOrder| constructor code@>=
// Construct the solver. Inputs: the y-vector partition sizes, the
// sparse container of model derivatives |fcont| (not copied — the
// caller must keep it unchanged while this object lives), the first
// order decision rule matrices |gy| and |gu|, the shock covariance
// matrix |v|, and the journal |jr|. The body validates dimensions,
// fills |nvs| (variable sizes of the four symmetry groups
// y*, u, u', sigma), and seeds the g and G containers so that
// |performStep| preconditions hold.
KOrder::KOrder(int num_stat, int num_pred, int num_both, int num_forw,
	       const TensorContainer<FSSparseTensor>& fcont,
	       const TwoDMatrix& gy, const TwoDMatrix& gu, const TwoDMatrix& v,
	       Journal& jr)
	: ypart(num_stat, num_pred, num_both, num_forw),@/
	ny(ypart.ny()), nu(gu.ncols()), maxk(fcont.getMaxDim()),@/
	nvs(4),@/
	_ug(4), _fg(4), _ugs(4), _fgs(4), _ugss(4), _fgss(4), @/
	_uG(4), _fG(4),@/
	_uZstack(&_uG, ypart.nyss(), &_ug, ny, ypart.nys(), nu),@/
	_fZstack(&_fG, ypart.nyss(), &_fg, ny, ypart.nys(), nu),@/
	_uGstack(&_ugs, ypart.nys(), nu),@/
	_fGstack(&_fgs, ypart.nys(), nu),@/
	_um(maxk, v), _fm(_um), f(fcont),@/
	matA(*(f.get(Symmetry(1))), _uZstack.getStackSizes(), gy, ypart),@/
	matS(*(f.get(Symmetry(1))), _uZstack.getStackSizes(), gy, ypart),@/
	matB(*(f.get(Symmetry(1))), _uZstack.getStackSizes()),@/
	journal(jr)@/
{
	// dimension sanity checks ("constuctor" typos in the last two
	// messages fixed to "constructor")
	KORD_RAISE_IF(gy.ncols() != ypart.nys(),
		      "Wrong number of columns in gy in KOrder constructor");
	KORD_RAISE_IF(v.ncols() != nu,
		      "Wrong number of columns of Vcov in KOrder constructor");
	KORD_RAISE_IF(nu != v.nrows(),
		      "Wrong number of rows of Vcov in KOrder constructor");
	KORD_RAISE_IF(maxk < 2,
		      "Order of approximation must be at least 2 in KOrder constructor");
	KORD_RAISE_IF(gy.nrows() != ypart.ny(),
		      "Wrong number of rows in gy in KOrder constructor");
	KORD_RAISE_IF(gu.nrows() != ypart.ny(),
		      "Wrong number of rows in gu in KOrder constructor");
	KORD_RAISE_IF(gu.ncols() != nu,
		      "Wrong number of columns in gu in KOrder constructor");

	// set nvs:
	nvs[0] = ypart.nys(); nvs[1] = nu; nvs[2] = nu; nvs[3] = 1;

	@<put $g_y$ and $g_u$ to the container@>;
	@<put $G_y$, $G_u$ and $G_{u'}$ to the container@>;@q'@>
}
|
||||
|
||||
@ Note that $g_\sigma$ is zero by the nature and we do not insert it to
|
||||
the container. We insert new physical copies.
|
||||
|
||||
@<put $g_y$ and $g_u$ to the container@>=
// Wrap the first order matrices as unfolded general-symmetry tensors
// (fresh copies of the data) and insert them as the order-1
// derivatives g_y (symmetry (1,0,0,0)) and g_u (symmetry (0,1,0,0)).
UGSTensor* tgy = new UGSTensor(ny, TensorDimens(Symmetry(1,0,0,0), nvs));
tgy->getData() = gy.getData();
insertDerivative<unfold>(tgy);
UGSTensor* tgu = new UGSTensor(ny, TensorDimens(Symmetry(0,1,0,0), nvs));
tgu->getData() = gu.getData();
insertDerivative<unfold>(tgu);
|
||||
|
||||
@ Also note that since $g_\sigma$ is zero, so is $G_\sigma$.
|
||||
@<put $G_y$, $G_u$ and $G_{u'}$ to the container@>=
// Derive the order-1 G tensors from the freshly inserted g tensors via
// Faa Di Bruno and store them; ownership of each new tensor passes to
// the container.
UGSTensor* tGy = faaDiBrunoG<unfold>(Symmetry(1,0,0,0));
G<unfold>().insert(tGy);
UGSTensor* tGu = faaDiBrunoG<unfold>(Symmetry(0,1,0,0));
G<unfold>().insert(tGu);
UGSTensor* tGup = faaDiBrunoG<unfold>(Symmetry(0,0,1,0));
G<unfold>().insert(tGup);
|
||||
|
||||
|
||||
|
||||
@ Here we have an unfolded specialization of |sylvesterSolve|. We
|
||||
simply create the sylvester object and solve it. Note that the $g^*_y$
|
||||
is not continuous in memory as assumed by the sylvester code, so we
|
||||
make a temporary copy and pass it as matrix $C$.
|
||||
|
||||
If the $B$ matrix is empty, in other words there are now forward
|
||||
looking variables, then the system becomes $AX=D$ which is solved by
|
||||
simple |matA.multInv()|.
|
||||
|
||||
If one wants to display the diagnostic messages from the Sylvester
|
||||
module, then after the |sylv.solve()| one needs to call
|
||||
|sylv.getParams().print("")|.
|
||||
|
||||
|
||||
@<|KOrder::sylvesterSolve| unfolded specialization@>=
// Solve the specialized Sylvester equation for the given right hand
// side |der| (overwritten with the solution). With both predetermined
// and forward-looking variables present, the full GeneralSylvester
// machinery is used (g*_y is copied into contiguous memory first, as
// the solver requires). With no forward-looking variables the system
// degenerates to A*X = D and is solved by |matA.multInv|.
template<>@/
void KOrder::sylvesterSolve<KOrder::unfold>(ctraits<unfold>::Ttensor& der) const
{
	JournalRecordPair pa(journal);
	pa << "Sylvester equation for dimension = " << der.getSym()[0] << endrec;
	if (ypart.nys() > 0 && ypart.nyss() > 0) {
		// error message spelling fixed ("Sylverster" -> "Sylvester")
		KORD_RAISE_IF(! der.isFinite(),
			      "RHS of Sylvester is not finite");
		TwoDMatrix gs_y(*(gs<unfold>().get(Symmetry(1,0,0,0))));
		GeneralSylvester sylv(der.getSym()[0], ny, ypart.nys(),
				      ypart.nstat+ypart.npred,
				      matA.getData().base(), matB.getData().base(),
				      gs_y.getData().base(), der.getData().base());
		sylv.solve();
	} else if (ypart.nys() > 0 && ypart.nyss() == 0) {
		matA.multInv(der);
	}
}
|
||||
|
||||
@ Here is the folded specialization of sylvester. We unfold the right
|
||||
hand side. Then we solve it by the unfolded version of
|
||||
|sylvesterSolve|, and fold it back and copy to output vector.
|
||||
|
||||
@<|KOrder::sylvesterSolve| folded specialization@>=
// Folded variant: unfold the right hand side, solve with the unfolded
// specialization, fold the solution back and copy it into |der|.
template<>@/
void KOrder::sylvesterSolve<KOrder::fold>(ctraits<fold>::Ttensor& der) const
{
	ctraits<unfold>::Ttensor tmp(der);
	sylvesterSolve<unfold>(tmp);
	ctraits<fold>::Ttensor ftmp(tmp);
	der.getData() = (const Vector&)(ftmp.getData());
}
|
||||
|
||||
@
|
||||
@<|KOrder::switchToFolded| code@>=
// Convert all computed unfolded tensors (g and G, skipping g tensors
// with a u' index) to folded storage, removing the unfolded originals
// for dimensions above 1 to free memory. Dimension-1 tensors are kept
// in both forms since later steps still read them unfolded.
//
// Fix: the G branch previously called |G<fold>().remove(*si)|, which
// deleted the folded tensor inserted two lines earlier and left the
// unfolded one alive — the inverse of the intent, and inconsistent
// with the g branch above. It now removes from the unfolded container.
void KOrder::switchToFolded()
{
	JournalRecordPair pa(journal);
	pa << "Switching from unfolded to folded" << endrec;

	int maxdim = g<unfold>().getMaxDim();
	for (int dim = 1; dim <= maxdim; dim++) {
		SymmetrySet ss(dim, 4);
		for (symiterator si(ss); !si.isEnd(); ++si) {
			// g tensors: only those without a u' index are kept
			if ((*si)[2] == 0 && g<unfold>().check(*si)) {
				FGSTensor* ft = new FGSTensor(*(g<unfold>().get(*si)));
				insertDerivative<fold>(ft);
				if (dim > 1) {
					gss<unfold>().remove(*si);
					gs<unfold>().remove(*si);
					g<unfold>().remove(*si);
				}
			}
			if (G<unfold>().check(*si)) {
				FGSTensor* ft = new FGSTensor(*(G<unfold>().get(*si)));
				G<fold>().insert(ft);
				if (dim > 1) {
					G<unfold>().remove(*si);
				}
			}
		}
	}
}
|
||||
|
||||
|
||||
|
||||
@ These are the specializations of container access methods. Nothing
|
||||
interesting here.
|
||||
|
||||
@<|KOrder| member access method specializations@>=
// Trivial fold/unfold accessor specializations: each pair returns the
// matching _u*/_f* member (mutable and const overloads).
template<> ctraits<KOrder::unfold>::Tg& KOrder::g<KOrder::unfold>()
{@+ return _ug;@+}
template<>@; const ctraits<KOrder::unfold>::Tg& KOrder::g<KOrder::unfold>()@+const@;
{@+ return _ug;@+}
template<> ctraits<KOrder::fold>::Tg& KOrder::g<KOrder::fold>()
{@+ return _fg;@+}
template<> const ctraits<KOrder::fold>::Tg& KOrder::g<KOrder::fold>()@+const@;
{@+ return _fg;@+}
template<> ctraits<KOrder::unfold>::Tgs& KOrder::gs<KOrder::unfold>()
{@+ return _ugs;@+}
template<> const ctraits<KOrder::unfold>::Tgs& KOrder::gs<KOrder::unfold>()@+const@;
{@+ return _ugs;@+}
template<> ctraits<KOrder::fold>::Tgs& KOrder::gs<KOrder::fold>()
{@+ return _fgs;@+}
template<> const ctraits<KOrder::fold>::Tgs& KOrder::gs<KOrder::fold>()@+const@;
{@+ return _fgs;@+}
template<> ctraits<KOrder::unfold>::Tgss& KOrder::gss<KOrder::unfold>()
{@+ return _ugss;@+}
template<> const ctraits<KOrder::unfold>::Tgss& KOrder::gss<KOrder::unfold>()@+const@;
{@+ return _ugss;@+}
template<> ctraits<KOrder::fold>::Tgss& KOrder::gss<KOrder::fold>()
{@+ return _fgss;@+}
template<> const ctraits<KOrder::fold>::Tgss& KOrder::gss<KOrder::fold>()@+const@;
{@+ return _fgss;@+}
template<> ctraits<KOrder::unfold>::TG& KOrder::G<KOrder::unfold>()
{@+ return _uG;@+}
template<> const ctraits<KOrder::unfold>::TG& KOrder::G<KOrder::unfold>()@+const@;
{@+ return _uG;@+}
template<> ctraits<KOrder::fold>::TG& KOrder::G<KOrder::fold>()
{@+ return _fG;@+}
template<> const ctraits<KOrder::fold>::TG& KOrder::G<KOrder::fold>()@+const@;
{@+ return _fG;@+}
template<> ctraits<KOrder::unfold>::TZstack& KOrder::Zstack<KOrder::unfold>()
{@+ return _uZstack;@+}
template<> const ctraits<KOrder::unfold>::TZstack& KOrder::Zstack<KOrder::unfold>()@+const@;
{@+ return _uZstack;@+}
template<> ctraits<KOrder::fold>::TZstack& KOrder::Zstack<KOrder::fold>()
{@+ return _fZstack;@+}
template<> const ctraits<KOrder::fold>::TZstack& KOrder::Zstack<KOrder::fold>()@+const@;
{@+ return _fZstack;@+}
template<> ctraits<KOrder::unfold>::TGstack& KOrder::Gstack<KOrder::unfold>()
{@+ return _uGstack;@+}
template<> const ctraits<KOrder::unfold>::TGstack& KOrder::Gstack<KOrder::unfold>()@+const@;
{@+ return _uGstack;@+}
template<> ctraits<KOrder::fold>::TGstack& KOrder::Gstack<KOrder::fold>()
{@+ return _fGstack;@+}
template<> const ctraits<KOrder::fold>::TGstack& KOrder::Gstack<KOrder::fold>()@+const@;
{@+ return _fGstack;@+}
template<> ctraits<KOrder::unfold>::Tm& KOrder::m<KOrder::unfold>()
{@+ return _um;@+}
template<> const ctraits<KOrder::unfold>::Tm& KOrder::m<KOrder::unfold>()@+const@;
{@+ return _um;@+}
template<> ctraits<KOrder::fold>::Tm& KOrder::m<KOrder::fold>()
{@+ return _fm;@+}
template<> const ctraits<KOrder::fold>::Tm& KOrder::m<KOrder::fold>()@+const@;
{@+ return _fm;@+}
|
||||
|
||||
|
||||
@ End of {\tt korder.cpp} file.
|
|
@ -0,0 +1,956 @@
|
|||
@q $Id: korder.hweb 2332 2009-01-14 10:26:54Z kamenik $ @>
|
||||
@q Copyright 2004, Ondra Kamenik @>
|
||||
|
||||
@*2 Higher order at deterministic steady. Start of {\tt korder.h} file.
|
||||
|
||||
The main purpose of this file is to implement a perturbation method
|
||||
algorithm for an SDGE model for higher order approximations. The input
|
||||
of the algorithm are sparse tensors as derivatives of the dynamic
|
||||
system, then dimensions of vector variables, then the first order
|
||||
approximation to the decision rule and finally a covariance matrix of
|
||||
exogenous shocks. The output are higher order derivatives of decision
|
||||
rule $y_t=g(y^*_{t-1},u_t,\sigma)$. The class provides also a method
|
||||
for checking a size of residuals of the solved equations.
|
||||
|
||||
The algorithm is implemented in |KOrder| class. The class contains
|
||||
both unfolded and folded containers to allow for switching (usually
|
||||
from unfold to fold) during the calculations. The algorithm is
|
||||
implemented in a few templated methods. To do this, we need some
|
||||
container type traits, which are in |ctraits| struct. Also, the
|
||||
|KOrder| class contains some information encapsulated in other
|
||||
classes, which are defined here. These include: |PartitionY|,
|
||||
|MatrixA|, |MatrixS| and |MatrixB|.
|
||||
|
||||
@s KOrder int
|
||||
@s ctraits int
|
||||
@s PartitionY int
|
||||
@s MatrixA int
|
||||
@s MatrixB int
|
||||
@s MatrixS int
|
||||
@s PLUMatrix int
|
||||
@s FGSTensor int
|
||||
@s UGSTensor int
|
||||
@s FGSContainer int
|
||||
@s UGSContainer int
|
||||
@s FSSparseTensor int
|
||||
@s TensorContainer int
|
||||
@s UNormalMoments int
|
||||
@s FNormalMoments int
|
||||
@s FoldedZContainer int
|
||||
@s UnfoldedZContainer int
|
||||
@s FoldedGContainer int
|
||||
@s UnfoldedGContainer int
|
||||
@s FoldedZXContainer int
|
||||
@s UnfoldedZXContainer int
|
||||
@s FoldedGXContainer int
|
||||
@s UnfoldedGXContainer int
|
||||
@s TwoDMatrix int
|
||||
@s ConstTwoDMatrix int
|
||||
@s IntSequence int
|
||||
@s Symmetry int
|
||||
@s SymmetrySet int
|
||||
@s symiterator int
|
||||
@s TensorDimens int
|
||||
@s Vector int
|
||||
@s ConstVector int
|
||||
@s UTensorPolynomial int
|
||||
@s FTensorPolynomial int
|
||||
@s UFSTensor int
|
||||
@s FFSTensor int
|
||||
@s GeneralSylvester int
|
||||
|
||||
@c
|
||||
#ifndef KORDER_H
|
||||
#define KORDER_H
|
||||
|
||||
#include "int_sequence.h"
|
||||
#include "fs_tensor.h"
|
||||
#include "gs_tensor.h"
|
||||
#include "t_container.h"
|
||||
#include "stack_container.h"
|
||||
#include "normal_moments.h"
|
||||
#include "t_polynomial.h"
|
||||
#include "faa_di_bruno.h"
|
||||
#include "journal.h"
|
||||
|
||||
#include "kord_exception.h"
|
||||
#include "GeneralSylvester.h"
|
||||
|
||||
#include <cmath>
|
||||
|
||||
#define TYPENAME typename
|
||||
|
||||
@<|ctraits| type traits declaration@>;
|
||||
@<|PartitionY| struct declaration@>;
|
||||
@<|PLUMatrix| class declaration@>;
|
||||
@<|MatrixA| class declaration@>;
|
||||
@<|MatrixS| class declaration@>;
|
||||
@<|MatrixB| class declaration@>;
|
||||
@<|KOrder| class declaration@>;
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
@ Here we use a classical IF template, and in |ctraits| we define a
|
||||
number of types. We have a type for tensor |Ttensor|, and types for
|
||||
each pair of folded/unfolded containers used as a member in |KOrder|.
|
||||
|
||||
Note that we have enumeration |fold| and |unfold|. These must have the
|
||||
same value as the same enumeration in |KOrder|.
|
||||
|
||||
@s IF int
|
||||
@s Then int
|
||||
@s Else int
|
||||
@s RET int
|
||||
|
||||
@<|ctraits| type traits declaration@>=
|
||||
class FoldedZXContainer;
|
||||
class UnfoldedZXContainer;
|
||||
class FoldedGXContainer;
|
||||
class UnfoldedGXContainer;
|
||||
|
||||
template<bool condition, class Then, class Else>
|
||||
struct IF {
|
||||
typedef Then RET;
|
||||
};
|
||||
|
||||
template<class Then, class Else>
|
||||
struct IF<false, Then, Else> {
|
||||
typedef Else RET;
|
||||
};
|
||||
|
||||
template <int type>
|
||||
class ctraits {
|
||||
public:@;
|
||||
enum {@+ fold, unfold@+};
|
||||
typedef TYPENAME IF<type==fold, FGSTensor, UGSTensor>::RET Ttensor;
|
||||
typedef TYPENAME IF<type==fold, FFSTensor, UFSTensor>::RET Ttensym;
|
||||
typedef TYPENAME IF<type==fold, FGSContainer, UGSContainer>::RET Tg;
|
||||
typedef TYPENAME IF<type==fold, FGSContainer, UGSContainer>::RET Tgs;
|
||||
typedef TYPENAME IF<type==fold, FGSContainer, UGSContainer>::RET Tgss;
|
||||
typedef TYPENAME IF<type==fold, FGSContainer, UGSContainer>::RET TG;
|
||||
typedef TYPENAME IF<type==fold, FoldedZContainer, UnfoldedZContainer>::RET TZstack;
|
||||
typedef TYPENAME IF<type==fold, FoldedGContainer, UnfoldedGContainer>::RET TGstack;
|
||||
typedef TYPENAME IF<type==fold, FNormalMoments, UNormalMoments>::RET Tm;
|
||||
typedef TYPENAME IF<type==fold, FTensorPolynomial, UTensorPolynomial>::RET Tpol;
|
||||
typedef TYPENAME IF<type==fold, FoldedZXContainer, UnfoldedZXContainer>::RET TZXstack;
|
||||
typedef TYPENAME IF<type==fold, FoldedGXContainer, UnfoldedGXContainer>::RET TGXstack;
|
||||
};
|
||||
|
||||
|
||||
@ The |PartitionY| class defines the partitioning of state variables
|
||||
$y$. The vector $y$, and subvector $y^*$, and $y^{**}$ are defined.
|
||||
$$y=\left[\matrix{\hbox{static}\cr\hbox{predeter}\cr\hbox{both}\cr
|
||||
\hbox{forward}}\right],\quad
|
||||
y^*=\left[\matrix{\hbox{predeter}\cr\hbox{both}}\right],\quad
|
||||
y^{**}=\left[\matrix{\hbox{both}\cr\hbox{forward}}\right],$$
|
||||
where ``static'' means variables appearing only at time $t$,
|
||||
``predeter'' means variables appearing at time $t-1$, but not at
|
||||
$t+1$, ``both'' means variables appearing both at $t-1$ and $t+1$
|
||||
(regardless appearance at $t$), and ``forward'' means variables
|
||||
appearing at $t+1$, but not at $t-1$.
|
||||
|
||||
The class maintains the four lengths, and returns the whole length,
|
||||
length of $y^*$, and length of $y^{**}$.
|
||||
|
||||
@<|PartitionY| struct declaration@>=
|
||||
struct PartitionY {
|
||||
// Counts of the four variable groups: static (only at t), predetermined
// (at t-1 but not t+1), both (at t-1 and t+1), forward (at t+1 only).
const int nstat;
|
||||
const int npred;
|
||||
const int nboth;
|
||||
const int nforw;
|
||||
PartitionY(int num_stat, int num_pred,
|
||||
int num_both, int num_forw)
|
||||
: nstat(num_stat), npred(num_pred),
|
||||
nboth(num_both), nforw(num_forw)
|
||||
{}
|
||||
// Length of the whole vector y.
int ny() const
|
||||
{@+ return nstat+npred+nboth+nforw;@+}
|
||||
// Length of the state subvector y* = [predetermined; both].
int nys() const
|
||||
{@+ return npred+nboth;@+}
|
||||
// Length of the forward subvector y** = [both; forward].
int nyss() const
|
||||
{@+ return nboth+nforw;@+}
|
||||
};
|
||||
|
||||
|
||||
@ This is an abstraction for a square matrix with attached PLU
|
||||
factorization. It can calculate the PLU factorization and apply the
|
||||
inverse with some given matrix.
|
||||
|
||||
We use LAPACK $PLU$ decomposition for the inverse. We store the $L$
|
||||
and $U$ in the |inv| array and |ipiv| is the permutation $P$.
|
||||
|
||||
@<|PLUMatrix| class declaration@>=
|
||||
class PLUMatrix : public TwoDMatrix {
|
||||
public:@;
|
||||
// Square n-by-n matrix; |inv| holds the L and U factors, |ipiv| the pivots.
PLUMatrix(int n)
|
||||
: TwoDMatrix(n,n),
|
||||
inv(nrows()*ncols()),
|
||||
ipiv(new int[nrows()]) {}
|
||||
// NOTE(review): copy ctor is declared but copy assignment is not; the
// default copy-assign would share the raw |ipiv| and double-delete — verify.
PLUMatrix(const PLUMatrix& plu);
|
||||
virtual ~PLUMatrix()
|
||||
{delete [] ipiv;}
|
||||
// Overwrite m with this^{-1}*m using the stored PLU factors.
void multInv(TwoDMatrix& m) const;
|
||||
private:@;
|
||||
// Combined L/U factors of the PLU decomposition.
Vector inv;
|
||||
// Permutation P (pivot indices), LAPACK style.
int* ipiv;
|
||||
protected:@;
|
||||
// Compute the PLU factorization into |inv| and |ipiv|.
void calcPLU();
|
||||
};
|
||||
|
||||
@ The class |MatrixA| is used for matrix $\left[f_{y}\right]+ \left[0
|
||||
\quad\left[f_{y^{**}_+}\right]\cdot\left[g^{**}_{y^*}\right]\quad 0\right]$,
|
||||
which is central for the perturbation method step.
|
||||
|
||||
@<|MatrixA| class declaration@>=
|
||||
// Matrix [f_y] + [0 [f_{y**+}].[g**_{y*}] 0]; its inverse (via the inherited
// PLU factorization) is applied repeatedly in the perturbation step.
class MatrixA : public PLUMatrix {
|
||||
public:@;
|
||||
MatrixA(const FSSparseTensor& f, const IntSequence& ss,
|
||||
const TwoDMatrix& gy, const PartitionY& ypart);
|
||||
};
|
||||
|
||||
@ The class |MatrixS| slightly differs from |MatrixA|. It is used for
|
||||
matrix $$\left[f_{y}\right]+ \left[0
|
||||
\quad\left[f_{y^{**}_+}\right]\cdot\left[g^{**}_{y^*}\right]\quad
|
||||
0\right]+\left[0\quad 0\quad\left[f_{y^{**}_+}\right]\right]$$, which is
|
||||
needed when recovering $g_{\sigma^k}$.
|
||||
|
||||
@<|MatrixS| class declaration@>=
|
||||
// Like |MatrixA| but with the extra [0 0 [f_{y**+}]] block; used when
// recovering the g_{sigma^k} derivatives (see |recover_s|).
class MatrixS : public PLUMatrix {
|
||||
public:@;
|
||||
MatrixS(const FSSparseTensor& f, const IntSequence& ss,
|
||||
const TwoDMatrix& gy, const PartitionY& ypart);
|
||||
};
|
||||
|
||||
|
||||
@ The $B$ matrix is equal to $\left[f_{y^{**}_+}\right]$. We have just
|
||||
a constructor.
|
||||
|
||||
@<|MatrixB| class declaration@>=
|
||||
// Matrix B = [f_{y**+}], extracted from the sparse first derivative tensor.
class MatrixB : public TwoDMatrix {
|
||||
public:@;
|
||||
MatrixB(const FSSparseTensor& f, const IntSequence& ss)
|
||||
: TwoDMatrix(FGSTensor(f, ss, IntSequence(1,0),
|
||||
TensorDimens(ss, IntSequence(1,0))))
|
||||
{}
|
||||
};
|
||||
|
||||
@ Here we have the class for the higher order approximations. It
|
||||
contains the following data:
|
||||
|
||||
\halign{\kern\parindent\vrule height12pt width0pt
|
||||
\vtop{\hsize=4cm\noindent\raggedright #}&\kern0.5cm\vtop{\hsize=10cm\noindent #}\cr
|
||||
variable sizes ypart& |PartitionY| struct maintaining partitions of
|
||||
$y$, see |@<|PartitionY| struct declaration@>|\cr
|
||||
tensor variable dimension |nvs|& variable sizes of all tensors in
|
||||
containers, sizes of $y^*$, $u$, $u'$@q'@> and $\sigma$\cr
|
||||
tensor containers & folded and unfolded containers for $g$, $g_{y^*}$,
|
||||
$g_{y^{**}}$ (the latter two collect appropriate subtensors of $g$, they
|
||||
do not allocate any new space), $G$, $G$ stack, $Z$ stack\cr
|
||||
dynamic model derivatives & just a reference to the container of
|
||||
sparse tensors of the system derivatives, lives outside the class\cr
|
||||
moments & both folded and unfolded normal moment containers, both are
|
||||
calculated at initialization\cr
|
||||
matrices & matrix $A$, matrix $S$, and matrix $B$, see |@<|MatrixA| class
|
||||
declaration@>| and |@<|MatrixB| class declaration@>|\cr
|
||||
}
|
||||
|
||||
\kern 0.4cm
|
||||
|
||||
The methods are the following:
|
||||
\halign{\kern\parindent\vrule height12pt width0pt
|
||||
\vtop{\hsize=4cm\noindent\raggedright #}&\kern0.5cm\vtop{\hsize=10cm\noindent #}\cr
|
||||
member access & we declare template methods for accessing containers
|
||||
depending on |fold| and |unfold| flag, we implement their
|
||||
specializations\cr
|
||||
|performStep| & this performs $k$-order step provided that $k=2$ or
|
||||
the $k-1$-th step has been run, this is the core method\cr
|
||||
|check| & this calculates residuals of all solved equations for
|
||||
$k$-order and reports their sizes, it is runnable after $k$-order
|
||||
|performStep| has been run\cr
|
||||
|insertDerivative| & inserts a $g$ derivative to the $g$ container and
|
||||
also creates subtensors and insert them to $g_{y^*}$ and $g_{y^{**}}$
|
||||
containers\cr
|
||||
|sylvesterSolve| & solve the sylvester equation (templated fold, and
|
||||
unfold)\cr
|
||||
|faaDiBrunoZ| & calculates derivatives of $F$ by Faa Di Bruno for the
|
||||
sparse container of system derivatives and $Z$ stack container\cr
|
||||
|faaDiBrunoG| & calculates derivatives of $G$ by Faa Di Bruno for the
|
||||
dense container $g^{**}$ and $G$ stack\cr
|
||||
|recover_y| & recovers $g_{y^{*i}}$\cr
|
||||
|recover_yu| & recovers $g_{y^{*i}u^j}$\cr
|
||||
|recover_ys| & recovers $g_{y^{*i}\sigma^j}$\cr
|
||||
|recover_yus| & recovers $g_{y^{*i}u^j\sigma^k}$\cr
|
||||
|recover_s| & recovers $g_{\sigma^i}$\cr
|
||||
|fillG| & calculates specified derivatives of $G$ and inserts them to
|
||||
the container\cr
|
||||
|calcE_ijk|& calculates $E_{ijk}$\cr
|
||||
|calcD_ijk|& calculates $D_{ijk}$\cr
|
||||
}
|
||||
|
||||
\kern 0.3cm
|
||||
|
||||
Most of the code is templated, and template types are calculated in
|
||||
|ctraits|. So all templated methods get a template argument |T|, which
|
||||
can be either |fold|, or |unfold|. To shorten a reference to a type
|
||||
calculated by |ctraits| for a particular |t|, we define the following
|
||||
macros.
|
||||
|
||||
@s _Ttensor int
|
||||
@s _Ttensym int
|
||||
@s _Tg int
|
||||
@s _Tgs int
|
||||
@s _Tgss int
|
||||
@s _TG int
|
||||
@s _TZstack int
|
||||
@s _TGstack int
|
||||
@s _TZXstack int
|
||||
@s _TGXstack int
|
||||
@s _Tm int
|
||||
@s _Tpol int
|
||||
@d _Ttensor TYPENAME ctraits<t>::Ttensor
|
||||
@d _Ttensym TYPENAME ctraits<t>::Ttensym
|
||||
@d _Tg TYPENAME ctraits<t>::Tg
|
||||
@d _Tgs TYPENAME ctraits<t>::Tgs
|
||||
@d _Tgss TYPENAME ctraits<t>::Tgss
|
||||
@d _TG TYPENAME ctraits<t>::TG
|
||||
@d _TZstack TYPENAME ctraits<t>::TZstack
|
||||
@d _TGstack TYPENAME ctraits<t>::TGstack
|
||||
@d _TZXstack TYPENAME ctraits<t>::TZXstack
|
||||
@d _TGXstack TYPENAME ctraits<t>::TGXstack
|
||||
@d _Tm TYPENAME ctraits<t>::Tm
|
||||
@d _Tpol TYPENAME ctraits<t>::Tpol
|
||||
|
||||
|
||||
@<|KOrder| class declaration@>=
|
||||
class KOrder {
|
||||
protected:@;
|
||||
const PartitionY ypart;
|
||||
const int ny;
|
||||
const int nu;
|
||||
const int maxk;
|
||||
IntSequence nvs;
|
||||
@<|KOrder| container members@>;
|
||||
const MatrixA matA;
|
||||
const MatrixS matS;
|
||||
const MatrixB matB;
|
||||
@<|KOrder| member access method declarations@>;
|
||||
Journal& journal;
|
||||
public:@;
|
||||
KOrder(int num_stat, int num_pred, int num_both, int num_forw,
|
||||
const TensorContainer<FSSparseTensor>& fcont,
|
||||
const TwoDMatrix& gy, const TwoDMatrix& gu, const TwoDMatrix& v,
|
||||
Journal& jr);
|
||||
enum {@+ fold, unfold@+ };
|
||||
@<|KOrder::performStep| templated code@>;
|
||||
@<|KOrder::check| templated code@>;
|
||||
@<|KOrder::calcStochShift| templated code@>;
|
||||
void switchToFolded();
|
||||
const PartitionY& getPartY() const
|
||||
{@+ return ypart;@+}
|
||||
const FGSContainer& getFoldDers() const
|
||||
{@+ return _fg;@+}
|
||||
const UGSContainer& getUnfoldDers() const
|
||||
{@+ return _ug;@+}
|
||||
static bool is_even(int i)
|
||||
{@+ return (i/2)*2 == i;@+}
|
||||
protected:@;
|
||||
@<|KOrder::insertDerivative| templated code@>;
|
||||
template<int t>
|
||||
void sylvesterSolve(_Ttensor& der) const;
|
||||
|
||||
@<|KOrder::faaDiBrunoZ| templated code@>;
|
||||
@<|KOrder::faaDiBrunoG| templated code@>;
|
||||
|
||||
@<|KOrder::recover_y| templated code@>;
|
||||
@<|KOrder::recover_yu| templated code@>;
|
||||
@<|KOrder::recover_ys| templated code@>;
|
||||
@<|KOrder::recover_yus| templated code@>;
|
||||
@<|KOrder::recover_s| templated code@>;
|
||||
@<|KOrder::fillG| templated code@>;
|
||||
|
||||
@<|KOrder::calcD_ijk| templated code@>;
|
||||
@<|KOrder::calcD_ik| templated code@>;
|
||||
@<|KOrder::calcD_k| templated code@>;
|
||||
|
||||
@<|KOrder::calcE_ijk| templated code@>;
|
||||
@<|KOrder::calcE_ik| templated code@>;
|
||||
@<|KOrder::calcE_k| templated code@>;
|
||||
};
|
||||
|
||||
|
||||
|
||||
@ Here we insert the result to the container. Along the insertion, we
|
||||
also create subtensors and insert as well.
|
||||
|
||||
@<|KOrder::insertDerivative| templated code@>=
|
||||
template <int t>
|
||||
// Insert a freshly computed derivative of g into the g container (taking
// ownership of |der|), and also create and insert its y*- and y**-subtensors.
void insertDerivative(_Ttensor* der)
|
||||
{
|
||||
g<t>().insert(der);
|
||||
// Subtensor w.r.t. y*: rows offset by nstat, nys() variables.
gs<t>().insert(new _Ttensor(ypart.nstat, ypart.nys(), *der));
|
||||
// Subtensor w.r.t. y**: rows offset by nstat+npred, nyss() variables.
gss<t>().insert(new _Ttensor(ypart.nstat+ypart.npred,
|
||||
ypart.nyss(), *der));
|
||||
}
|
||||
|
||||
|
||||
@ Here we implement Faa Di Bruno formula
|
||||
$$\sum_{l=1}^k\left[f_{z^l}\right]_{\gamma_1\ldots\gamma_l}
|
||||
\sum_{c\in M_{l,k}}\prod_{m=1}^l\left[z_{s(c_m)}\right]^{\gamma_m},
|
||||
$$
|
||||
where $s$ is a given outer symmetry and $k$ is the dimension of the
|
||||
symmetry.
|
||||
|
||||
@<|KOrder::faaDiBrunoZ| templated code@>=
|
||||
template <int t>
|
||||
// Evaluate the Faa Di Bruno formula for the sparse system derivatives |f|
// composed with the Z stack, for the given outer symmetry. Missing tensors
// in the containers are treated as zero. Caller owns the returned tensor.
_Ttensor* faaDiBrunoZ(const Symmetry& sym) const
|
||||
{
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Faa Di Bruno Z container for " << sym << endrec;
|
||||
_Ttensor* res = new _Ttensor(ny, TensorDimens(sym, nvs));
|
||||
FaaDiBruno bruno(journal);
|
||||
bruno.calculate(Zstack<t>(), f, *res);
|
||||
return res;
|
||||
}
|
||||
|
||||
@ The same as |@<|KOrder::faaDiBrunoZ| templated code@>|, but for
|
||||
$g^{**}$ and $G$ stack.
|
||||
|
||||
@<|KOrder::faaDiBrunoG| templated code@>=
|
||||
template <int t>
|
||||
// Same as |faaDiBrunoZ| but composes the dense g** container with the
// G stack; returns a new tensor with ypart.nyss() rows. Caller owns it.
_Ttensor* faaDiBrunoG(const Symmetry& sym) const
|
||||
{
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Faa Di Bruno G container for " << sym << endrec;
|
||||
TensorDimens tdims(sym, nvs);
|
||||
_Ttensor* res = new _Ttensor(ypart.nyss(), tdims);
|
||||
FaaDiBruno bruno(journal);
|
||||
bruno.calculate(Gstack<t>(), gss<t>(), *res);
|
||||
return res;
|
||||
}
|
||||
|
||||
@ Here we solve $\left[F_{y^i}\right]=0$. First we calculate
|
||||
conditional $G_{y^i}$ (it misses $l=1$ and $l=i$ since $g_{y^i}$ does
|
||||
not exist yet). Then calculate conditional $F_{y^i}$ and we have the
|
||||
right hand side of equation. Since we miss two orders, we solve by
|
||||
Sylvester, and insert the solution as the derivative $g_{y^i}$. Then
|
||||
we need to update $G_{y^i}$ running |multAndAdd| for both dimensions
|
||||
$1$ and $i$.
|
||||
|
||||
{\bf Requires:} everything at order $\leq i-1$
|
||||
|
||||
{\bf Provides:} $g_{y^i}$, and $G_{y^i}$
|
||||
|
||||
@<|KOrder::recover_y| templated code@>=
|
||||
template<int t>
|
||||
// Recover g_{y^i} (and G_{y^i}) by solving [F_{y^i}]=0 via the Sylvester
// equation. Requires everything at order <= i-1 to be in the containers.
void recover_y(int i)
|
||||
{
|
||||
Symmetry sym(i,0,0,0);
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Recovering symmetry " << sym << endrec;
|
|
||||
|
||||
// Conditional G_{y^i}: misses the l=1 and l=i terms since g_{y^i} is unknown.
_Ttensor* G_yi = faaDiBrunoG<t>(sym);
|
||||
G<t>().insert(G_yi);
|
|
||||
|
||||
// Conditional F_{y^i}, negated to form the right hand side.
_Ttensor* g_yi = faaDiBrunoZ<t>(sym);
|
||||
g_yi->mult(-1.0);
|
|
||||
|
||||
sylvesterSolve<t>(*g_yi);
|
|
||||
|
||||
insertDerivative<t>(g_yi);
|
|
||||
|
||||
// Update G_{y^i} with the previously missing contributions for
// dimensions 1 and i (cf. the section text above) — verify against
// the multAndAdd overload used here.
_Ttensor* gss_y = gss<t>().get(Symmetry(1,0,0,0));
|
||||
gs<t>().multAndAdd(*gss_y, *G_yi);
|
||||
_Ttensor* gss_yi = gss<t>().get(sym);
|
||||
gs<t>().multAndAdd(*gss_yi, *G_yi);
|
||||
}
|
||||
|
||||
|
||||
@ Here we solve $\left[F_{y^iu^j}\right]=0$ to obtain $g_{y^iu^j}$ for
|
||||
$j>0$. We calculate conditional $G_{y^iu^j}$ (this misses only $l=1$)
|
||||
and calculate conditional $F_{y^iu^j}$ and we have the right hand
|
||||
side. It is solved by multiplication of inversion of $A$. Then we insert
|
||||
the result, and update $G_{y^iu^j}$ by |multAndAdd| for $l=1$.
|
||||
|
||||
{\bf Requires:} everything at order $\leq i+j-1$, $G_{y^{i+j}}$, and
|
||||
$g_{y^{i+j}}$.
|
||||
|
||||
{\bf Provides:} $g_{y^iu^j}$, and $G_{y^iu^j}$
|
||||
|
||||
@<|KOrder::recover_yu| templated code@>=
|
||||
template <int t>
|
||||
void recover_yu(int i, int j)
|
||||
{
|
||||
Symmetry sym(i,j,0,0);
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Recovering symmetry " << sym << endrec;
|
||||
|
||||
_Ttensor* G_yiuj = faaDiBrunoG<t>(sym);
|
||||
G<t>().insert(G_yiuj);
|
||||
|
||||
_Ttensor* g_yiuj = faaDiBrunoZ<t>(sym);
|
||||
g_yiuj->mult(-1.0);
|
||||
matA.multInv(*g_yiuj);
|
||||
insertDerivative<t>(g_yiuj);
|
||||
|
||||
gs<t>().multAndAdd(*(gss<t>().get(Symmetry(1,0,0,0))), *G_yiuj);
|
||||
}
|
||||
|
||||
@ Here we solve
|
||||
$\left[F_{y^i\sigma^j}\right]+\left[D_{ij}\right]+\left[E_{ij}\right]=0$
|
||||
to obtain $g_{y^i\sigma^j}$. We calculate conditional
|
||||
$G_{y^i\sigma^j}$ (missing dimensions $1$ and $i+j$), calculate
|
||||
conditional $F_{y^i\sigma^j}$. Before we can calculate $D_{ij}$ and
|
||||
$E_{ij}$, we have to calculate $G_{y^iu'^m\sigma^{j-m}}$ for
|
||||
$m=1,\ldots,j$. Then we add the $D_{ij}$ and $E_{ij}$ to obtain the
|
||||
right hand side. Then we solve the sylvester to obtain
|
||||
$g_{y^i\sigma^j}$. Then we update $G_{y^i\sigma^j}$ for $l=1$ and
|
||||
$l=i+j$.
|
||||
|
||||
{\bf Requires:} everything at order $\leq i+j-1$, $g_{y^{i+j}}$,
|
||||
$G_{y^iu'^j}$ and $g_{y^iu^j}$ through $D_{ij}$,
|
||||
$g_{y^iu^m\sigma^{j-m}}$ for
|
||||
$m=1,\ldots,j-1$ through $E_{ij}$.
|
||||
|
||||
{\bf Provides:} $g_{y^i\sigma^j}$ and $G_{y^i\sigma^j}$, and finally
|
||||
$G_{y^iu'^m\sigma^{j-m}}$ for $m=1,\ldots,j$. The latter is calculated
|
||||
by |fillG| before the actual calculation.
|
||||
|
||||
@<|KOrder::recover_ys| templated code@>=
|
||||
template <int t>
|
||||
void recover_ys(int i, int j)
|
||||
{
|
||||
Symmetry sym(i,0,0,j);
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Recovering symmetry " << sym << endrec;
|
||||
|
||||
fillG<t>(i, 0, j);
|
||||
|
||||
if (is_even(j)) {
|
||||
_Ttensor* G_yisj = faaDiBrunoG<t>(sym);
|
||||
G<t>().insert(G_yisj);
|
||||
|
||||
_Ttensor* g_yisj = faaDiBrunoZ<t>(sym);
|
||||
|
||||
{
|
||||
_Ttensor* D_ij = calcD_ik<t>(i, j);
|
||||
g_yisj->add(1.0, *D_ij);
|
||||
delete D_ij;
|
||||
}
|
||||
|
||||
if (j >= 3) {
|
||||
_Ttensor* E_ij = calcE_ik<t>(i, j);
|
||||
g_yisj->add(1.0, *E_ij);
|
||||
delete E_ij;
|
||||
}
|
||||
|
||||
g_yisj->mult(-1.0);
|
||||
|
||||
sylvesterSolve<t>(*g_yisj);
|
||||
|
||||
insertDerivative<t>(g_yisj);
|
||||
|
||||
Gstack<t>().multAndAdd(1, gss<t>(), *G_yisj);
|
||||
Gstack<t>().multAndAdd(i+j, gss<t>(), *G_yisj);
|
||||
}
|
||||
}
|
||||
|
||||
@ Here we solve
|
||||
$\left[F_{y^iu^j\sigma^k}\right]+\left[D_{ijk}\right]+\left[E_{ijk}\right]=0$
|
||||
to obtain $g_{y^iu^j\sigma^k}$. First we calculate conditional
|
||||
$G_{y^iu^j\sigma^k}$ (missing only for dimension $l=1$), then we
|
||||
evaluate conditional $F_{y^iu^j\sigma^k}$. Before we can calculate
|
||||
$D_{ijk}$, and $E_{ijk}$, we need to insert
|
||||
$G_{y^iu^ju'^m\sigma^{k-m}}$ for $m=1,\ldots, k$. This is done by
|
||||
|fillG|. Then we have right hand side and we multiply by $A^{-1}$ to
|
||||
obtain $g_{y^iu^j\sigma^k}$. Finally we have to update
|
||||
$G_{y^iu^j\sigma^k}$ by |multAndAdd| for dimension $l=1$.
|
||||
|
||||
{\bf Requires:} everything at order $\leq i+j+k$, $g_{y^{i+j}\sigma^k}$
|
||||
through $G_{y^iu^j\sigma^k}$ involved in right hand side, then
|
||||
$g_{y^iu^{j+k}}$ through $D_{ijk}$, and $g_{y^iu^{j+m}\sigma^{k-m}}$
|
||||
for $m=1,\ldots,k-1$ through $E_{ijk}$.
|
||||
|
||||
{\bf Provides:} $g_{y^iu^j\sigma^k}$, $G_{y^iu^j\sigma^k}$, and
|
||||
$G_{y^iu^ju'^m\sigma^{k-m}}$ for $m=1,\ldots, k$
|
||||
|
||||
@<|KOrder::recover_yus| templated code@>=
|
||||
template <int t>
|
||||
void recover_yus(int i, int j, int k)
|
||||
{
|
||||
Symmetry sym(i,j,0,k);
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Recovering symmetry " << sym << endrec;
|
||||
|
||||
fillG<t>(i, j, k);
|
||||
|
||||
if (is_even(k)) {
|
||||
_Ttensor* G_yiujsk = faaDiBrunoG<t>(sym);
|
||||
G<t>().insert(G_yiujsk);
|
||||
|
||||
_Ttensor* g_yiujsk = faaDiBrunoZ<t>(sym);
|
||||
|
||||
{
|
||||
_Ttensor* D_ijk = calcD_ijk<t>(i,j,k);
|
||||
g_yiujsk->add(1.0, *D_ijk);
|
||||
delete D_ijk;
|
||||
}
|
||||
|
||||
if (k >= 3) {
|
||||
_Ttensor* E_ijk = calcE_ijk<t>(i,j,k);
|
||||
g_yiujsk->add(1.0, *E_ijk);
|
||||
delete E_ijk;
|
||||
}
|
||||
|
||||
g_yiujsk->mult(-1.0);
|
||||
|
||||
matA.multInv(*g_yiujsk);
|
||||
insertDerivative<t>(g_yiujsk);
|
||||
|
||||
Gstack<t>().multAndAdd(1, gss<t>(), *G_yiujsk);
|
||||
}
|
||||
}
|
||||
|
||||
@ Here we solve
|
||||
$\left[F_{\sigma^i}\right]+\left[D_i\right]+\left[E_i\right]=0$ to
|
||||
recover $g_{\sigma^i}$. First we calculate conditional $G_{\sigma^i}$
|
||||
(missing dimension $l=1$ and $l=i$), then we calculate conditional
|
||||
$F_{\sigma^i}$. Before we can calculate $D_i$ and $E_i$, we have to
|
||||
obtain $G_{u'^m\sigma^{i-m}}$ for $m=1,\ldots,i$. Then
|
||||
adding $D_i$ and $E_i$ we have the right hand side. We solve by
|
||||
$S^{-1}$ multiplication and update $G_{\sigma^i}$ by calling
|
||||
|multAndAdd| for dimension $l=1$.
|
||||
|
||||
Recall that the solved equation here is:
|
||||
$$
|
||||
\left[f_y\right]\left[g_{\sigma^k}\right]+
|
||||
\left[f_{y^{**}_+}\right]\left[g^{**}_{y^*}\right]\left[g^*_{\sigma^k}\right]+
|
||||
\left[f_{y^{**}_+}\right]\left[g^{**}_{\sigma^k}\right]=\hbox{RHS}
|
||||
$$
|
||||
This is a sort of deficient sylvester equation (sylvester equation for
|
||||
dimension=0), we solve it by $S^{-1}$. See |@<|MatrixS| constructor
|
||||
code@>| to see how $S$ looks like.
|
||||
|
||||
{\bf Requires:} everything at order $\leq i-1$, $g_{y^i}$ and
|
||||
$g_{y^{i-j}\sigma^j}$, then $g_{u^k}$ through $F_{u'^k}$, and
|
||||
$g_{y^mu^j\sigma^k}$ for $j=1,\ldots,i-1$ and $m+j+k=i$ through
|
||||
$F_{u'^j\sigma^{i-j}}$.
|
||||
|
||||
{\bf Provides:} $g_{\sigma^i}$, $G_{\sigma^i}$, and
|
||||
$G_{u'^m\sigma^{i-m}}$ for $m=1,\ldots,i$
|
||||
|
||||
@<|KOrder::recover_s| templated code@>=
|
||||
template <int t>
|
||||
void recover_s(int i)
|
||||
{
|
||||
Symmetry sym(0,0,0,i);
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Recovering symmetry " << sym << endrec;
|
||||
|
||||
fillG<t>(0, 0, i);
|
||||
|
||||
if (is_even(i)) {
|
||||
_Ttensor* G_si = faaDiBrunoG<t>(sym);
|
||||
G<t>().insert(G_si);
|
||||
|
||||
_Ttensor* g_si = faaDiBrunoZ<t>(sym);
|
||||
|
||||
{
|
||||
_Ttensor* D_i = calcD_k<t>(i);
|
||||
g_si->add(1.0, *D_i);
|
||||
delete D_i;
|
||||
}
|
||||
|
||||
if (i >= 3) {
|
||||
_Ttensor* E_i = calcE_k<t>(i);
|
||||
g_si->add(1.0, *E_i);
|
||||
delete E_i;
|
||||
}
|
||||
|
||||
g_si->mult(-1.0);
|
||||
|
||||
|
||||
matS.multInv(*g_si);
|
||||
insertDerivative<t>(g_si);
|
||||
|
||||
Gstack<t>().multAndAdd(1, gss<t>(), *G_si);
|
||||
Gstack<t>().multAndAdd(i, gss<t>(), *G_si);
|
||||
}
|
||||
}
|
||||
|
||||
@ Here we calculate and insert $G_{y^iu^ju'^m\sigma^{k-m}}$ for
|
||||
$m=1,\ldots, k$. The derivatives are inserted only for $k-m$ being
|
||||
even.
|
||||
|
||||
@<|KOrder::fillG| templated code@>=
|
||||
template<int t>
|
||||
// Calculate and insert G_{y^i u^j u'^m sigma^{k-m}} for m=1..k. Only the
// even k-m cases are inserted (the odd ones vanish with Gaussian moments).
void fillG(int i, int j, int k)
|
||||
{
|
||||
for (int m = 1; m <= k; m++) {
|
||||
if (is_even(k-m)) {
|
||||
_Ttensor* G_yiujupms = faaDiBrunoG<t>(Symmetry(i,j,m,k-m));
|
||||
G<t>().insert(G_yiujupms);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ Here we calculate
|
||||
$$\left[D_{ijk}\right]_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j}=
|
||||
\left[F_{y^iu^ju'^k}\right]
|
||||
_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j\gamma_1\ldots\gamma_k}
|
||||
\left[\Sigma\right]^{\gamma_1\ldots\gamma_k}$$
|
||||
So it is nonzero only for even $k$.
|
||||
|
||||
@<|KOrder::calcD_ijk| templated code@>=
|
||||
template <int t>
|
||||
// D_{ijk} = [F_{y^i u^j u'^k}] contracted with the k-th moment Sigma^k;
// zero (and the Faa Di Bruno evaluation skipped) for odd k.
// Caller owns the returned tensor.
_Ttensor* calcD_ijk(int i, int j, int k) const
|
||||
{
|
||||
_Ttensor* res = new _Ttensor(ny, TensorDimens(Symmetry(i,j,0,0), nvs));
|
||||
res->zeros();
|
||||
if (is_even(k)) {
|
||||
_Ttensor* tmp = faaDiBrunoZ<t>(Symmetry(i,j,k,0));
|
||||
// Contract the u' dimensions (slot 2) against the k-th moment.
tmp->contractAndAdd(2, *res, *(m<t>().get(Symmetry(k))));
|
||||
delete tmp;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
@ Here we calculate
|
||||
$$\left[E_{ijk}\right]_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j}=
|
||||
\sum_{m=1}^{k-1}\left(\matrix{k\cr m}\right)\left[F_{y^iu^ju'^m\sigma^{k-m}}\right]
|
||||
_{\alpha_1\ldots\alpha_i\beta_1\ldots\beta_j\gamma_1\ldots\gamma_m}
|
||||
\left[\Sigma\right]^{\gamma_1\ldots\gamma_m}$$
|
||||
The sum is taken only over even $m$.
|
||||
|
||||
@<|KOrder::calcE_ijk| templated code@>=
|
||||
template <int t>
|
||||
// E_{ijk} = sum over even n in [2,k-1] of C(k,n)*[F_{y^i u^j u'^n sigma^{k-n}}]
// contracted with the n-th moment (odd-moment terms vanish).
// Caller owns the returned tensor.
_Ttensor* calcE_ijk(int i, int j, int k) const
|
||||
{
|
||||
_Ttensor* res = new _Ttensor(ny, TensorDimens(Symmetry(i,j,0,0), nvs));
|
||||
res->zeros();
|
||||
for (int n = 2; n <= k-1; n+=2) {
|
||||
_Ttensor* tmp = faaDiBrunoZ<t>(Symmetry(i,j,n,k-n));
|
||||
// Binomial weight C(k,n).
tmp->mult((double)(Tensor::noverk(k,n)));
|
||||
tmp->contractAndAdd(2, *res, *(m<t>().get(Symmetry(n))));
|
||||
delete tmp;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
@
|
||||
@<|KOrder::calcD_ik| templated code@>=
|
||||
template <int t>
|
||||
// D_{ik}: special case of D_{ijk} with no u dimensions (j=0).
_Ttensor* calcD_ik(int i, int k) const
|
||||
{
|
||||
return calcD_ijk<t>(i, 0, k);
|
||||
}
|
||||
|
||||
@
|
||||
@<|KOrder::calcD_k| templated code@>=
|
||||
template <int t>
|
||||
// D_k: special case of D_{ijk} with no y or u dimensions (i=j=0).
_Ttensor* calcD_k(int k) const
|
||||
{
|
||||
return calcD_ijk<t>(0, 0, k);
|
||||
}
|
||||
|
||||
@
|
||||
@<|KOrder::calcE_ik| templated code@>=
|
||||
template <int t>
|
||||
// E_{ik}: special case of E_{ijk} with no u dimensions (j=0).
_Ttensor* calcE_ik(int i, int k) const
|
||||
{
|
||||
return calcE_ijk<t>(i, 0, k);
|
||||
}
|
||||
|
||||
@
|
||||
@<|KOrder::calcE_k| templated code@>=
|
||||
template <int t>
|
||||
// E_k: special case of E_{ijk} with no y or u dimensions (i=j=0).
_Ttensor* calcE_k(int k) const
|
||||
{
|
||||
return calcE_ijk<t>(0, 0, k);
|
||||
}
|
||||
|
||||
@ Here is the core routine. It calls methods recovering derivatives in
|
||||
the right order. Recall, that the code, namely Faa Di Bruno's formula,
|
||||
is implemented so as to be run conditionally on the current contents of
|
||||
containers. So, if some call of Faa Di Bruno evaluates derivatives,
|
||||
and some derivatives are not present in the container, then it is
|
||||
considered to be zero. So, we have to be very careful to put
|
||||
everything in the right order. The order here can be derived from
|
||||
dependencies, or it is in the paper.
|
||||
|
||||
The method recovers all the derivatives of the given |order|.
|
||||
|
||||
The precondition of the method is that all tensors of order |order-1|,
|
||||
which are not zero, exist (including $G$). The postcondition of the
|
||||
method is derivatives of $g$ and $G$ of order |order| are calculated
|
||||
and stored in the containers. Responsibility of precondition lays upon
|
||||
the constructor (for |order==2|), or upon the previous call of
|
||||
|performStep|.
|
||||
|
||||
From the code, it is clear, that all $g$ are calculated. If one goes
|
||||
through all the recovering methods, he should find out that also all
|
||||
$G$ are provided.
|
||||
|
||||
@<|KOrder::performStep| templated code@>=
|
||||
template <int t>
|
||||
// Core routine: recover all derivatives of g and G of the given |order|.
// Precondition: all nonzero tensors of order-1 (including G) are present;
// the recovery calls below must run in exactly this dependency order,
// since missing container entries are silently treated as zero.
void performStep(int order)
|
||||
{
|
||||
KORD_RAISE_IF(order-1 != g<t>().getMaxDim(),
|
||||
"Wrong order for KOrder::performStep");
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Performing step for order = " << order << endrec;
|
|
||||
|
||||
// Pure y derivative g_{y^order} first.
recover_y<t>(order);
|
|
||||
|
||||
// Mixed y/u derivatives g_{y^i u^{order-i}} (i=0 gives pure u).
for (int i = 0; i < order; i++) {
|
||||
recover_yu<t>(i, order-i);
|
||||
}
|
|
|
||||
// Sigma-involving derivatives, ordered by increasing sigma dimension j.
for (int j = 1; j < order; j++) {
|
||||
for (int i = j-1; i >= 1; i--) {
|
||||
recover_yus<t>(order-j,i,j-i);
|
||||
}
|
||||
recover_ys<t>(order-j, j);
|
||||
}
|
|
|
||||
// No-y derivatives g_{u^i sigma^{order-i}}, then the pure sigma one.
for (int i = order-1; i >= 1; i--) {
|
||||
recover_yus<t>(0, i, order-i);
|
||||
}
|
||||
recover_s<t>(order);
|
||||
}
|
||||
|
||||
@ Here we check for residuals of all the solved equations at the given
|
||||
order. The method returns the largest residual size. Each check simply
|
||||
evaluates the equation.
|
||||
|
||||
@<|KOrder::check| templated code@>=
|
||||
template <int t>
|
||||
double check(int dim) const
|
||||
{
|
||||
KORD_RAISE_IF(dim > g<t>().getMaxDim(),
|
||||
"Wrong dimension for KOrder::check");
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Checking residuals for order = " << dim << endrec;
|
||||
|
||||
double maxerror = 0.0;
|
||||
|
||||
@<check for $F_{y^iu^j}=0$@>;
|
||||
@<check for $F_{y^iu^ju'^k}+D_{ijk}+E_{ijk}=0$@>;@q'@>
|
||||
@<check for $F_{\sigma^i}+D_i+E_i=0$@>;
|
||||
|
||||
return maxerror;
|
||||
}
|
||||
|
||||
|
||||
@
|
||||
@<check for $F_{y^iu^j}=0$@>=
|
||||
for (int i = 0; i <= dim; i++) {
|
||||
Symmetry sym(dim-i, i, 0, 0);
|
||||
_Ttensor* r = faaDiBrunoZ<t>(sym);
|
||||
double err = r->getData().getMax();
|
||||
JournalRecord(journal) << "\terror for symmetry " << sym << "\tis " << err << endrec;
|
||||
if (err > maxerror)
|
||||
maxerror = err;
|
||||
delete r;
|
||||
}
|
||||
|
||||
@
|
||||
@<check for $F_{y^iu^ju'^k}+D_{ijk}+E_{ijk}=0$@>=
|
||||
SymmetrySet ss(dim, 3);
|
||||
for (symiterator si(ss); !si.isEnd(); ++si) {
|
||||
int i = (*si)[0];
|
||||
int j = (*si)[1];
|
||||
int k = (*si)[2];
|
||||
if (i+j > 0 && k > 0) {
|
||||
Symmetry sym(i, j, 0, k);
|
||||
_Ttensor* r = faaDiBrunoZ<t>(sym);
|
||||
_Ttensor* D_ijk = calcD_ijk<t>(i,j,k);
|
||||
r->add(1.0, *D_ijk);
|
||||
delete D_ijk;
|
||||
_Ttensor* E_ijk = calcE_ijk<t>(i,j,k);
|
||||
r->add(1.0, *E_ijk);
|
||||
delete E_ijk;
|
||||
double err = r->getData().getMax();
|
||||
JournalRecord(journal) << "\terror for symmetry " << sym << "\tis " << err << endrec;
|
||||
delete r;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@
|
||||
@<check for $F_{\sigma^i}+D_i+E_i=0$@>=
|
||||
_Ttensor* r = faaDiBrunoZ<t>(Symmetry(0,0,0,dim));
|
||||
_Ttensor* D_k = calcD_k<t>(dim);
|
||||
r->add(1.0, *D_k);
|
||||
delete D_k;
|
||||
_Ttensor* E_k = calcE_k<t>(dim);
|
||||
r->add(1.0, *E_k);
|
||||
delete E_k;
|
||||
double err = r->getData().getMax();
|
||||
Symmetry sym(0,0,0,dim);
|
||||
JournalRecord(journal) << "\terror for symmetry " << sym << "\tis " << err << endrec;
|
||||
if (err > maxerror)
|
||||
maxerror = err;
|
||||
delete r;
|
||||
|
||||
@
|
||||
@<|KOrder::calcStochShift| templated code@>=
|
||||
template <int t>
|
||||
Vector* calcStochShift(int order, double sigma) const
|
||||
{
|
||||
Vector* res = new Vector(ny);
|
||||
res->zeros();
|
||||
int jfac = 1;
|
||||
for (int j = 1; j <= order; j++, jfac *= j)
|
||||
if (is_even(j)) {
|
||||
_Ttensor* ten = calcD_k<t>(j);
|
||||
res->add(std::pow(sigma, j)/jfac, ten->getData());
|
||||
delete ten;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
@ These are containers. The names are not important because they do
|
||||
not appear anywhere else since we access them by template functions.
|
||||
|
||||
@<|KOrder| container members@>=
|
||||
UGSContainer _ug;
|
||||
FGSContainer _fg;
|
||||
UGSContainer _ugs;
|
||||
FGSContainer _fgs;
|
||||
UGSContainer _ugss;
|
||||
FGSContainer _fgss;
|
||||
UGSContainer _uG;
|
||||
FGSContainer _fG;
|
||||
UnfoldedZContainer _uZstack;
|
||||
FoldedZContainer _fZstack;
|
||||
UnfoldedGContainer _uGstack;
|
||||
FoldedGContainer _fGstack;
|
||||
UNormalMoments _um;
|
||||
FNormalMoments _fm;
|
||||
const TensorContainer<FSSparseTensor>& f;
|
||||
|
||||
@ These are the declarations of the template functions accessing the
|
||||
containers.
|
||||
|
||||
@<|KOrder| member access method declarations@>=
|
||||
template<int t> _Tg& g();
|
||||
template<int t> const _Tg& g() const;
|
||||
template<int t> _Tgs& gs();
|
||||
template<int t> const _Tgs& gs() const;
|
||||
template<int t> _Tgss& gss();
|
||||
template<int t> const _Tgss& gss() const;
|
||||
template<int t> _TG& G();
|
||||
template<int t> const _TG& G() const;
|
||||
template<int t> _TZstack& Zstack();
|
||||
template<int t> const _TZstack& Zstack() const;
|
||||
template<int t> _TGstack& Gstack();
|
||||
template<int t> const _TGstack& Gstack() const;
|
||||
template<int t> _Tm& m();
|
||||
template<int t> const _Tm& m() const;
|
||||
|
||||
|
||||
@ End of {\tt korder.h} file.
|
|
@ -0,0 +1,127 @@
|
|||
@q $Id: korder_stoch.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt korder\_stoch.cpp} file.
|
||||
@c
|
||||
#include "korder_stoch.h"
|
||||
|
||||
@<|MatrixAA| constructor code@>;
|
||||
@<|KOrderStoch| folded constructor code@>;
|
||||
@<|KOrderStoch| unfolded constructor code@>;
|
||||
@<|KOrderStoch| convenience method specializations@>;
|
||||
|
||||
@ Same as |@<|MatrixA| constructor code@>|, but the submatrix |gss_ys| is passed directly.
|
||||
@<|MatrixAA| constructor code@>=
|
||||
MatrixAA::MatrixAA(const FSSparseTensor& f, const IntSequence& ss,
|
||||
const TwoDMatrix& gss_ys, const PartitionY& ypart)
|
||||
: PLUMatrix(ypart.ny())
|
||||
{
|
||||
zeros();
|
||||
|
||||
IntSequence c(1); c[0] = 1;
|
||||
FGSTensor f_y(f, ss, c, TensorDimens(ss, c));
|
||||
add(1.0, f_y);
|
||||
|
||||
c[0] = 0;
|
||||
FGSTensor f_yss(f, ss, c, TensorDimens(ss, c));
|
||||
TwoDMatrix sub(*this, ypart.nstat, ypart.nys());
|
||||
sub.multAndAdd(f_yss, gss_ys);
|
||||
|
||||
calcPLU();
|
||||
}
|
||||
|
||||
|
||||
@
|
||||
@<|KOrderStoch| folded constructor code@>=
|
||||
KOrderStoch::KOrderStoch(const PartitionY& yp, int nu,
|
||||
const TensorContainer<FSSparseTensor>& fcont,
|
||||
const FGSContainer& hh, Journal& jr)
|
||||
: nvs(4), ypart(yp), journal(jr),@/
|
||||
_ug(4), _fg(4), _ugs(4), _fgs(4), _uG(4), _fG(4),@/
|
||||
_uh(NULL), _fh(&hh),@/
|
||||
_uZstack(&_uG, ypart.nyss(), &_ug, ypart.ny(), ypart.nys(), nu),@/
|
||||
_fZstack(&_fG, ypart.nyss(), &_fg, ypart.ny(), ypart.nys(), nu),@/
|
||||
_uGstack(&_ugs, ypart.nys(), nu),@/
|
||||
_fGstack(&_fgs, ypart.nys(), nu),@/
|
||||
f(fcont),@/
|
||||
matA(*(fcont.get(Symmetry(1))), _uZstack.getStackSizes(), *(hh.get(Symmetry(1,0,0,0))),
|
||||
ypart)
|
||||
{
|
||||
nvs[0] = ypart.nys();
|
||||
nvs[1] = nu;
|
||||
nvs[2] = nu;
|
||||
nvs[3] = 1;
|
||||
}
|
||||
|
||||
@
|
||||
@<|KOrderStoch| unfolded constructor code@>=
|
||||
KOrderStoch::KOrderStoch(const PartitionY& yp, int nu,
|
||||
const TensorContainer<FSSparseTensor>& fcont,
|
||||
const UGSContainer& hh, Journal& jr)
|
||||
: nvs(4), ypart(yp), journal(jr),@/
|
||||
_ug(4), _fg(4), _ugs(4), _fgs(4), _uG(4), _fG(4),@/
|
||||
_uh(&hh), _fh(NULL),@/
|
||||
_uZstack(&_uG, ypart.nyss(), &_ug, ypart.ny(), ypart.nys(), nu),@/
|
||||
_fZstack(&_fG, ypart.nyss(), &_fg, ypart.ny(), ypart.nys(), nu),@/
|
||||
_uGstack(&_ugs, ypart.nys(), nu),@/
|
||||
_fGstack(&_fgs, ypart.nys(), nu),@/
|
||||
f(fcont),@/
|
||||
matA(*(fcont.get(Symmetry(1))), _uZstack.getStackSizes(), *(hh.get(Symmetry(1,0,0,0))),
|
||||
ypart)
|
||||
{
|
||||
nvs[0] = ypart.nys();
|
||||
nvs[1] = nu;
|
||||
nvs[2] = nu;
|
||||
nvs[3] = 1;
|
||||
}
|
||||
|
||||
|
||||
@
|
||||
@<|KOrderStoch| convenience method specializations@>=
|
||||
template<> ctraits<KOrder::unfold>::Tg& KOrderStoch::g<KOrder::unfold>()
|
||||
{@+ return _ug;@+}
|
||||
template<>@; const ctraits<KOrder::unfold>::Tg& KOrderStoch::g<KOrder::unfold>()@+const@;
|
||||
{@+ return _ug;@+}
|
||||
template<> ctraits<KOrder::fold>::Tg& KOrderStoch::g<KOrder::fold>()
|
||||
{@+ return _fg;@+}
|
||||
template<> const ctraits<KOrder::fold>::Tg& KOrderStoch::g<KOrder::fold>()@+const@;
|
||||
{@+ return _fg;@+}
|
||||
template<> ctraits<KOrder::unfold>::Tgs& KOrderStoch::gs<KOrder::unfold>()
|
||||
{@+ return _ugs;@+}
|
||||
template<> const ctraits<KOrder::unfold>::Tgs& KOrderStoch::gs<KOrder::unfold>()@+const@;
|
||||
{@+ return _ugs;@+}
|
||||
template<> ctraits<KOrder::fold>::Tgs& KOrderStoch::gs<KOrder::fold>()
|
||||
{@+ return _fgs;@+}
|
||||
template<> const ctraits<KOrder::fold>::Tgs& KOrderStoch::gs<KOrder::fold>()@+const@;
|
||||
{@+ return _fgs;@+}
|
||||
template<> const ctraits<KOrder::unfold>::Tgss& KOrderStoch::h<KOrder::unfold>()@+const@;
|
||||
{@+ return *_uh;@+}
|
||||
template<> const ctraits<KOrder::fold>::Tgss& KOrderStoch::h<KOrder::fold>()@+const@;
|
||||
{@+ return *_fh;@+}
|
||||
template<> ctraits<KOrder::unfold>::TG& KOrderStoch::G<KOrder::unfold>()
|
||||
{@+ return _uG;@+}
|
||||
template<> const ctraits<KOrder::unfold>::TG& KOrderStoch::G<KOrder::unfold>()@+const@;
|
||||
{@+ return _uG;@+}
|
||||
template<> ctraits<KOrder::fold>::TG& KOrderStoch::G<KOrder::fold>()
|
||||
{@+ return _fG;@+}
|
||||
template<> const ctraits<KOrder::fold>::TG& KOrderStoch::G<KOrder::fold>()@+const@;
|
||||
{@+ return _fG;@+}
|
||||
template<> ctraits<KOrder::unfold>::TZXstack& KOrderStoch::Zstack<KOrder::unfold>()
|
||||
{@+ return _uZstack;@+}
|
||||
template<> const ctraits<KOrder::unfold>::TZXstack& KOrderStoch::Zstack<KOrder::unfold>()@+const@;
|
||||
{@+ return _uZstack;@+}
|
||||
template<> ctraits<KOrder::fold>::TZXstack& KOrderStoch::Zstack<KOrder::fold>()
|
||||
{@+ return _fZstack;@+}
|
||||
template<> const ctraits<KOrder::fold>::TZXstack& KOrderStoch::Zstack<KOrder::fold>()@+const@;
|
||||
{@+ return _fZstack;@+}
|
||||
template<> ctraits<KOrder::unfold>::TGXstack& KOrderStoch::Gstack<KOrder::unfold>()
|
||||
{@+ return _uGstack;@+}
|
||||
template<> const ctraits<KOrder::unfold>::TGXstack& KOrderStoch::Gstack<KOrder::unfold>()@+const@;
|
||||
{@+ return _uGstack;@+}
|
||||
template<> ctraits<KOrder::fold>::TGXstack& KOrderStoch::Gstack<KOrder::fold>()
|
||||
{@+ return _fGstack;@+}
|
||||
template<> const ctraits<KOrder::fold>::TGXstack& KOrderStoch::Gstack<KOrder::fold>()@+const@;
|
||||
{@+ return _fGstack;@+}
|
||||
|
||||
|
||||
@ End of {\tt korder\_stoch.cpp} file.
|
|
@ -0,0 +1,538 @@
|
|||
@q $Id: korder_stoch.hweb 418 2005-08-16 15:10:06Z kamenik $ @>
|
||||
@q Copyright 2005, Ondra Kamenik @>
|
||||
|
||||
@*2 Higher order at stochastic steady. Start of {\tt korder\_stoch.h} file.
|
||||
|
||||
This file defines a number of classes of which |KOrderStoch| is the
|
||||
main purpose. Basically, |KOrderStoch| calculates first and higher
|
||||
order Taylor expansion of a policy rule at $\sigma>0$ with explicit
|
||||
forward $g^{**}$. More formally, we have to solve a policy rule $g$
|
||||
from
|
||||
$$E_t\left[f(g^{**}(g^*(y^*_t,u_t,\sigma),u_{t+1},\sigma),g(y^*,u_t,\sigma),y^*,u_t)\right]$$
|
||||
As an introduction in {\tt approximation.hweb} argues, $g^{**}$ at
|
||||
tine $t+1$ must be given from outside. Let the explicit
|
||||
$E_t(g^{**}(y^*,u_{t+1},\sigma)$ be equal to $h(y^*,\sigma)$. Then we
|
||||
have to solve
|
||||
$$f(h(g^*(y^*,u,\sigma),\sigma),g(y,u,\sigma),y,u),$$
|
||||
which is much easier than fully implicit system for $\sigma=0$.
|
||||
|
||||
Besides the class |KOrderStoch|, we declare here also classes for the
|
||||
new containers corresponding to
|
||||
$f(h(g^*(y^*,u,\sigma),\sigma),g(y,u,\sigma),y,u)$. Further, we
|
||||
declare |IntegDerivs| and |StochForwardDerivs| classes which basically
|
||||
calculate $h$ as an extrapolation based on an approximation to $g$ at
|
||||
lower $\sigma$.
|
||||
|
||||
@s IntegDerivs int
|
||||
@s StochForwardDerivs int
|
||||
@s GContainer int
|
||||
@s ZContainer int
|
||||
@s GXContainer int
|
||||
@s ZXContainer int
|
||||
@s MatrixAA int
|
||||
@s KOrderStoch int
|
||||
@s StackContainer int
|
||||
@s _Ttype int
|
||||
@s _Ctype int
|
||||
@s UnfoldedStackContainer int
|
||||
@s FoldedStackContainer int
|
||||
|
||||
@c
|
||||
#include "korder.h"
|
||||
#include "faa_di_bruno.h"
|
||||
#include "journal.h"
|
||||
|
||||
|
||||
@<|IntegDerivs| class declaration@>;
|
||||
@<|StochForwardDerivs| class declaration@>;
|
||||
@<|GXContainer| class declaration@>;
|
||||
@<|ZXContainer| class declaration@>;
|
||||
@<|UnfoldedGXContainer| class declaration@>;
|
||||
@<|FoldedGXContainer| class declaration@>;
|
||||
@<|UnfoldedZXContainer| class declaration@>;
|
||||
@<|FoldedZXContainer| class declaration@>;
|
||||
@<|MatrixAA| class declaration@>;
|
||||
@<|KOrderStoch| class declaration@>;
|
||||
|
||||
@ This class is a container, which has a specialized constructor
|
||||
integrating the policy rule at given $\sigma$.
|
||||
|
||||
@<|IntegDerivs| class declaration@>=
|
||||
template <int t>
|
||||
class IntegDerivs : public ctraits<t>::Tgss {
|
||||
public:@;
|
||||
@<|IntegDerivs| constructor code@>;
|
||||
};
|
||||
|
||||
@ This constuctor integrates a rule (namely its $g^{**}$ part) with
|
||||
respect to $u=\tilde\sigma\eta$, and stores to the object the
|
||||
derivatives of this integral $h$ at $(y^*,u,\sigma)=(\tilde
|
||||
y^*,0,\tilde\sigma)$. The original container of $g^{**}$, the moments of
|
||||
the stochastic shocks |mom| and the $\tilde\sigma$ are input.
|
||||
|
||||
The code follows the following derivation
|
||||
\def\lims{\vbox{\baselineskip=0pt\lineskip=1pt
|
||||
\setbox0=\hbox{$\scriptstyle n+k=p$}\hbox to\wd0{\hss$\scriptstyle m=0$\hss}\box0}}
|
||||
$$
|
||||
\eqalign{h(y,\sigma)&=E_t\left[g(y,u',\sigma)\right]=\cr
|
||||
&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+j+k=d}\pmatrix{d\cr i,j,k}\left[g_{y^iu^j\sigma^k}\right]
|
||||
(y^*-\tilde y^*)^i\sigma^j\Sigma^j(\sigma-\tilde\sigma)^k\cr
|
||||
&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+m+n+k=d}\pmatrix{d\cr i,m+n,k}
|
||||
\left[g_{y^iu^{m+n}\sigma^k}\right]
|
||||
\hat y^{*i}\Sigma^{m+n}\pmatrix{m+n\cr m,n}{\tilde\sigma}^m\hat\sigma^{k+n}\cr
|
||||
&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+m+n+k=d}\pmatrix{d\cr i,m,n,k}
|
||||
\left[g_{y^iu^{m+n}\sigma^k}\right]
|
||||
\Sigma^{m+n}{\tilde\sigma}^m\hat y^{*i}\hat\sigma^{k+n}\cr
|
||||
&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+p=d}\sum_{\lims}\pmatrix{d\cr i,m,n,k}
|
||||
\left[g_{y^iu^{m+n}\sigma^k}\right]
|
||||
\Sigma^{m+n}{\tilde\sigma}^m\hat y^{*i}\hat\sigma^{k+n}\cr
|
||||
&=\tilde y+\sum_{d=1}{1\over d!}\sum_{i+p=d}\pmatrix{d\cr i,p}
|
||||
\left[\sum_{\lims}\pmatrix{p\cr n,k}{1\over m!}
|
||||
\left[g_{y^iu^{m+n}\sigma^k}\right]
|
||||
\Sigma^{m+n}{\tilde\sigma}^m\right]\hat y^{*i}\hat\sigma^{k+n},
|
||||
}
|
||||
$$
|
||||
where $\pmatrix{a\cr b_1,\ldots, b_n}$ is a generalized combination
|
||||
number, $p=k+n$, $\hat\sigma=\sigma-\tilde\sigma$, $\hat
|
||||
y^*=y^*-\tilde y$, and we dropped writing the multidimensional indexes
|
||||
in Einstein summation.
|
||||
|
||||
This implies that
|
||||
$$h_{y^i\sigma^p}=\sum_{\lims}\pmatrix{p\cr n,k}{1\over m!}
|
||||
\left[g_{y^iu^{m+n}\sigma^k}\right]
|
||||
\Sigma^{m+n}{\tilde\sigma}^m$$
|
||||
and this is exactly what the code does.
|
||||
|
||||
@<|IntegDerivs| constructor code@>=
|
||||
IntegDerivs(int r, const IntSequence& nvs, const _Tgss& g, const _Tm& mom,
|
||||
double at_sigma)
|
||||
: ctraits<t>::Tgss(4)
|
||||
{
|
||||
int maxd = g.getMaxDim();
|
||||
for (int d = 1; d <= maxd; d++) {
|
||||
for (int i = 0; i <= d; i++) {
|
||||
int p = d-i;
|
||||
Symmetry sym(i,0,0,p);
|
||||
_Ttensor* ten = new _Ttensor(r, TensorDimens(sym, nvs));
|
||||
@<calculate derivative $h_{y^i\sigma^p}$@>;
|
||||
insert(ten);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ This code calculates
|
||||
$$h_{y^i\sigma^p}=\sum_{\lims}\pmatrix{p\cr n,k}{1\over m!}
|
||||
\left[g_{y^iu^{m+n}\sigma^k}\right]
|
||||
\Sigma^{m+n}{\tilde\sigma}^m$$
|
||||
and stores it in |ten|.
|
||||
|
||||
@<calculate derivative $h_{y^i\sigma^p}$@>=
|
||||
ten->zeros();
|
||||
for (int n = 0; n <= p; n++) {
|
||||
int k = p-n;
|
||||
int povern = Tensor::noverk(p,n);
|
||||
int mfac = 1;
|
||||
for (int m = 0; i+m+n+k <= maxd; m++, mfac*=m) {
|
||||
double mult = (pow(at_sigma,m)*povern)/mfac;
|
||||
Symmetry sym_mn(i,m+n,0,k);
|
||||
if (m+n == 0 && g.check(sym_mn))
|
||||
ten->add(mult, *(g.get(sym_mn)));
|
||||
if (m+n > 0 && KOrder::is_even(m+n) && g.check(sym_mn)) {
|
||||
_Ttensor gtmp(*(g.get(sym_mn)));
|
||||
gtmp.mult(mult);
|
||||
gtmp.contractAndAdd(1, *ten, *(mom.get(Symmetry(m+n))));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ This class calculates an extrapolation of expectation of forward
|
||||
derivatives. It is a container, all calculations are done in a
|
||||
constructor.
|
||||
|
||||
The class calculates derivatives of $E[g(y*,u,\sigma)]$ at $(\bar
|
||||
y^*,\bar\sigma)$. The derivatives are extrapolated based on
|
||||
derivatives at $(\tilde y^*,\tilde\sigma)$.
|
||||
|
||||
@<|StochForwardDerivs| class declaration@>=
|
||||
template <int t>
|
||||
class StochForwardDerivs : public ctraits<t>::Tgss {
|
||||
public:@;
|
||||
@<|StochForwardDerivs| constructor code@>;
|
||||
};
|
||||
|
||||
@ This is the constructor which performs the integration and the
|
||||
extrapolation. Its parameters are: |g| is the container of derivatives
|
||||
at $(\tilde y,\tilde\sigma)$; |m| are the moments of stochastic
|
||||
shocks; |ydelta| is a difference of the steady states $\bar y-\tilde
|
||||
y$; |sdelta| is a difference between new sigma and old sigma
|
||||
$\bar\sigma-\tilde\sigma$, and |at_sigma| is $\tilde\sigma$. There is
|
||||
no need of inputing the $\tilde y$.
|
||||
|
||||
We do the operation in four steps:
|
||||
\orderedlist
|
||||
\li Integrate $g^{**}$, the derivatives are at $(\tilde y,\tilde\sigma)$
|
||||
\li Form the (full symmetric) polynomial from the derivatives stacking
|
||||
$\left[\matrix{y^*\cr\sigma}\right]$
|
||||
\li Centralize this polynomial about $(\bar y,\bar\sigma)$
|
||||
\li Recover general symmetry tensors from the (full symmetric) polynomial
|
||||
\endorderedlist
|
||||
|
||||
@<|StochForwardDerivs| constructor code@>=
|
||||
StochForwardDerivs(const PartitionY& ypart, int nu,
|
||||
const _Tgss& g, const _Tm& m,
|
||||
const Vector& ydelta, double sdelta,
|
||||
double at_sigma)
|
||||
: ctraits<t>::Tgss(4)
|
||||
{
|
||||
int maxd = g.getMaxDim();
|
||||
int r = ypart.nyss();
|
||||
|
||||
@<make |g_int| be integral of $g^{**}$ at $(\tilde y,\tilde\sigma)$@>;
|
||||
@<make |g_int_sym| be full symmetric polynomial from |g_int|@>;
|
||||
@<make |g_int_cent| the centralized polynomial about $(\bar y,\bar\sigma)$@>;
|
||||
@<pull out general symmetry tensors from |g_int_cent|@>;
|
||||
}
|
||||
|
||||
@ This simply constructs |IntegDerivs| class. Note that the |nvs| of
|
||||
the tensors has zero dimensions for shocks, this is because we need to
|
||||
make easily stacks of the form $\left[\matrix{y^*\cr\sigma}\right]$ in
|
||||
the next step.
|
||||
|
||||
@<make |g_int| be integral of $g^{**}$ at $(\tilde y,\tilde\sigma)$@>=
|
||||
IntSequence nvs(4);
|
||||
nvs[0] = ypart.nys(); nvs[1] = 0; nvs[2] = 0; nvs[3] = 1;
|
||||
IntegDerivs<t> g_int(r, nvs, g, m, at_sigma);
|
||||
|
||||
@ Here we just form a polynomial whose unique variable corresponds to
|
||||
$\left[\matrix{y^*\cr\sigma}\right]$ stack.
|
||||
|
||||
@<make |g_int_sym| be full symmetric polynomial from |g_int|@>=
|
||||
_Tpol g_int_sym(r, ypart.nys()+1);
|
||||
for (int d = 1; d <= maxd; d++) {
|
||||
_Ttensym* ten = new _Ttensym(r, ypart.nys()+1, d);
|
||||
ten->zeros();
|
||||
for (int i = 0; i <= d; i++) {
|
||||
int k = d-i;
|
||||
if (g_int.check(Symmetry(i,0,0,k)))
|
||||
ten->addSubTensor(*(g_int.get(Symmetry(i,0,0,k))));
|
||||
}
|
||||
g_int_sym.insert(ten);
|
||||
}
|
||||
|
||||
@ Here we centralize the polynomial to $(\bar y,\bar\sigma)$ knowing
|
||||
that the polynomial was centralized about $(\tilde
|
||||
y,\tilde\sigma)$. This is done by derivating and evaluating the
|
||||
derivated polynomial at $(\bar y-\tilde
|
||||
y,\bar\sigma-\tilde\sigma)$. The stack of this vector is |delta| in
|
||||
the code.
|
||||
|
||||
@<make |g_int_cent| the centralized polynomial about $(\bar y,\bar\sigma)$@>=
|
||||
Vector delta(ypart.nys()+1);
|
||||
Vector dy(delta, 0, ypart.nys());
|
||||
ConstVector dy_in(ydelta, ypart.nstat, ypart.nys());
|
||||
dy = dy_in;
|
||||
delta[ypart.nys()] = sdelta;
|
||||
_Tpol g_int_cent(r, ypart.nys()+1);
|
||||
for (int d = 1; d <= maxd; d++) {
|
||||
g_int_sym.derivative(d-1);
|
||||
_Ttensym* der = g_int_sym.evalPartially(d, delta);
|
||||
g_int_cent.insert(der);
|
||||
}
|
||||
|
||||
@ Here we only recover the general symmetry derivatives from the full
|
||||
symmetric polynomial. Note that the derivative get the true |nvs|.
|
||||
|
||||
@<pull out general symmetry tensors from |g_int_cent|@>=
|
||||
IntSequence ss(4);
|
||||
ss[0]=ypart.nys(); ss[1]=0; ss[2]=0; ss[3]=1;
|
||||
IntSequence pp(4);
|
||||
pp[0]=0; pp[1]=1; pp[2]=2; pp[3]=3;
|
||||
IntSequence true_nvs(nvs);
|
||||
true_nvs[1]=nu; true_nvs[2]=nu;
|
||||
for (int d = 1; d <= maxd; d++) {
|
||||
if (g_int_cent.check(Symmetry(d))) {
|
||||
for (int i = 0; i <= d; i++) {
|
||||
Symmetry sym(i, 0, 0, d-i);
|
||||
IntSequence coor(sym, pp);
|
||||
_Ttensor* ten = new _Ttensor(*(g_int_cent.get(Symmetry(d))), ss, coor,
|
||||
TensorDimens(sym, true_nvs));
|
||||
insert(ten);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ This container corresponds to $h(g^*(y,u,\sigma),\sigma)$. Note that
|
||||
in our application, the $\sigma$ as a second argument to $h$ will be
|
||||
its fourth variable in symmetry, so we have to do four member stack
|
||||
having the second and third stack dummy.
|
||||
|
||||
@<|GXContainer| class declaration@>=
|
||||
template <class _Ttype>
|
||||
class GXContainer : public GContainer<_Ttype> {
|
||||
public:@;
|
||||
typedef StackContainerInterface<_Ttype> _Stype;
|
||||
typedef typename StackContainer<_Ttype>::_Ctype _Ctype;
|
||||
typedef typename StackContainer<_Ttype>::itype itype;
|
||||
GXContainer(const _Ctype* gs, int ngs, int nu)
|
||||
: GContainer<_Ttype>(gs, ngs, nu)@+ {}
|
||||
@<|GXContainer::getType| code@>;
|
||||
};
|
||||
|
||||
@ This routine corresponds to this stack:
|
||||
$$\left[\matrix{g^*(y,u,\sigma)\cr dummy\cr dummy\cr\sigma}\right]$$
|
||||
|
||||
@<|GXContainer::getType| code@>=
|
||||
itype getType(int i, const Symmetry& s) const
|
||||
{
|
||||
if (i == 0)
|
||||
if (s[2] > 0)
|
||||
return _Stype::zero;
|
||||
else
|
||||
return _Stype::matrix;
|
||||
if (i == 1)
|
||||
return _Stype::zero;
|
||||
if (i == 2)
|
||||
return _Stype::zero;
|
||||
if (i == 3)
|
||||
if (s == Symmetry(0,0,0,1))
|
||||
return _Stype::unit;
|
||||
else
|
||||
return _Stype::zero;
|
||||
|
||||
KORD_RAISE("Wrong stack index in GXContainer::getType");
|
||||
}
|
||||
|
||||
|
||||
@ This container corresponds to $f(H(y,u,\sigma),g(y,u,sigma),y,u)$,
|
||||
where the $H$ has the size (number of rows) as $g^{**}$. Since it is
|
||||
very simmilar to |ZContainer|, we inherit form it and override only
|
||||
|getType| method.
|
||||
|
||||
@<|ZXContainer| class declaration@>=
|
||||
template <class _Ttype>
|
||||
class ZXContainer : public ZContainer<_Ttype> {
|
||||
public:@;
|
||||
typedef StackContainerInterface<_Ttype> _Stype;
|
||||
typedef typename StackContainer<_Ttype>::_Ctype _Ctype;
|
||||
typedef typename StackContainer<_Ttype>::itype itype;
|
||||
ZXContainer(const _Ctype* gss, int ngss, const _Ctype* g, int ng, int ny, int nu)
|
||||
: ZContainer<_Ttype>(gss, ngss, g, ng, ny, nu) @+{}
|
||||
@<|ZXContainer::getType| code@>;
|
||||
};
|
||||
|
||||
@ This |getType| method corresponds to this stack:
|
||||
$$\left[\matrix{H(y,u,\sigma)\cr g(y,u,\sigma)\cr y\cr u}\right]$$
|
||||
|
||||
@<|ZXContainer::getType| code@>=
|
||||
itype getType(int i, const Symmetry& s) const
|
||||
{
|
||||
if (i == 0)
|
||||
if (s[2] > 0)
|
||||
return _Stype::zero;
|
||||
else
|
||||
return _Stype::matrix;
|
||||
if (i == 1)
|
||||
if (s[2] > 0)
|
||||
return _Stype::zero;
|
||||
else
|
||||
return _Stype::matrix;
|
||||
if (i == 2)
|
||||
if (s == Symmetry(1,0,0,0))
|
||||
return _Stype::unit;
|
||||
else
|
||||
return _Stype::zero;
|
||||
if (i == 3)
|
||||
if (s == Symmetry(0,1,0,0))
|
||||
return _Stype::unit;
|
||||
else
|
||||
return _Stype::zero;
|
||||
|
||||
KORD_RAISE("Wrong stack index in ZXContainer::getType");
|
||||
}
|
||||
|
||||
@
|
||||
@<|UnfoldedGXContainer| class declaration@>=
|
||||
class UnfoldedGXContainer : public GXContainer<UGSTensor>, public UnfoldedStackContainer {
|
||||
public:@;
|
||||
typedef TensorContainer<UGSTensor> _Ctype;
|
||||
UnfoldedGXContainer(const _Ctype* gs, int ngs, int nu)
|
||||
: GXContainer<UGSTensor>(gs, ngs, nu)@+ {}
|
||||
};
|
||||
|
||||
@
|
||||
@<|FoldedGXContainer| class declaration@>=
|
||||
class FoldedGXContainer : public GXContainer<FGSTensor>, public FoldedStackContainer {
|
||||
public:@;
|
||||
typedef TensorContainer<FGSTensor> _Ctype;
|
||||
FoldedGXContainer(const _Ctype* gs, int ngs, int nu)
|
||||
: GXContainer<FGSTensor>(gs, ngs, nu)@+ {}
|
||||
};
|
||||
|
||||
@
|
||||
@<|UnfoldedZXContainer| class declaration@>=
|
||||
class UnfoldedZXContainer : public ZXContainer<UGSTensor>, public UnfoldedStackContainer {
|
||||
public:@;
|
||||
typedef TensorContainer<UGSTensor> _Ctype;
|
||||
UnfoldedZXContainer(const _Ctype* gss, int ngss, const _Ctype* g, int ng, int ny, int nu)
|
||||
: ZXContainer<UGSTensor>(gss, ngss, g, ng, ny, nu)@+ {}
|
||||
};
|
||||
|
||||
@
|
||||
@<|FoldedZXContainer| class declaration@>=
|
||||
class FoldedZXContainer : public ZXContainer<FGSTensor>, public FoldedStackContainer {
|
||||
public:@;
|
||||
typedef TensorContainer<FGSTensor> _Ctype;
|
||||
FoldedZXContainer(const _Ctype* gss, int ngss, const _Ctype* g, int ng, int ny, int nu)
|
||||
: ZXContainer<FGSTensor>(gss, ngss, g, ng, ny, nu)@+ {}
|
||||
};
|
||||
|
||||
@ This matrix corresponds to
|
||||
$$\left[f_{y}\right]+ \left[0
|
||||
\left[f_{y^{**}_+}\right]\cdot\left[h^{**}_{y^*}\right] 0\right]$$
|
||||
This is very the same as |MatrixA|, the only difference that the
|
||||
|MatrixA| is constructed from whole $h_{y^*}$, not only from
|
||||
$h^{**}_{y^*}$, hence the new abstraction.
|
||||
|
||||
@<|MatrixAA| class declaration@>=
|
||||
class MatrixAA : public PLUMatrix {
|
||||
public:@;
|
||||
MatrixAA(const FSSparseTensor& f, const IntSequence& ss,
|
||||
const TwoDMatrix& gyss, const PartitionY& ypart);
|
||||
};
|
||||
|
||||
|
||||
@ This class calculates derivatives of $g$ given implicitly by
|
||||
$f(h(g^*(y,u,\sigma),\sigma),g(y,u,\sigma),y,u)$, where $h(y,\sigma)$
|
||||
is given from outside.
|
||||
|
||||
Structurally, the class is very similar to |KOrder|, but calculations
|
||||
are much easier. The two constructors construct an object from sparse
|
||||
derivatives of $f$, and derivatives of $h$. The caller must ensure
|
||||
that the both derivatives are done at the same point.
|
||||
|
||||
The calculation for order $k$ (including $k=1$) is done by a call
|
||||
|performStep(k)|. The derivatives can be retrived by |getFoldDers()|
|
||||
or |getUnfoldDers()|.
|
||||
|
||||
@<|KOrderStoch| class declaration@>=
|
||||
class KOrderStoch {
|
||||
protected:@;
|
||||
IntSequence nvs;
|
||||
PartitionY ypart;
|
||||
Journal& journal;
|
||||
UGSContainer _ug;
|
||||
FGSContainer _fg;
|
||||
UGSContainer _ugs;
|
||||
FGSContainer _fgs;
|
||||
UGSContainer _uG;
|
||||
FGSContainer _fG;
|
||||
const UGSContainer* _uh;
|
||||
const FGSContainer* _fh;
|
||||
UnfoldedZXContainer _uZstack;
|
||||
FoldedZXContainer _fZstack;
|
||||
UnfoldedGXContainer _uGstack;
|
||||
FoldedGXContainer _fGstack;
|
||||
const TensorContainer<FSSparseTensor>& f;
|
||||
MatrixAA matA;
|
||||
public:@;
|
||||
KOrderStoch(const PartitionY& ypart, int nu, const TensorContainer<FSSparseTensor>& fcont,
|
||||
const FGSContainer& hh, Journal& jr);
|
||||
KOrderStoch(const PartitionY& ypart, int nu, const TensorContainer<FSSparseTensor>& fcont,
|
||||
const UGSContainer& hh, Journal& jr);
|
||||
@<|KOrderStoch::performStep| templated code@>;
|
||||
const FGSContainer& getFoldDers() const
|
||||
{@+ return _fg;@+}
|
||||
const UGSContainer& getUnfoldDers() const
|
||||
{@+ return _ug;@+}
|
||||
protected:@;
|
||||
@<|KOrderStoch::faaDiBrunoZ| templated code@>;
|
||||
@<|KOrderStoch::faaDiBrunoG| templated code@>;
|
||||
@<|KOrderStoch| convenience access methods@>;
|
||||
};
|
||||
|
||||
@ This calculates a derivative of $f(G(y,u,\sigma),g(y,u,\sigma),y,u)$
|
||||
of a given symmetry.
|
||||
|
||||
@<|KOrderStoch::faaDiBrunoZ| templated code@>=
|
||||
template <int t>
|
||||
_Ttensor* faaDiBrunoZ(const Symmetry& sym) const
|
||||
{
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Faa Di Bruno ZX container for " << sym << endrec;
|
||||
_Ttensor* res = new _Ttensor(ypart.ny(), TensorDimens(sym, nvs));
|
||||
FaaDiBruno bruno(journal);
|
||||
bruno.calculate(Zstack<t>(), f, *res);
|
||||
return res;
|
||||
}
|
||||
|
||||
@ This calculates a derivative of
|
||||
$G(y,u,\sigma)=h(g^*(y,u,\sigma),\sigma)$ of a given symmetry.
|
||||
|
||||
@<|KOrderStoch::faaDiBrunoG| templated code@>=
|
||||
template <int t>
|
||||
_Ttensor* faaDiBrunoG(const Symmetry& sym) const
|
||||
{
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Faa Di Bruno GX container for " << sym << endrec;
|
||||
TensorDimens tdims(sym, nvs);
|
||||
_Ttensor* res = new _Ttensor(ypart.nyss(), tdims);
|
||||
FaaDiBruno bruno(journal);
|
||||
bruno.calculate(Gstack<t>(), h<t>(), *res);
|
||||
return res;
|
||||
}
|
||||
|
||||
@ This retrives all $g$ derivatives of a given dimension from implicit
|
||||
$f(h(g^*(y,u,\sigma),\sigma),g(y,u,\sigma),y,u)$. It suppose that all
|
||||
derivatives of smaller dimensions have been retrieved.
|
||||
|
||||
So, we go through all symmetries $s$, calculate $G_s$ conditional on
|
||||
$g_s=0$, insert the derivative to the $G$ container, then calculate
|
||||
$F_s$ conditional on $g_s=0$. This is a righthand side. The left hand
|
||||
side is $matA\cdot g_s$. The $g_s$ is retrieved as
|
||||
$$g_s=-matA^{-1}\cdot RHS.$$ Finally we have to update $G_s$ by
|
||||
calling |Gstack<t>().multAndAdd(1, h<t>(), *G_sym)|.
|
||||
|
||||
@<|KOrderStoch::performStep| templated code@>=
|
||||
template <int t>
|
||||
void performStep(int order)
|
||||
{
|
||||
int maxd = g<t>().getMaxDim();
|
||||
KORD_RAISE_IF(order-1 != maxd && (order != 1 || maxd != -1),
|
||||
"Wrong order for KOrderStoch::performStep");
|
||||
SymmetrySet ss(order, 4);
|
||||
for (symiterator si(ss); !si.isEnd(); ++si) {
|
||||
if ((*si)[2] == 0) {
|
||||
JournalRecordPair pa(journal);
|
||||
pa << "Recovering symmetry " << *si << endrec;
|
||||
|
||||
_Ttensor* G_sym = faaDiBrunoG<t>(*si);
|
||||
G<t>().insert(G_sym);
|
||||
|
||||
_Ttensor* g_sym = faaDiBrunoZ<t>(*si);
|
||||
g_sym->mult(-1.0);
|
||||
matA.multInv(*g_sym);
|
||||
g<t>().insert(g_sym);
|
||||
gs<t>().insert(new _Ttensor(ypart.nstat, ypart.nys(), *g_sym));
|
||||
|
||||
Gstack<t>().multAndAdd(1, h<t>(), *G_sym);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|KOrderStoch| convenience access methods@>=
|
||||
template<int t> _Tg& g();
|
||||
template<int t> const _Tg& g() const;
|
||||
template<int t> _Tgs& gs();
|
||||
template<int t> const _Tgs& gs() const;
|
||||
template<int t> const _Tgss& h() const;
|
||||
template<int t> _TG& G();
|
||||
template<int t> const _TG& G() const;
|
||||
template<int t> _TZXstack& Zstack();
|
||||
template<int t> const _TZXstack& Zstack() const;
|
||||
template<int t> _TGXstack& Gstack();
|
||||
template<int t> const _TGXstack& Gstack() const;
|
||||
|
||||
|
||||
@ End of {\tt korder\_stoch.h} file.
|
|
@ -0,0 +1,66 @@
|
|||
@q $Id: main.web 2333 2009-01-14 10:32:55Z kamenik $ @>
|
||||
@q Copyright 2004, Ondra Kamenik @>
|
||||
|
||||
\let\ifpdf\relax
|
||||
\input eplain
|
||||
\def\title{{\mainfont Dynare++}}
|
||||
|
||||
|
||||
@i c++lib.w
|
||||
@s const_reverse_iterator int
|
||||
@s value_type int
|
||||
|
||||
\titletrue
|
||||
\null\vfill
|
||||
\centerline{\titlefont Dynare++ DSGE solver}
|
||||
\vskip\baselineskip
|
||||
\centerline{\vtop{\hsize=10cm\leftskip=0pt plus 1fil
|
||||
\rightskip=0pt plus 1fil\noindent
|
||||
solves higher order approximation to a decision rule of a Dynamic Stochastic
|
||||
General Equilibrium model about deterministic and stochastic fix point}}
|
||||
\vfill\vfill
|
||||
Copyright \copyright\ 2004, 2005, 2006, 2007, 2008, 2009 by Ondra Kamenik
|
||||
|
||||
@*1 Utilities.
|
||||
@i kord_exception.hweb
|
||||
|
||||
@i journal.hweb
|
||||
@i journal.cweb
|
||||
|
||||
@i normal_conjugate.hweb
|
||||
@i normal_conjugate.cweb
|
||||
|
||||
@i random.hweb
|
||||
@i random.cweb
|
||||
|
||||
@i mersenne_twister.hweb
|
||||
|
||||
@i faa_di_bruno.hweb
|
||||
@i faa_di_bruno.cweb
|
||||
|
||||
@*1 Retrieving derivatives.
|
||||
|
||||
@i first_order.hweb
|
||||
@i first_order.cweb
|
||||
|
||||
@i korder.hweb
|
||||
@i korder.cweb
|
||||
|
||||
@i korder_stoch.hweb
|
||||
@i korder_stoch.cweb
|
||||
|
||||
@*1 Putting all together.
|
||||
|
||||
@i dynamic_model.hweb
|
||||
@i dynamic_model.cweb
|
||||
|
||||
@i approximation.hweb
|
||||
@i approximation.cweb
|
||||
|
||||
@i decision_rule.hweb
|
||||
@i decision_rule.cweb
|
||||
|
||||
@i global_check.hweb
|
||||
@i global_check.cweb
|
||||
|
||||
@*1 Index.
|
|
@ -0,0 +1,141 @@
|
|||
@q $Id: mersenne_twister.hweb 1490 2007-12-19 14:29:46Z kamenik $ @>
|
||||
@q Copyright 2007, Ondra Kamenik @>
|
||||
|
||||
@*2 Mersenne Twister PRNG. Start of {\tt mersenne\_twister.h} file.
|
||||
|
||||
This file provides a class for generating random numbers with
|
||||
encapsulated state. It is based on the work of Makoto Matsumoto and
|
||||
Takuji Nishimura, implementation inspired by code of Richard Wagner
|
||||
and Geoff Kuenning.
|
||||
|
||||
@s uint32 int
|
||||
@s MersenneTwister int
|
||||
|
||||
@c
|
||||
#ifndef MERSENNE_TWISTER_H
|
||||
#define MERSENNE_TWISTER_H
|
||||
|
||||
#include "random.h"
|
||||
#include <string.h>
|
||||
|
||||
@<|MersenneTwister| class declaration@>;
|
||||
@<|MersenneTwister| inline method definitions@>;
|
||||
|
||||
#endif
|
||||
|
||||
@
|
||||
@<|MersenneTwister| class declaration@>=
|
||||
class MersenneTwister : public RandomGenerator {
|
||||
protected:@;
|
||||
typedef unsigned int uint32;
|
||||
enum {STATE_SIZE = 624};
|
||||
enum {RECUR_OFFSET = 397};
|
||||
uint32 statevec[STATE_SIZE];
|
||||
int stateptr;
|
||||
public:@;
|
||||
MersenneTwister(uint32 iseed);
|
||||
MersenneTwister(const MersenneTwister& mt);
|
||||
virtual ~MersenneTwister() {}
|
||||
uint32 lrand();
|
||||
double drand();
|
||||
double uniform()
|
||||
{@+return drand();@+}
|
||||
protected:@;
|
||||
void seed(uint32 iseed);
|
||||
void refresh();
|
||||
private:@;
|
||||
@<|MersenneTwister| static inline methods@>;
|
||||
};
|
||||
|
||||
@
|
||||
@<|MersenneTwister| static inline methods@>=
|
||||
static uint32 hibit(uint32 u)
|
||||
{return u & 0x80000000UL;}
|
||||
static uint32 lobit(uint32 u)
|
||||
{return u & 0x00000001UL;}
|
||||
static uint32 lobits(uint32 u)
|
||||
{return u & 0x7fffffffUL;}
|
||||
static uint32 mixbits(uint32 u, uint32 v)
|
||||
{return hibit(u) | lobits(v);}
|
||||
static uint32 twist(uint32 m, uint32 s0, uint32 s1)
|
||||
{return m ^ (mixbits(s0,s1)>>1) ^ (-lobit(s1) & 0x9908b0dfUL);}
|
||||
|
||||
|
||||
@
|
||||
@<|MersenneTwister| inline method definitions@>=
|
||||
@<|MersenneTwister| constructor code@>;
|
||||
@<|MersenneTwister| copy constructor code@>;
|
||||
@<|MersenneTwister::lrand| code@>;
|
||||
@<|MersenneTwister::drand| code@>;
|
||||
@<|MersenneTwister::seed| code@>;
|
||||
@<|MersenneTwister::refresh| code@>;
|
||||
|
||||
@
|
||||
@<|MersenneTwister| constructor code@>=
|
||||
inline MersenneTwister::MersenneTwister(uint32 iseed)
|
||||
{
|
||||
seed(iseed);
|
||||
}
|
||||
|
||||
@
|
||||
@<|MersenneTwister| copy constructor code@>=
|
||||
inline MersenneTwister::MersenneTwister(const MersenneTwister& mt)
|
||||
: stateptr(mt.stateptr)
|
||||
{
|
||||
memcpy(statevec, mt.statevec, sizeof(uint32)*STATE_SIZE);
|
||||
}
|
||||
|
||||
@
|
||||
@<|MersenneTwister::lrand| code@>=
|
||||
inline MersenneTwister::uint32 MersenneTwister::lrand()
|
||||
{
|
||||
if (stateptr >= STATE_SIZE)
|
||||
refresh();
|
||||
|
||||
register uint32 v = statevec[stateptr++];
|
||||
v ^= v >> 11;
|
||||
v ^= (v << 7) & 0x9d2c5680;
|
||||
v ^= (v << 15) & 0xefc60000;
|
||||
return (v ^ (v >> 18));
|
||||
}
|
||||
|
||||
@
|
||||
@<|MersenneTwister::drand| code@>=
|
||||
inline double MersenneTwister::drand()
|
||||
{
|
||||
uint32 a = lrand() >> 5;
|
||||
uint32 b = lrand() >> 6;
|
||||
return (a*67108864.0+b) * (1.0/9007199254740992.0);
|
||||
}
|
||||
|
||||
@ PRNG of D. Knuth
|
||||
@<|MersenneTwister::seed| code@>=
|
||||
inline void MersenneTwister::seed(uint32 iseed)
|
||||
{
|
||||
statevec[0] = iseed & 0xffffffffUL;
|
||||
for (int i = 1; i < STATE_SIZE; i++) {
|
||||
register uint32 val = statevec[i-1] >> 30;
|
||||
val ^= statevec[i-1];
|
||||
val *= 1812433253ul;
|
||||
val += i;
|
||||
statevec[i] = val & 0xffffffffUL;
|
||||
}
|
||||
|
||||
refresh();
|
||||
}
|
||||
|
||||
@
|
||||
@<|MersenneTwister::refresh| code@>=
|
||||
inline void MersenneTwister::refresh()
|
||||
{
|
||||
register uint32* p = statevec;
|
||||
for (int i = STATE_SIZE-RECUR_OFFSET; i--; ++p)
|
||||
*p = twist(p[RECUR_OFFSET], p[0], p[1]);
|
||||
for (int i = RECUR_OFFSET; --i; ++p)
|
||||
*p = twist(p[RECUR_OFFSET-STATE_SIZE], p[0], p[1]);
|
||||
*p = twist(p[RECUR_OFFSET-STATE_SIZE], p[0], statevec[0]);
|
||||
|
||||
stateptr = 0;
|
||||
}
|
||||
|
||||
@ End of {\tt mersenne\_twister.h} file.
|
|
@ -0,0 +1,123 @@
|
|||
@q $Id$ @>
|
||||
@q Copyright 2007, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt normal\_conjugate.cpp} file.
|
||||
|
||||
@c
|
||||
|
||||
#include "normal_conjugate.h"
|
||||
#include "kord_exception.h"
|
||||
|
||||
@<|NormalConj| diffuse prior constructor@>;
|
||||
@<|NormalConj| data update constructor@>;
|
||||
@<|NormalConj| copy constructor@>;
|
||||
@<|NormalConj::update| one observation code@>;
|
||||
@<|NormalConj::update| multiple observations code@>;
|
||||
@<|NormalConj::update| with |NormalConj| code@>;
|
||||
@<|NormalConj::getVariance| code@>;
|
||||
|
||||
@
|
||||
@<|NormalConj| diffuse prior constructor@>=
|
||||
NormalConj::NormalConj(int d)
|
||||
: mu(d), kappa(0), nu(-1), lambda(d,d)
|
||||
{
|
||||
mu.zeros();
|
||||
lambda.zeros();
|
||||
}
|
||||
|
||||
@
|
||||
@<|NormalConj| data update constructor@>=
|
||||
NormalConj::NormalConj(const ConstTwoDMatrix& ydata)
|
||||
: mu(ydata.numRows()), kappa(ydata.numCols()), nu(ydata.numCols()-1),
|
||||
lambda(ydata.numRows(), ydata.numRows())
|
||||
{
|
||||
mu.zeros();
|
||||
for (int i = 0; i < ydata.numCols(); i++)
|
||||
mu.add(1.0/ydata.numCols(), ConstVector(ydata, i));
|
||||
|
||||
lambda.zeros();
|
||||
for (int i = 0; i < ydata.numCols(); i++) {
|
||||
Vector diff(ConstVector(ydata, i));
|
||||
diff.add(-1, mu);
|
||||
lambda.addOuter(diff);
|
||||
}
|
||||
}
|
||||
|
||||
@
|
||||
@<|NormalConj| copy constructor@>=
|
||||
NormalConj::NormalConj(const NormalConj& nc)
|
||||
: mu(nc.mu), kappa(nc.kappa), nu(nc.nu), lambda(nc.lambda)
|
||||
{
|
||||
}
|
||||
|
||||
@ The method performs the following:
|
||||
$$\eqalign{
|
||||
\mu_1 = &\; {\kappa_0\over \kappa_0+1}\mu_0 + {1\over \kappa_0+1}y\cr
|
||||
\kappa_1 = &\; \kappa_0 + 1\cr
|
||||
\nu_1 = &\; \nu_0 + 1\cr
|
||||
\Lambda_1 = &\; \Lambda_0 + {\kappa_0\over\kappa_0+1}(y-\mu_0)(y-\mu_0)^T,
|
||||
}$$
|
||||
|
||||
@<|NormalConj::update| one observation code@>=
|
||||
void NormalConj::update(const ConstVector& y)
|
||||
{
|
||||
KORD_RAISE_IF(y.length() != mu.length(),
|
||||
"Wrong length of a vector in NormalConj::update");
|
||||
|
||||
mu.mult(kappa/(1.0+kappa));
|
||||
mu.add(1.0/(1.0+kappa), y);
|
||||
|
||||
Vector diff(y);
|
||||
diff.add(-1, mu);
|
||||
lambda.addOuter(diff, kappa/(1.0+kappa));
|
||||
|
||||
kappa++;
|
||||
nu++;
|
||||
}
|
||||
|
||||
@ The method evaluates the formula in the header file.
|
||||
|
||||
@<|NormalConj::update| multiple observations code@>=
|
||||
void NormalConj::update(const ConstTwoDMatrix& ydata)
|
||||
{
|
||||
NormalConj nc(ydata);
|
||||
update(nc);
|
||||
}
|
||||
|
||||
|
||||
@
|
||||
@<|NormalConj::update| with |NormalConj| code@>=
|
||||
void NormalConj::update(const NormalConj& nc)
|
||||
{
|
||||
double wold = ((double)kappa)/(kappa+nc.kappa);
|
||||
double wnew = 1-wold;
|
||||
|
||||
mu.mult(wold);
|
||||
mu.add(wnew, nc.mu);
|
||||
|
||||
Vector diff(nc.mu);
|
||||
diff.add(-1, mu);
|
||||
lambda.add(1.0, nc.lambda);
|
||||
lambda.addOuter(diff);
|
||||
|
||||
kappa = kappa + nc.kappa;
|
||||
nu = nu + nc.kappa;
|
||||
}
|
||||
|
||||
|
||||
@ This returns ${1\over \nu-d-1}\Lambda$, which is the mean of the
|
||||
variance in the posterior distribution. If the number of degrees of
|
||||
freedom is less than $d$, then NaNs are returned.
|
||||
|
||||
@<|NormalConj::getVariance| code@>=
|
||||
void NormalConj::getVariance(TwoDMatrix& v) const
|
||||
{
|
||||
if (nu > getDim()+1) {
|
||||
v = (const TwoDMatrix&)lambda;
|
||||
v.mult(1.0/(nu-getDim()-1));
|
||||
} else
|
||||
v.nans();
|
||||
}
|
||||
|
||||
|
||||
@ End of {\tt normal\_conjugate.cpp} file.
|
|
@ -0,0 +1,82 @@
|
|||
@q $Id$ @>
|
||||
@q Copyright 2007, Ondra Kamenik @>
|
||||
|
||||
@*2 Conjugate family for normal distribution. Start of {\tt
|
||||
normal\_conjugate.h} file.
|
||||
|
||||
The main purpose here is to implement a class representing conjugate
|
||||
distributions for mean and variance of the normal distribution. The
|
||||
class has two main methods: the first one is to update itself with
|
||||
respect to one observation, the second one is to update itself with
|
||||
respect to anothe object of the class. In the both methods, the
|
||||
previous state of the class corresponds to the prior distribution, and
|
||||
the final state corresponds to the posterior distribution.
|
||||
|
||||
The algrebra can be found in Gelman, Carlin, Stern, Rubin (p.87). It
|
||||
goes as follows: Prior conjugate distribution takes the following form:
|
||||
$$\eqalign{
|
||||
\Sigma \sim& {\rm InvWishart}_{\nu_0}(\Lambda_0^{-1}) \cr
|
||||
\mu\vert\Sigma \sim& N(\mu_0,\Sigma/\kappa_0)
|
||||
}$$
|
||||
If the observations are $y_1\ldots y_n$, then the posterior distribution has the
|
||||
same form with the following parameters:
|
||||
$$\eqalign{
|
||||
\mu_n = &\; {\kappa_0\over \kappa_0+n}\mu_0 + {n\over \kappa_0+n}\bar y\cr
|
||||
\kappa_n = &\; \kappa_0 + n\cr
|
||||
\nu_n = &\; \nu_0 + n\cr
|
||||
\Lambda_n = &\; \Lambda_0 + S + {\kappa_0 n\over\kappa_0+n}(\bar y-\mu_0)(\bar y-\mu_0)^T,
|
||||
}$$
|
||||
where
|
||||
$$\eqalign{
|
||||
\bar y = &\; {1\over n}\sum_{i=1}^ny_i\cr
|
||||
S = &\; \sum_{i=1}^n(y_i-\bar y)(y_i-\bar y)^T
|
||||
}$$
|
||||
|
||||
@s NormalConj int
|
||||
|
||||
@c
|
||||
#ifndef NORMAL_CONJUGATE_H
|
||||
#define NORMAL_CONJUGATE_H
|
||||
|
||||
#include "twod_matrix.h"
|
||||
|
||||
@<|NormalConj| class declaration@>;
|
||||
|
||||
#endif
|
||||
|
||||
@ The class is described by the four parameters: $\mu$, $\kappa$, $\nu$ and $\Lambda$.
|
||||
|
||||
@<|NormalConj| class declaration@>=
|
||||
class NormalConj {
|
||||
protected:@;
|
||||
Vector mu;
|
||||
int kappa;
|
||||
int nu;
|
||||
TwoDMatrix lambda;
|
||||
public:@;
|
||||
@<|NormalConj| constructors@>;
|
||||
virtual ~NormalConj() @+{}
|
||||
void update(const ConstVector& y);
|
||||
void update(const ConstTwoDMatrix& ydata);
|
||||
void update(const NormalConj& nc);
|
||||
int getDim() const
|
||||
{@+ return mu.length();@+}
|
||||
const Vector& getMean() const
|
||||
{@+ return mu;@+}
|
||||
void getVariance(TwoDMatrix& v) const;
|
||||
};
|
||||
|
||||
@ We provide the following constructors: The first constructs diffuse
|
||||
(Jeffrey's) prior. It sets $\kappa$, and $\Lambda$ to zeros, $nu$ to
|
||||
$-1$ and also the mean $\mu$ to zero (it should not be
|
||||
referenced). The second constructs the posterior using the diffuse
|
||||
prior and the observed data (columnwise). The third is a copy
|
||||
constructor.
|
||||
|
||||
@<|NormalConj| constructors@>=
|
||||
NormalConj(int d);
|
||||
NormalConj(const ConstTwoDMatrix& ydata);
|
||||
NormalConj(const NormalConj& nc);
|
||||
|
||||
|
||||
@ End of {\tt normal\_conjugate.h} file.
|
|
@ -0,0 +1,63 @@
|
|||
@q $Id: random.cweb 1491 2007-12-19 14:36:53Z kamenik $ @>
|
||||
@q Copyright 2007, Ondra Kamenik @>
|
||||
|
||||
@ Start of {\tt random.cpp} file.
|
||||
@c
|
||||
|
||||
#include "random.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <limits>
|
||||
#include <cmath>
|
||||
|
||||
@<|RandomGenerator::int_uniform| code@>;
|
||||
@<|RandomGenerator::normal| code@>;
|
||||
SystemRandomGenerator system_random_generator;
|
||||
@<|SystemRandomGenerator::uniform| code@>;
|
||||
@<|SystemRandomGenerator::initSeed| code@>;
|
||||
|
||||
@
|
||||
@<|RandomGenerator::int_uniform| code@>=
|
||||
int RandomGenerator::int_uniform()
|
||||
{
|
||||
double s = std::numeric_limits<int>::max()*uniform();
|
||||
return (int)s;
|
||||
}
|
||||
|
||||
@ This implements Marsaglia Polar Method.
|
||||
@<|RandomGenerator::normal| code@>=
|
||||
double RandomGenerator::normal()
|
||||
{
|
||||
double x1, x2;
|
||||
double w;
|
||||
do {
|
||||
x1 = 2*uniform()-1;
|
||||
x2 = 2*uniform()-1;
|
||||
w = x1*x1 + x2*x2;
|
||||
} while (w >= 1.0 || w < 1.0e-30);
|
||||
return x1*std::sqrt((-2.0*std::log(w))/w);
|
||||
}
|
||||
|
||||
@
|
||||
@<|SystemRandomGenerator::uniform| code@>=
|
||||
double SystemRandomGenerator::uniform()
|
||||
{
|
||||
#ifndef __MINGW32__
|
||||
return drand48();
|
||||
#else
|
||||
return ((double)rand())/RAND_MAX;
|
||||
#endif
|
||||
}
|
||||
|
||||
@
|
||||
@<|SystemRandomGenerator::initSeed| code@>=
|
||||
void SystemRandomGenerator::initSeed(int seed)
|
||||
{
|
||||
#ifndef __MINGW32__
|
||||
srand48(seed);
|
||||
#else
|
||||
srand(seed);
|
||||
#endif
|
||||
}
|
||||
|
||||
@ End of {\tt random.cpp} file.
|
|
@ -0,0 +1,39 @@
|
|||
@q $Id: random.hweb 2335 2009-01-14 10:35:21Z kamenik $ @>
|
||||
@q Copyright 2007, Ondra Kamenik @>
|
||||
|
||||
@*2 Random number generation. Start of {\tt random.h} file.
|
||||
|
||||
@s RandomGenerator int
|
||||
@s SystemRandomGenerator int
|
||||
|
||||
@c
|
||||
#ifndef RANDOM_H
|
||||
#define RANDOM_H
|
||||
|
||||
@<|RandomGenerator| class declaration@>;
|
||||
@<|SystemRandomGenerator| class declaration@>;
|
||||
extern SystemRandomGenerator system_random_generator;
|
||||
|
||||
#endif
|
||||
|
||||
@ This is a general interface to an object able to generate random
|
||||
numbers. Subclass needs to implement |uniform| method, other is, by
|
||||
default, implemented here.
|
||||
@<|RandomGenerator| class declaration@>=
|
||||
class RandomGenerator {
|
||||
public:@;
|
||||
virtual double uniform() = 0;
|
||||
int int_uniform();
|
||||
double normal();
|
||||
};
|
||||
|
||||
@ This implements |RandomGenerator| interface with system |drand| or
|
||||
|rand|. It is not thread aware.
|
||||
@<|SystemRandomGenerator| class declaration@>=
|
||||
class SystemRandomGenerator : public RandomGenerator {
|
||||
public:@;
|
||||
double uniform();
|
||||
void initSeed(int seed);
|
||||
};
|
||||
|
||||
@ End of {\tt random.h} file.
|
|
@ -0,0 +1,413 @@
|
|||
/* $Id: tests.cpp 148 2005-04-19 15:12:26Z kamenik $ */
|
||||
/* Copyright 2004, Ondra Kamenik */
|
||||
|
||||
#include "korder.h"
|
||||
#include "SylvException.h"
|
||||
|
||||
struct Rand {
|
||||
static void init(int n1, int n2, int n3, int n4, int n5);
|
||||
static double get(double m);
|
||||
static int get(int m);
|
||||
static bool discrete(double prob); // answers true with given probability
|
||||
};
|
||||
|
||||
void Rand::init(int n1, int n2, int n3, int n4, int n5)
|
||||
{
|
||||
long int seed = n1;
|
||||
seed = 256*seed+n2;
|
||||
seed = 256*seed+n3;
|
||||
seed = 256*seed+n4;
|
||||
seed = 256*seed+n5;
|
||||
srand48(seed);
|
||||
}
|
||||
|
||||
double Rand::get(double m)
|
||||
{
|
||||
return 2*m*(drand48()-0.5);
|
||||
}
|
||||
|
||||
int Rand::get(int m)
|
||||
{
|
||||
return (int)(Rand::get(0.9999*m));
|
||||
}
|
||||
|
||||
bool Rand::discrete(double prob)
|
||||
{
|
||||
return drand48() < prob;
|
||||
}
|
||||
|
||||
struct SparseGenerator {
|
||||
static FSSparseTensor* makeTensor(int dim, int nv, int r,
|
||||
double fill, double m);
|
||||
static void fillContainer(TensorContainer<FSSparseTensor>& c,
|
||||
int maxdim, int nv, int r, double m);
|
||||
};
|
||||
|
||||
FSSparseTensor* SparseGenerator::makeTensor(int dim, int nv, int r,
|
||||
double fill, double m)
|
||||
{
|
||||
FSSparseTensor* res = new FSSparseTensor(dim, nv, r);
|
||||
FFSTensor dummy(0, nv, dim);
|
||||
for (Tensor::index fi = dummy.begin(); fi != dummy.end(); ++fi) {
|
||||
for (int i = 0; i < r; i++) {
|
||||
if (Rand::discrete(fill)) {
|
||||
double x = Rand::get(m);
|
||||
res->insert(fi.getCoor(), i, x);
|
||||
}
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void SparseGenerator::fillContainer(TensorContainer<FSSparseTensor>& c,
|
||||
int maxdim, int nv, int r,
|
||||
double m)
|
||||
{
|
||||
Rand::init(maxdim, nv, r, (int)(5*m), 0);
|
||||
double fill = 0.5;
|
||||
for (int d = 1; d <= maxdim; d++) {
|
||||
c.insert(makeTensor(d,nv,r,fill,m));
|
||||
fill *= 0.3;
|
||||
}
|
||||
}
|
||||
|
||||
const double vdata [] = { // 3x3
|
||||
0.1307870268, 0.1241940078, 0.1356703123,
|
||||
0.1241940078, 0.1986920419, 0.2010160581,
|
||||
0.1356703123, 0.2010160581, 0.2160336975
|
||||
};
|
||||
|
||||
|
||||
const double gy_data [] = { // 8x4
|
||||
0.3985178619, -0.5688233582, 0.9572900437, -0.6606847776, 0.1453004017,
|
||||
0.3025310675, -0.8627437750, -0.6903410191, 0.4751910580, -0.7270018589,
|
||||
-0.0939612498, -0.1463831989, 0.6742110220, 0.6046671043, 0.5215893126,
|
||||
-1.0412969986, -0.3524898417, -1.0986703430, 0.8006531522, 0.8879776376,
|
||||
-0.1037608317, -0.5587378073, -0.1010366945, 0.9462411248, -0.2439199881,
|
||||
1.3420621236, -0.7820285935, 0.3205293447, 0.3606124791, 0.2975422208,
|
||||
-0.5452861965, 1.6320340279
|
||||
};
|
||||
|
||||
const double gu_data [] = { // just some numbers, no structure
|
||||
1.8415286914, -0.2638743845, 1.7690713274, 0.9668585956, 0.2303143646,
|
||||
-0.2229624279, -0.4381991822, 1.0082401405, -0.3186555860, -0.0624691529,
|
||||
-0.5189085756, 1.4269672156, 0.1163282969, 1.4020183445, -0.0952660426,
|
||||
0.2099097124, 0.6912400502, -0.5180935114, 0.5288316624, 0.2188053448,
|
||||
0.5715516767, 0.7813893410, -0.6385073106, 0.8335131513, 0.3605202168,
|
||||
-1.1167944865, -1.2263750934, 0.6113636081, 0.6964915482, -0.6451217688,
|
||||
0.4062810500, -2.0552251116, -1.6383406284, 0.0198915095, 0.0111014458,
|
||||
-1.2421792262, -1.0724161722, -0.4276904972, 0.1801494950, -2.0716473264
|
||||
};
|
||||
|
||||
const double vdata2 [] = { // 10x10 positive definite
|
||||
0.79666, -0.15536, 0.05667, -0.21026, 0.20262, 0.28505, 0.60341, -0.09703, 0.32363, 0.13299,
|
||||
-0.15536, 0.64380, -0.01131, 0.00980, 0.03755, 0.43791, 0.21784, -0.31755, -0.55911, -0.29655,
|
||||
0.05667, -0.01131, 0.56165, -0.34357, -0.40584, 0.20990, 0.28348, 0.20398, -0.19856, 0.35820,
|
||||
-0.21026, 0.00980, -0.34357, 0.56147, 0.10972, -0.34146, -0.49906, -0.19685, 0.21088, -0.31560,
|
||||
0.20262, 0.03755, -0.40584, 0.10972, 0.72278, 0.02155, 0.04089, -0.19696, 0.03446, -0.12919,
|
||||
0.28505, 0.43791, 0.20990, -0.34146, 0.02155, 0.75867, 0.77699, -0.31125, -0.55141, -0.02155,
|
||||
0.60341, 0.21784, 0.28348, -0.49906, 0.04089, 0.77699, 1.34553, -0.18613, -0.25811, -0.19016,
|
||||
-0.09703, -0.31755, 0.20398, -0.19685, -0.19696, -0.31125, -0.18613, 0.59470, 0.08386, 0.41750,
|
||||
0.32363, -0.55911, -0.19856, 0.21088, 0.03446, -0.55141, -0.25811, 0.08386, 0.98917, -0.12992,
|
||||
0.13299, -0.29655, 0.35820, -0.31560, -0.12919, -0.02155, -0.19016, 0.41750, -0.12992, 0.89608
|
||||
};
|
||||
|
||||
const double gy_data2 [] = { // 600 items make gy 30x20, whose gy(6:25,:) has spectrum within unit
|
||||
0.39414, -0.29766, 0.08948, -0.19204, -0.00750, 0.21159, 0.05494, 0.06225, 0.01771, 0.21913,
|
||||
-0.01373, 0.20086, -0.06086, -0.10955, 0.14424, -0.08390, 0.03948, -0.14713, 0.11674, 0.05091,
|
||||
0.24039, 0.28307, -0.11835, 0.13030, 0.11682, -0.27444, -0.19311, -0.16654, 0.12867, 0.25116,
|
||||
-0.19781, 0.45242, -0.15862, 0.24428, -0.11966, 0.11483, -0.32279, 0.29727, 0.20934, -0.18190,
|
||||
-0.15080, -0.09477, -0.30551, -0.02672, -0.26919, 0.11165, -0.06390, 0.03449, -0.26622, 0.22197,
|
||||
0.45141, -0.41683, 0.09760, 0.31094, -0.01652, 0.05809, -0.04514, -0.05645, 0.00554, 0.47980,
|
||||
0.11726, 0.42459, -0.13136, -0.30902, -0.14648, 0.11455, 0.02947, -0.03835, -0.04044, 0.03559,
|
||||
-0.26575, -0.01783, 0.31243, -0.14412, -0.13218, -0.05080, 0.18576, 0.13840, -0.05560, 0.35530,
|
||||
-0.25573, -0.11560, 0.15187, -0.18431, 0.08193, -0.32278, 0.17560, -0.05529, -0.10020, -0.23088,
|
||||
-0.20979, -0.49245, 0.09915, -0.16909, -0.03443, 0.19497, 0.18473, 0.25662, 0.29605, -0.20531,
|
||||
-0.39244, -0.43369, 0.05588, 0.24823, -0.14236, -0.08311, 0.16371, -0.19975, 0.30605, -0.17087,
|
||||
-0.01270, 0.00123, -0.22426, -0.13810, 0.05079, 0.06971, 0.01922, -0.09952, -0.23177, -0.41962,
|
||||
-0.41991, 0.41430, -0.04247, -0.13706, -0.12048, -0.28906, -0.22813, -0.25057, -0.18579, -0.20642,
|
||||
-0.47976, 0.25490, -0.05138, -0.30794, 0.31651, 0.02034, 0.12954, -0.20110, 0.13336, -0.40775,
|
||||
-0.30195, -0.13704, 0.12396, 0.28152, 0.02986, 0.27669, 0.24623, 0.08635, -0.11956, -0.02949,
|
||||
0.37401, 0.20838, 0.24801, -0.26872, 0.11195, 0.00315, -0.19069, 0.12839, -0.23036, -0.48228,
|
||||
0.08434, -0.39872, -0.28896, -0.28754, 0.24668, 0.23285, 0.25437, 0.10456, -0.14124, 0.20483,
|
||||
-0.19117, -0.33836, -0.24875, 0.08207, -0.03930, 0.20364, 0.15384, -0.15270, 0.24372, -0.11199,
|
||||
-0.46591, 0.30319, 0.05745, 0.09084, 0.06058, 0.31884, 0.05071, -0.28899, -0.30793, -0.03566,
|
||||
0.02286, 0.28178, 0.00736, -0.31378, -0.18144, -0.22346, -0.27239, 0.31043, -0.26228, 0.22181,
|
||||
-0.15096, -0.36953, -0.06032, 0.21496, 0.29545, -0.13112, 0.16420, -0.07573, -0.43111, -0.43057,
|
||||
0.26716, -0.31209, -0.05866, -0.29101, -0.27437, -0.18727, 0.28732, -0.19014, 0.08837, 0.30405,
|
||||
0.06103, -0.35612, 0.00173, 0.25134, -0.08987, -0.22766, -0.03254, -0.18662, -0.08491, 0.49401,
|
||||
-0.12145, -0.02961, -0.03668, -0.30043, -0.08555, 0.01701, -0.12544, 0.10969, -0.48202, 0.07245,
|
||||
0.20673, 0.11408, 0.04343, -0.01815, -0.31594, -0.23632, -0.06258, -0.27474, 0.12180, 0.16613,
|
||||
-0.37931, 0.30219, 0.15765, 0.25489, 0.17529, -0.17020, -0.30060, 0.22058, -0.02450, -0.42143,
|
||||
0.49642, 0.46899, -0.28552, -0.22549, -0.01333, 0.21567, 0.22251, 0.21639, -0.19194, -0.19140,
|
||||
-0.24106, 0.10952, -0.11019, 0.29763, -0.02039, -0.25748, 0.23169, 0.01357, 0.09802, -0.19022,
|
||||
0.37604, -0.40777, 0.18131, -0.10258, 0.29573, -0.31773, 0.09069, -0.02198, -0.26594, 0.48302,
|
||||
-0.10041, 0.20210, -0.05609, -0.01169, -0.17339, 0.17862, -0.22502, 0.29009, -0.45160, 0.19771,
|
||||
0.27634, 0.31695, -0.09993, 0.17167, 0.12394, 0.28088, -0.12502, -0.16967, -0.06296, -0.17036,
|
||||
0.27320, 0.01595, 0.16955, 0.30146, -0.15173, -0.29807, 0.08178, -0.06811, 0.21655, 0.26348,
|
||||
0.06316, 0.45661, -0.29756, -0.05742, -0.14715, -0.03037, -0.16656, -0.08768, 0.38078, 0.40679,
|
||||
-0.32779, -0.09106, 0.16107, -0.07301, 0.07700, -0.22694, -0.15692, -0.02548, 0.38749, -0.12203,
|
||||
-0.02980, -0.22067, 0.00680, -0.23058, -0.29112, 0.23032, -0.16026, 0.23392, -0.09990, 0.03628,
|
||||
-0.42592, -0.33474, -0.09499, -0.17442, -0.20110, 0.24618, -0.06418, -0.06715, 0.40754, 0.29377,
|
||||
0.29543, -0.16832, -0.08468, 0.06491, -0.01410, 0.19988, 0.24950, 0.14626, -0.27851, 0.06079,
|
||||
0.48134, -0.13475, 0.25398, 0.11738, 0.23369, -0.00661, -0.16811, -0.04557, -0.12030, -0.39527,
|
||||
-0.35760, 0.01840, -0.15941, 0.03290, 0.09988, -0.08307, 0.06644, -0.24637, 0.34112, -0.08026,
|
||||
0.00951, 0.27656, 0.16247, 0.28217, 0.17198, -0.16389, -0.03835, -0.02675, -0.08032, -0.21045,
|
||||
-0.38946, 0.23207, 0.10987, -0.31674, -0.28653, -0.27430, -0.29109, -0.00648, 0.38431, -0.38478,
|
||||
-0.41195, -0.19364, -0.20977, -0.05524, 0.05558, -0.20109, 0.11803, -0.19884, 0.43318, -0.39255,
|
||||
0.26612, -0.21771, 0.12471, 0.12856, -0.15104, -0.11676, 0.17582, -0.25330, 0.00298, -0.31712,
|
||||
0.21532, -0.20319, 0.14507, -0.04588, -0.22995, -0.06470, 0.18849, -0.13444, 0.37107, 0.07387,
|
||||
-0.14008, 0.09896, 0.13727, -0.28417, -0.09461, -0.18703, 0.04080, 0.02343, -0.49988, 0.17993,
|
||||
0.23189, -0.30581, -0.18334, -0.09667, -0.27699, -0.05998, 0.09118, -0.32453, 0.46251, 0.41500,
|
||||
-0.45314, -0.00544, 0.08529, 0.29099, -0.00937, -0.31650, 0.26163, 0.14506, 0.37498, -0.16454,
|
||||
0.35215, 0.31642, -0.09161, -0.31452, -0.04792, -0.04677, -0.19523, 0.27998, 0.05491, 0.44461,
|
||||
-0.01258, -0.27887, 0.18361, -0.04539, -0.02977, 0.30821, 0.29454, -0.17932, 0.16193, 0.23934,
|
||||
0.47923, 0.25373, 0.23258, 0.31484, -0.17958, -0.01136, 0.17681, 0.12869, 0.03235, 0.43762,
|
||||
0.13734, -0.09433, -0.03735, 0.17949, 0.14122, -0.17814, 0.06359, 0.16044, 0.12249, -0.22314,
|
||||
0.40775, 0.05147, 0.12389, 0.04290, -0.01642, 0.00082, -0.18056, 0.02875, 0.32690, 0.17712,
|
||||
0.34001, -0.21581, -0.01086, -0.18180, 0.17480, -0.17774, -0.07503, 0.28438, -0.19747, 0.29595,
|
||||
-0.28002, -0.02073, -0.16522, -0.18234, -0.20565, 0.29620, 0.07502, 0.01429, -0.31418, 0.43693,
|
||||
-0.12212, 0.11178, -0.28503, 0.04683, 0.00072, 0.05566, 0.18857, 0.26101, -0.38891, -0.21216,
|
||||
-0.21850, -0.15147, -0.30749, -0.23762, 0.14984, 0.03535, -0.02862, -0.00105, -0.39907, -0.06909,
|
||||
-0.36094, 0.21717, 0.15930, -0.18924, 0.13741, 0.01039, 0.13613, 0.00659, 0.07676, -0.13711,
|
||||
0.24285, -0.07564, -0.28349, -0.15658, 0.03135, -0.30909, -0.22534, 0.17363, -0.19376, 0.26038,
|
||||
0.05546, -0.22607, 0.32420, -0.02552, -0.05400, 0.13388, 0.04643, -0.31535, -0.06181, 0.30237,
|
||||
-0.04680, -0.29441, 0.12231, 0.03960, -0.01188, 0.01406, 0.25402, 0.03315, 0.25026, -0.10922
|
||||
};
|
||||
|
||||
const double gu_data2 [] = { // raw data 300 items
|
||||
0.26599, 0.41329, 0.31846, 0.92590, 0.43050, 0.17466, 0.02322, 0.72621, 0.37921, 0.70597,
|
||||
0.97098, 0.14023, 0.57619, 0.09938, 0.02281, 0.92341, 0.72654, 0.71000, 0.76687, 0.70182,
|
||||
0.88752, 0.49524, 0.42549, 0.42806, 0.57615, 0.76051, 0.15341, 0.47457, 0.60066, 0.40880,
|
||||
0.20668, 0.41949, 0.97620, 0.94318, 0.71491, 0.56402, 0.23553, 0.94387, 0.78567, 0.06362,
|
||||
0.85252, 0.86262, 0.25190, 0.03274, 0.93216, 0.37971, 0.08797, 0.14596, 0.73871, 0.06574,
|
||||
0.67447, 0.28575, 0.43911, 0.92133, 0.12327, 0.87762, 0.71060, 0.07141, 0.55443, 0.53310,
|
||||
0.91529, 0.25121, 0.07593, 0.94490, 0.28656, 0.82174, 0.68887, 0.67337, 0.99291, 0.03316,
|
||||
0.02849, 0.33891, 0.25594, 0.90071, 0.01248, 0.67871, 0.65953, 0.65369, 0.97574, 0.31578,
|
||||
0.23678, 0.39220, 0.06706, 0.80943, 0.57694, 0.08220, 0.18151, 0.19969, 0.37096, 0.37858,
|
||||
0.70153, 0.46816, 0.76511, 0.02520, 0.39387, 0.25527, 0.39050, 0.60141, 0.30322, 0.46195,
|
||||
0.12025, 0.33616, 0.04174, 0.00196, 0.68886, 0.74445, 0.15869, 0.18994, 0.95195, 0.62874,
|
||||
0.82874, 0.53369, 0.34383, 0.50752, 0.97023, 0.22695, 0.62407, 0.25840, 0.71279, 0.28785,
|
||||
0.31611, 0.20391, 0.19702, 0.40760, 0.85158, 0.68369, 0.63760, 0.09879, 0.11924, 0.32920,
|
||||
0.53052, 0.15900, 0.21229, 0.84080, 0.33933, 0.93651, 0.42705, 0.06199, 0.50092, 0.47192,
|
||||
0.57152, 0.01818, 0.31404, 0.50173, 0.87725, 0.50530, 0.10717, 0.04035, 0.32901, 0.33538,
|
||||
0.04780, 0.40984, 0.78216, 0.91288, 0.11314, 0.25248, 0.23823, 0.74001, 0.48089, 0.55531,
|
||||
0.82486, 0.01058, 0.05409, 0.44357, 0.52641, 0.68188, 0.94629, 0.61627, 0.33037, 0.11961,
|
||||
0.57988, 0.19653, 0.91902, 0.59838, 0.52974, 0.28364, 0.45767, 0.65836, 0.63045, 0.76140,
|
||||
0.27918, 0.27256, 0.46035, 0.77418, 0.92918, 0.14095, 0.89645, 0.25146, 0.21172, 0.47910,
|
||||
0.95451, 0.34377, 0.29927, 0.79220, 0.97654, 0.67591, 0.44385, 0.38434, 0.44860, 0.28170,
|
||||
0.90712, 0.20337, 0.00292, 0.55046, 0.62255, 0.45127, 0.80896, 0.43965, 0.59145, 0.23801,
|
||||
0.33601, 0.30119, 0.89935, 0.40850, 0.98226, 0.75430, 0.68318, 0.65407, 0.68067, 0.32942,
|
||||
0.11756, 0.27626, 0.83879, 0.72174, 0.75430, 0.13702, 0.03402, 0.58781, 0.07393, 0.23067,
|
||||
0.92537, 0.29445, 0.43437, 0.47685, 0.54548, 0.66082, 0.23805, 0.60208, 0.94337, 0.21363,
|
||||
0.72637, 0.57181, 0.77679, 0.63931, 0.72860, 0.38901, 0.94920, 0.04535, 0.12863, 0.40550,
|
||||
0.90095, 0.21418, 0.13953, 0.99639, 0.02526, 0.70018, 0.21828, 0.20294, 0.20191, 0.30954,
|
||||
0.39490, 0.68955, 0.11506, 0.15748, 0.40252, 0.91680, 0.61547, 0.78443, 0.19693, 0.67630,
|
||||
0.56552, 0.58556, 0.53554, 0.53507, 0.09831, 0.21229, 0.83135, 0.26375, 0.89287, 0.97069,
|
||||
0.70615, 0.42041, 0.43117, 0.21291, 0.26086, 0.26978, 0.77340, 0.43833, 0.46179, 0.54418,
|
||||
0.67878, 0.42776, 0.61454, 0.55915, 0.36363, 0.31999, 0.42442, 0.86649, 0.62513, 0.02047
|
||||
};
|
||||
|
||||
class TestRunnable {
|
||||
char name[100];
|
||||
public:
|
||||
int dim; // dimension of the solved problem
|
||||
int nvar; // number of variable of the solved problem
|
||||
TestRunnable(const char* n, int d, int nv)
|
||||
: dim(d), nvar(nv)
|
||||
{strncpy(name, n, 100);}
|
||||
bool test() const;
|
||||
virtual bool run() const =0;
|
||||
const char* getName() const
|
||||
{return name;}
|
||||
protected:
|
||||
static double korder_unfold_fold(int maxdim, int unfold_dim,
|
||||
int nstat, int npred, int nboth, int forw,
|
||||
const TwoDMatrix& gy, const TwoDMatrix& gu,
|
||||
const TwoDMatrix& v);
|
||||
};
|
||||
|
||||
|
||||
bool TestRunnable::test() const
|
||||
{
|
||||
printf("Running test <%s>\n",name);
|
||||
clock_t start = clock();
|
||||
bool passed = run();
|
||||
clock_t end = clock();
|
||||
printf("CPU time %8.4g (CPU seconds)..................",
|
||||
((double)(end-start))/CLOCKS_PER_SEC);
|
||||
if (passed) {
|
||||
printf("passed\n\n");
|
||||
return passed;
|
||||
} else {
|
||||
printf("FAILED\n\n");
|
||||
return passed;
|
||||
}
|
||||
}
|
||||
|
||||
double TestRunnable::korder_unfold_fold(int maxdim, int unfold_dim,
|
||||
int nstat, int npred, int nboth, int nforw,
|
||||
const TwoDMatrix& gy, const TwoDMatrix& gu,
|
||||
const TwoDMatrix& v)
|
||||
{
|
||||
TensorContainer<FSSparseTensor> c(1);
|
||||
int ny = nstat+npred+nboth+nforw;
|
||||
int nu = v.nrows();
|
||||
int nz = nboth+nforw+ny+nboth+npred+nu;
|
||||
SparseGenerator::fillContainer(c, maxdim, nz, ny, 5.0);
|
||||
for (int d = 1; d <= maxdim; d++) {
|
||||
printf("\ttensor fill for dim=%d is: %3.2f %%\n",
|
||||
d, c.get(Symmetry(d))->getFillFactor()*100.0);
|
||||
}
|
||||
Journal jr("out.txt");
|
||||
KOrder kord(nstat, npred, nboth, nforw, c, gy, gu, v, jr);
|
||||
// perform unfolded steps until unfold_dim
|
||||
double maxerror = 0.0;
|
||||
for (int d = 2; d <= unfold_dim; d++) {
|
||||
clock_t pertime = clock();
|
||||
kord.performStep<KOrder::unfold>(d);
|
||||
pertime = clock()-pertime;
|
||||
printf("\ttime for unfolded step dim=%d: %8.4g\n",
|
||||
d, ((double)(pertime))/CLOCKS_PER_SEC);
|
||||
clock_t checktime = clock();
|
||||
double err = kord.check<KOrder::unfold>(d);
|
||||
checktime = clock()-checktime;
|
||||
printf("\ttime for step check dim=%d: %8.4g\n",
|
||||
d, ((double)(checktime))/CLOCKS_PER_SEC);
|
||||
printf("\tmax error in step dim=%d: %10.6g\n",
|
||||
d, err);
|
||||
if (maxerror < err)
|
||||
maxerror = err;
|
||||
}
|
||||
// perform folded steps until maxdim
|
||||
if (unfold_dim < maxdim) {
|
||||
clock_t swtime = clock();
|
||||
kord.switchToFolded();
|
||||
swtime = clock()-swtime;
|
||||
printf("\ttime for switching dim=%d: %8.4g\n",
|
||||
unfold_dim, ((double)(swtime))/CLOCKS_PER_SEC);
|
||||
|
||||
for (int d = unfold_dim+1; d <= maxdim; d++) {
|
||||
clock_t pertime = clock();
|
||||
kord.performStep<KOrder::fold>(d);
|
||||
pertime = clock()-pertime;
|
||||
printf("\ttime for folded step dim=%d: %8.4g\n",
|
||||
d, ((double)(pertime))/CLOCKS_PER_SEC);
|
||||
clock_t checktime = clock();
|
||||
double err = kord.check<KOrder::fold>(d);
|
||||
checktime = clock()-checktime;
|
||||
printf("\ttime for step check dim=%d: %8.4g\n",
|
||||
d, ((double)(checktime))/CLOCKS_PER_SEC);
|
||||
printf("\tmax error in step dim=%d: %10.6g\n",
|
||||
d, err);
|
||||
if (maxerror < err)
|
||||
maxerror = err;
|
||||
}
|
||||
}
|
||||
return maxerror;
|
||||
}
|
||||
|
||||
class UnfoldKOrderSmall : public TestRunnable {
|
||||
public:
|
||||
UnfoldKOrderSmall()
|
||||
: TestRunnable("unfold-3 fold-4 korder (stat=2,pred=3,both=1,forw=2,u=3,dim=4)",
|
||||
4, 18) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
TwoDMatrix gy(8, 4, gy_data);
|
||||
TwoDMatrix gu(8, 3, gu_data);
|
||||
TwoDMatrix v(3, 3, vdata);
|
||||
double err = korder_unfold_fold(4, 3, 2, 3, 1, 2,
|
||||
gy, gu, v);
|
||||
|
||||
return err < 0.08;
|
||||
}
|
||||
};
|
||||
|
||||
// same dimension as Smets & Wouters
|
||||
class UnfoldKOrderSW : public TestRunnable {
|
||||
public:
|
||||
UnfoldKOrderSW()
|
||||
: TestRunnable("unfold S&W korder (stat=5,pred=12,both=8,forw=5,u=10,dim=4)",
|
||||
4, 73) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
TwoDMatrix gy(30, 20, gy_data2);
|
||||
TwoDMatrix gu(30, 10, gu_data2);
|
||||
TwoDMatrix v(10, 10, vdata2);
|
||||
v.mult(0.001);
|
||||
gu.mult(.01);
|
||||
double err = korder_unfold_fold(4, 4, 5, 12, 8, 5,
|
||||
gy, gu, v);
|
||||
|
||||
return err < 0.08;
|
||||
}
|
||||
};
|
||||
|
||||
class UnfoldFoldKOrderSW : public TestRunnable {
|
||||
public:
|
||||
UnfoldFoldKOrderSW()
|
||||
: TestRunnable("unfold-2 fold-3 S&W korder (stat=5,pred=12,both=8,forw=5,u=10,dim=3)",
|
||||
4, 73) {}
|
||||
|
||||
bool run() const
|
||||
{
|
||||
TwoDMatrix gy(30, 20, gy_data2);
|
||||
TwoDMatrix gu(30, 10, gu_data2);
|
||||
TwoDMatrix v(10, 10, vdata2);
|
||||
v.mult(0.001);
|
||||
gu.mult(.01);
|
||||
double err = korder_unfold_fold(4, 3, 5, 12, 8, 5,
|
||||
gy, gu, v);
|
||||
|
||||
return err < 0.08;
|
||||
}
|
||||
};
|
||||
|
||||
int main()
|
||||
{
|
||||
TestRunnable* all_tests[50];
|
||||
// fill in vector of all tests
|
||||
int num_tests = 0;
|
||||
all_tests[num_tests++] = new UnfoldKOrderSmall();
|
||||
all_tests[num_tests++] = new UnfoldKOrderSW();
|
||||
all_tests[num_tests++] = new UnfoldFoldKOrderSW();
|
||||
|
||||
// find maximum dimension and maximum nvar
|
||||
int dmax=0;
|
||||
int nvmax = 0;
|
||||
for (int i = 0; i < num_tests; i++) {
|
||||
if (dmax < all_tests[i]->dim)
|
||||
dmax = all_tests[i]->dim;
|
||||
if (nvmax < all_tests[i]->nvar)
|
||||
nvmax = all_tests[i]->nvar;
|
||||
}
|
||||
tls.init(dmax, nvmax); // initialize library
|
||||
|
||||
// launch the tests
|
||||
int success = 0;
|
||||
for (int i = 0; i < num_tests; i++) {
|
||||
try {
|
||||
if (all_tests[i]->test())
|
||||
success++;
|
||||
} catch (const TLException& e) {
|
||||
printf("Caugth TL exception in <%s>:\n", all_tests[i]->getName());
|
||||
e.print();
|
||||
} catch (SylvException& e) {
|
||||
printf("Caught Sylv exception in <%s>:\n", all_tests[i]->getName());
|
||||
e.printMessage();
|
||||
}
|
||||
}
|
||||
|
||||
printf("There were %d tests that failed out of %d tests run.\n",
|
||||
num_tests - success, num_tests);
|
||||
|
||||
// destroy
|
||||
for (int i = 0; i < num_tests; i++) {
|
||||
delete all_tests[i];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,113 @@
|
|||
# Copyright (C) 2005, Ondra Kamenik
|
||||
|
||||
# $Id: Makefile 1211 2007-03-19 21:43:42Z kamenik $
|
||||
|
||||
CC_FLAGS := -Wall -I../..
|
||||
|
||||
ifeq ($(DEBUG),yes)
|
||||
CC_FLAGS := $(CC_FLAGS) -g
|
||||
else
|
||||
CC_FLAGS := $(CC_FLAGS) -O3
|
||||
endif
|
||||
|
||||
objects := $(patsubst %.cpp,%.o,$(wildcard *.cpp))
|
||||
headers := $(wildcard *.h)
|
||||
source := $(wildcard *.cpp)
|
||||
objects := $(objects) formula_tab.o formula_ll.o matrix_tab.o matrix_ll.o \
|
||||
assign_tab.o assign_ll.o namelist_tab.o namelist_ll.o \
|
||||
csv_tab.o csv_ll.o
|
||||
headers := $(headers) formula_tab.hh matrix_tab.hh assign_tab.hh namelist_tab.hh csv_tab.hh
|
||||
source := $(source) formula_tab.cc formula_ll.cc formula.y formula.lex \
|
||||
matrix_tab.cc matrix_ll.cc matrix.y matrix.lex \
|
||||
assign_tab.cc assign_ll.cc assign.y assign.lex \
|
||||
namelist_tab.cc namelist_ll.cc namelist.y namelist.lex \
|
||||
csv_tab.cc csv_ll.cc csv.y csv.lex
|
||||
|
||||
formula_tab.cc: formula.y tree.h formula_parser.h
|
||||
bison -d -t --verbose -oformula_tab.cc formula.y
|
||||
|
||||
formula_tab.hh: formula.y tree.h formula_parser.h
|
||||
bison -d -t --verbose -oformula_tab.cc formula.y
|
||||
|
||||
formula_ll.cc: formula.lex formula_tab.hh
|
||||
flex -i -oformula_ll.cc formula.lex
|
||||
|
||||
formula_ll.o: formula_ll.cc $(headers)
|
||||
$(CC) -O3 -c formula_ll.cc
|
||||
|
||||
formula_tab.o: formula_tab.cc $(headers)
|
||||
$(CC) -O3 -c formula_tab.cc
|
||||
|
||||
matrix_tab.cc: matrix.y tree.h matrix_parser.h
|
||||
bison -d -t --verbose -omatrix_tab.cc matrix.y
|
||||
|
||||
matrix_tab.hh: matrix.y tree.h matrix_parser.h
|
||||
bison -d -t --verbose -omatrix_tab.cc matrix.y
|
||||
|
||||
matrix_ll.cc: matrix.lex matrix_tab.hh
|
||||
flex -i -omatrix_ll.cc matrix.lex
|
||||
|
||||
matrix_ll.o: matrix_ll.cc $(headers)
|
||||
$(CC) -O3 -c matrix_ll.cc
|
||||
|
||||
matrix_tab.o: matrix_tab.cc $(headers)
|
||||
$(CC) -O3 -c matrix_tab.cc
|
||||
|
||||
assign_tab.cc: assign.y static_atoms.h atom_assignings.h
|
||||
bison -d -t --verbose -oassign_tab.cc assign.y
|
||||
|
||||
assign_tab.hh: assign.y static_atoms.h atom_assignings.h
|
||||
bison -d -t --verbose -oassign_tab.cc assign.y
|
||||
|
||||
assign_ll.cc: assign.lex assign_tab.hh
|
||||
flex -i -oassign_ll.cc assign.lex
|
||||
|
||||
assign_ll.o: assign_ll.cc $(headers)
|
||||
$(CC) -O3 -c assign_ll.cc
|
||||
|
||||
assign_tab.o: assign_tab.cc $(hsource)
|
||||
$(CC) -O3 -c assign_tab.cc
|
||||
|
||||
namelist_tab.cc: namelist.y namelist.h
|
||||
bison -d -t --verbose -onamelist_tab.cc namelist.y
|
||||
|
||||
namelist_tab.hh: namelist.y namelist.h
|
||||
bison -d -t --verbose -onamelist_tab.cc namelist.y
|
||||
|
||||
namelist_ll.cc: namelist.lex namelist_tab.hh
|
||||
flex -i -onamelist_ll.cc namelist.lex
|
||||
|
||||
namelist_ll.o: namelist_ll.cc $(headers)
|
||||
$(CC) -O3 -c namelist_ll.cc
|
||||
|
||||
namelist_tab.o: namelist_tab.cc $(hsource)
|
||||
$(CC) -O3 -c namelist_tab.cc
|
||||
|
||||
csv_tab.cc: csv.y csv_parser.h
|
||||
bison -d -t --verbose -ocsv_tab.cc csv.y
|
||||
|
||||
csv_tab.hh: csv.y csv_parser.h
|
||||
bison -d -t --verbose -ocsv_tab.cc csv.y
|
||||
|
||||
csv_ll.cc: csv.lex csv_tab.hh
|
||||
flex -i -ocsv_ll.cc csv.lex
|
||||
|
||||
csv_ll.o: csv_ll.cc $(headers)
|
||||
$(CC) -O3 -c csv_ll.cc
|
||||
|
||||
csv_tab.o: csv_tab.cc $(hsource)
|
||||
$(CC) -O3 -c csv_tab.cc
|
||||
|
||||
%.o: %.cpp $(headers)
|
||||
$(CC) $(CC_FLAGS) -c $*.cpp
|
||||
|
||||
parser.a: $(objects) $(source) $(headers)
|
||||
ar cr parser.a $(objects)
|
||||
|
||||
clear:
|
||||
rm -f *~
|
||||
rm -f *.o
|
||||
rm -f *.cc *.hh
|
||||
rm -f parser.a
|
||||
rm -rf html/
|
||||
rm -rf latex/
|
|
@ -0,0 +1,54 @@
|
|||
%{
|
||||
#include "location.h"
|
||||
#include "assign_tab.hh"
|
||||
|
||||
extern YYLTYPE asgn_lloc;
|
||||
|
||||
#define YY_USER_ACTION SET_LLOC(asgn_);
|
||||
%}
|
||||
|
||||
%option nounput
|
||||
%option noyy_top_state
|
||||
%option stack
|
||||
%option yylineno
|
||||
%option prefix="asgn_"
|
||||
%option never-interactive
|
||||
%x CMT
|
||||
|
||||
%%
|
||||
|
||||
/* comments */
|
||||
<*>"/*" {yy_push_state(CMT);}
|
||||
<CMT>[^*\n]*
|
||||
<CMT>"*"+[^*/\n]*
|
||||
<CMT>"*"+"/" {yy_pop_state();}
|
||||
<CMT>[\n]
|
||||
"//".*\n
|
||||
|
||||
/* spaces */
|
||||
[ \t\r\n] {return BLANK;}
|
||||
|
||||
/* names */
|
||||
[A-Za-z_][A-Za-z0-9_]* {
|
||||
asgn_lval.string = asgn_text;
|
||||
return NAME;
|
||||
}
|
||||
|
||||
; {return SEMICOLON;}
|
||||
= {return EQUAL_SIGN;}
|
||||
. {
|
||||
asgn_lval.character = asgn_text[0];
|
||||
return CHARACTER;
|
||||
}
|
||||
|
||||
%%
|
||||
|
||||
int asgn_wrap()
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
void asgn__destroy_buffer(void* p)
|
||||
{
|
||||
asgn__delete_buffer((YY_BUFFER_STATE)p);
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
%{
|
||||
/* Copyright 2006, Ondra Kamenik */
|
||||
|
||||
/* $Id: assign.y 1748 2008-03-28 11:52:07Z kamenik $ */
|
||||
|
||||
#include "location.h"
|
||||
#include "atom_assignings.h"
|
||||
#include "assign_tab.hh"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
int asgn_error(char*);
|
||||
int asgn_lex(void);
|
||||
extern int asgn_lineno;
|
||||
extern ogp::AtomAssignings* aparser;
|
||||
|
||||
%}
|
||||
|
||||
%union {
|
||||
int integer;
|
||||
char *string;
|
||||
char character;
|
||||
}
|
||||
|
||||
%token EQUAL_SIGN SEMICOLON CHARACTER BLANK
|
||||
%token <string> NAME;
|
||||
|
||||
%name-prefix="asgn_"
|
||||
|
||||
%locations
|
||||
%error-verbose
|
||||
|
||||
%%
|
||||
|
||||
root : assignments | ;
|
||||
|
||||
assignments : assignments BLANK | assignments assignment | assignment | BLANK;
|
||||
|
||||
assignment : NAME EQUAL_SIGN material SEMICOLON {
|
||||
aparser->add_assignment(@1.off, $1, @1.ll, @3.off-@1.off, @3.ll + @4.ll);}
|
||||
| NAME space EQUAL_SIGN material SEMICOLON {
|
||||
aparser->add_assignment(@1.off, $1, @1.ll, @4.off-@1.off, @4.ll + @5.ll);}
|
||||
;
|
||||
|
||||
material : material CHARACTER | material NAME | material BLANK | NAME | CHARACTER | BLANK;
|
||||
|
||||
space : space BLANK | BLANK;
|
||||
|
||||
%%
|
||||
|
||||
int asgn_error(char* mes)
|
||||
{
|
||||
aparser->error(mes);
|
||||
}
|
|
@ -0,0 +1,215 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: atom_assignings.cpp 92 2007-04-19 11:38:21Z ondra $
|
||||
|
||||
#include "atom_assignings.h"
|
||||
#include "location.h"
|
||||
#include "parser_exception.h"
|
||||
|
||||
#include "utils/cc/exception.h"
|
||||
|
||||
#include <limits>
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
AtomAssignings::AtomAssignings(const AtomAssignings& aa, ogp::StaticAtoms& a)
|
||||
: atoms(a), expr(aa.expr, atoms), left_names(aa.left_names),
|
||||
order(aa.order)
|
||||
{
|
||||
// fill the lname2expr
|
||||
for (Tvarintmap::const_iterator it = aa.lname2expr.begin();
|
||||
it != aa.lname2expr.end(); ++it)
|
||||
lname2expr.insert(Tvarintmap::value_type(left_names.query((*it).first), (*it).second));
|
||||
}
|
||||
|
||||
/** A global symbol for passing info to the AtomAssignings from
|
||||
* asgn_parse(). */
|
||||
AtomAssignings* aparser;
|
||||
|
||||
/** The declaration of functions defined in asgn_ll.cc and asgn_tab.cc
|
||||
* generated from assign.lex assign.y */
|
||||
void* asgn__scan_buffer(char*, size_t);
|
||||
void asgn__destroy_buffer(void*);
|
||||
void asgn_parse();
|
||||
extern location_type asgn_lloc;
|
||||
|
||||
void AtomAssignings::parse(int length, const char* stream)
|
||||
{
|
||||
char* buffer = new char[length+2];
|
||||
strncpy(buffer, stream, length);
|
||||
buffer[length] = '\0';
|
||||
buffer[length+1] = '\0';
|
||||
asgn_lloc.off = 0;
|
||||
asgn_lloc.ll = 0;
|
||||
void* p = asgn__scan_buffer(buffer, (unsigned int)length+2);
|
||||
aparser = this;
|
||||
asgn_parse();
|
||||
delete [] buffer;
|
||||
asgn__destroy_buffer(p);
|
||||
}
|
||||
|
||||
void AtomAssignings::error(const char* mes)
|
||||
{
|
||||
throw ParserException(mes, asgn_lloc.off);
|
||||
}
|
||||
|
||||
void AtomAssignings::add_assignment_to_double(const char* name, double val)
|
||||
{
|
||||
// if left hand side is a registered atom, insert it to tree
|
||||
int t;
|
||||
try {
|
||||
if (atoms.check(name))
|
||||
t = expr.add_nulary(name);
|
||||
else
|
||||
t = -1;
|
||||
} catch (const ParserException& e) {
|
||||
t = -1;
|
||||
}
|
||||
// register left hand side in order
|
||||
order.push_back(t);
|
||||
|
||||
// add the double to the tree
|
||||
char tmp[100];
|
||||
sprintf(tmp, "%30.25g", val);
|
||||
try {
|
||||
expr.parse(strlen(tmp), tmp);
|
||||
} catch (const ParserException& e) {
|
||||
// should never happen
|
||||
throw ParserException(string("Error parsing double ")+tmp+": "+e.message(), 0);
|
||||
}
|
||||
|
||||
// register name of the left hand side and put to lname2expr
|
||||
const char* ss = left_names.insert(name);
|
||||
lname2expr.insert(Tvarintmap::value_type(ss, order.size()-1));
|
||||
}
|
||||
|
||||
void AtomAssignings::add_assignment(int asgn_off, const char* str, int name_len,
|
||||
int right_off, int right_len)
|
||||
{
|
||||
// the order of doing things here is important: since the
|
||||
// FormulaParser requires that all references from the i-th tree
|
||||
// refere to trees with index lass than i, so to capture also a
|
||||
// nulary term for the left hand side, it must be inserted to the
|
||||
// expression tree before the expression is parsed.
|
||||
|
||||
// find the name in the atoms, make copy of name to be able to put
|
||||
// '\0' at the end
|
||||
char* buf = new char[name_len+1];
|
||||
strncpy(buf, str, name_len);
|
||||
buf[name_len] = '\0';
|
||||
// if left hand side is a registered atom, insert it to tree
|
||||
int t;
|
||||
try {
|
||||
t = atoms.check(buf);
|
||||
if (t == -1)
|
||||
t = expr.add_nulary(buf);
|
||||
} catch (const ParserException& e) {
|
||||
atoms.register_name(buf);
|
||||
t = expr.add_nulary(buf);
|
||||
}
|
||||
// register left hand side in order
|
||||
order.push_back(t);
|
||||
|
||||
// parse expression on the right
|
||||
try {
|
||||
expr.parse(right_len, str+right_off);
|
||||
} catch (const ParserException& e) {
|
||||
throw ParserException(e, asgn_off+right_off);
|
||||
}
|
||||
|
||||
// register name of the left hand side and put to lname2expr
|
||||
const char* ss = left_names.insert(buf);
|
||||
lname2expr[ss] = order.size()-1;
|
||||
|
||||
// delete name
|
||||
delete [] buf;
|
||||
}
|
||||
|
||||
void AtomAssignings::apply_subst(const AtomSubstitutions::Toldnamemap& mm)
|
||||
{
|
||||
// go through all old variables and see what are their derived new
|
||||
// variables
|
||||
for (AtomSubstitutions::Toldnamemap::const_iterator it = mm.begin();
|
||||
it != mm.end(); ++it) {
|
||||
const char* oldname = (*it).first;
|
||||
int told = atoms.index(oldname);
|
||||
const AtomSubstitutions::Tshiftnameset& sset = (*it).second;
|
||||
if (told >= 0 && ! sset.empty()) {
|
||||
// at least one substitution here, so make an expression
|
||||
expr.add_formula(told);
|
||||
// say that this expression is not assigned to any atom
|
||||
order.push_back(-1);
|
||||
// now go through all new names derived from the old name and
|
||||
// reference to the newly added formula
|
||||
for (AtomSubstitutions::Tshiftnameset::const_iterator itt = sset.begin();
|
||||
itt != sset.end(); ++itt) {
|
||||
const char* newname = (*itt).first;
|
||||
const char* nn = left_names.insert(newname);
|
||||
lname2expr.insert(Tvarintmap::value_type(nn, expr.nformulas()-1));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void AtomAssignings::print() const
|
||||
{
|
||||
printf("Atom Assignings\nExpressions:\n");
|
||||
expr.print();
|
||||
printf("Left names:\n");
|
||||
for (Tvarintmap::const_iterator it = lname2expr.begin();
|
||||
it != lname2expr.end(); ++it)
|
||||
printf("%s ==> %d (t=%d)\n", (*it).first, expr.formula((*it).second), order[(*it).second]);
|
||||
}
|
||||
|
||||
void AtomAsgnEvaluator::setValues(EvalTree& et) const
|
||||
{
|
||||
// set values of constants
|
||||
aa.atoms.setValues(et);
|
||||
|
||||
// set values of variables to NaN or to user set values
|
||||
double nan = std::numeric_limits<double>::quiet_NaN();
|
||||
for (int i = 0; i < aa.atoms.nvar(); i++) {
|
||||
const char* ss = aa.atoms.name(i);
|
||||
int t = aa.atoms.index(ss);
|
||||
if (t >= 0) {
|
||||
Tusrvalmap::const_iterator it = user_values.find(t);
|
||||
if (it == user_values.end())
|
||||
et.set_nulary(t, nan);
|
||||
else
|
||||
et.set_nulary(t, (*it).second);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void AtomAsgnEvaluator::set_user_value(const char* name, double val)
|
||||
{
|
||||
int t = aa.atoms.index(name);
|
||||
if (t >= 0) {
|
||||
Tusrvalmap::iterator it = user_values.find(t);
|
||||
if (it == user_values.end())
|
||||
user_values.insert(Tusrvalmap::value_type(t, val));
|
||||
else
|
||||
(*it).second = val;
|
||||
}
|
||||
}
|
||||
|
||||
void AtomAsgnEvaluator::load(int i, double res)
|
||||
{
|
||||
// set the value
|
||||
operator[](i) = res;
|
||||
// if i-th expression is atom, set its value to this EvalTree
|
||||
int t = aa.order[i];
|
||||
if (t >= 0)
|
||||
etree.set_nulary(t, res);
|
||||
}
|
||||
|
||||
double AtomAsgnEvaluator::get_value(const char* name) const
|
||||
{
|
||||
AtomAssignings::Tvarintmap::const_iterator it = aa.lname2expr.find(name);
|
||||
if (it == aa.lname2expr.end())
|
||||
return std::numeric_limits<double>::quiet_NaN();
|
||||
else
|
||||
return operator[]((*it).second);
|
||||
}
|
||||
|
||||
|
|
@ -0,0 +1,130 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: atom_assignings.h 149 2007-04-30 02:11:46Z okamenik $
|
||||
|
||||
#ifndef OGP_ATOM_ASSIGNINGS_H
|
||||
#define OGP_ATOM_ASSIGNINGS_H
|
||||
|
||||
#include "static_atoms.h"
|
||||
#include "formula_parser.h"
|
||||
#include "atom_substitutions.h"
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
|
||||
namespace ogp {
|
||||
|
||||
class AtomAsgnEvaluator;
|
||||
|
||||
/** This class represents atom assignments used in parameters
|
||||
* settings and initval initialization. It maintains atoms of the
|
||||
* all expressions on the right hand side, the parsed formulas of
|
||||
* the right hand sides, and the information about the left hand
|
||||
* sides. See documentation to the order member below. */
|
||||
class AtomAssignings {
|
||||
friend class AtomAsgnEvaluator;
|
||||
protected:
|
||||
typedef std::map<const char*, int, ltstr> Tvarintmap;
|
||||
/** All atoms which should be sufficient for formulas at the
|
||||
* right hand sides. The atoms should be filled with names
|
||||
* (preregistered). This is a responsibility of the caller. */
|
||||
StaticAtoms& atoms;
|
||||
/** The formulas of right hand sides. */
|
||||
FormulaParser expr;
|
||||
/** Name storage of the names from left hand sides. */
|
||||
NameStorage left_names;
|
||||
/** Information on left hand sides. This maps a name to the
|
||||
* index of its assigned expression in expr. More than one
|
||||
* name may reference to the same expression. */
|
||||
Tvarintmap lname2expr;
|
||||
/** Information on left hand sides. If order[i] >= 0, then it
|
||||
* says that i-th expression in expr is assigned to atom with
|
||||
* order[i] tree index. */
|
||||
std::vector<int> order;
|
||||
public:
|
||||
/** Construct the object using the provided static atoms. */
|
||||
AtomAssignings(StaticAtoms& a) : atoms(a), expr(atoms)
|
||||
{}
|
||||
/** Make a copy with provided reference to (posibly different)
|
||||
* static atoms. */
|
||||
AtomAssignings(const AtomAssignings& aa, StaticAtoms& a);
|
||||
virtual ~AtomAssignings()
|
||||
{}
|
||||
/** Parse the assignments from the given string. */
|
||||
void parse(int length, const char* stream);
|
||||
/** Process a syntax error from bison. */
|
||||
void error(const char* mes);
|
||||
/** Add an assignment of the given name to the given
|
||||
* double. Can be called by a user, anytime. */
|
||||
void add_assignment_to_double(const char* name, double val);
|
||||
/** Add an assignment. Called from assign.y. */
|
||||
void add_assignment(int asgn_off, const char* str, int name_len,
|
||||
int right_off, int right_len);
|
||||
/** This applies old2new map (possibly from atom
|
||||
* substitutions) to this object. It registers new variables
|
||||
* in the atoms, and adds the expressions to expr, and left
|
||||
* names to lname2expr. The information about dynamical part
|
||||
* of substitutions is ignored, since we are now in the static
|
||||
* world. */
|
||||
void apply_subst(const AtomSubstitutions::Toldnamemap& mm);
|
||||
/** Debug print. */
|
||||
void print() const;
|
||||
};
|
||||
|
||||
/** This class basically evaluates the atom assignments
|
||||
* AtomAssignings, so it inherits from ogp::FormulaEvaluator. It
|
||||
* is also a storage for the results of the evaluation stored as a
|
||||
* vector, so the class inherits from std::vector<double> and
|
||||
* ogp::FormulaEvalLoader. As the expressions for atoms are
|
||||
* evaluated, the results are values for atoms which will be
|
||||
* used in subsequent evaluations. For this reason, the class
|
||||
* inherits also from AtomValues. */
|
||||
class AtomAsgnEvaluator : public FormulaEvalLoader,
|
||||
public AtomValues,
|
||||
protected FormulaEvaluator,
|
||||
public std::vector<double> {
|
||||
protected:
|
||||
typedef std::map<int, double> Tusrvalmap;
|
||||
Tusrvalmap user_values;
|
||||
const AtomAssignings& aa;
|
||||
public:
|
||||
AtomAsgnEvaluator(const AtomAssignings& a)
|
||||
: FormulaEvaluator(a.expr),
|
||||
std::vector<double>(a.expr.nformulas()), aa(a) {}
|
||||
virtual ~AtomAsgnEvaluator() {}
|
||||
/** This sets all initial values to NaNs, all constants and
|
||||
* all values set by user by call set_value. This is called by
|
||||
* FormulaEvaluator::eval() method, which is called by eval()
|
||||
* method passing this argument as AtomValues. So the
|
||||
* ogp::EvalTree will be always this->etree. */
|
||||
void setValues(EvalTree& et) const;
|
||||
/** User setting of the values. For example in initval,
|
||||
* parameters are known and should be set to their values. In
|
||||
* constrast endogenous variables are set initially to NaNs by
|
||||
* AtomValues::setValues. */
|
||||
void set_user_value(const char* name, double val);
|
||||
/** This sets the result of i-th expression in aa to res, and
|
||||
* also checks whether the i-th expression is an atom. If so,
|
||||
* it sets the value of the atom in ogp::EvalTree
|
||||
* this->etree. */
|
||||
void load(int i, double res);
|
||||
/** After the user values have been set, the assignments can
|
||||
* be evaluated. For this purpose we have eval() method. The
|
||||
* result is that this object as std::vector<double> will
|
||||
* contain the values. It is ordered given by formulas in
|
||||
* expr. */
|
||||
void eval()
|
||||
{FormulaEvaluator::eval(*this, *this);}
|
||||
/** This returns a value for a given name. If the name is not
|
||||
* found among atoms, or there is no assignment for the atom,
|
||||
* NaN is returned. */
|
||||
double get_value(const char* name) const;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,275 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: atom_substitutions.cpp 42 2007-01-22 21:53:24Z ondra $
|
||||
|
||||
#include "atom_substitutions.h"
|
||||
#include "utils/cc/exception.h"
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
AtomSubstitutions::AtomSubstitutions(const AtomSubstitutions& as, const FineAtoms& oa,
|
||||
FineAtoms& na)
|
||||
: old_atoms(oa), new_atoms(na)
|
||||
{
|
||||
const NameStorage& ns = na.get_name_storage();
|
||||
|
||||
// fill new2old
|
||||
for (Tshiftmap::const_iterator it = as.new2old.begin();
|
||||
it != as.new2old.end(); ++it)
|
||||
new2old.insert(Tshiftmap::value_type(ns.query((*it).first),
|
||||
Tshiftname(ns.query((*it).second.first),
|
||||
(*it).second.second)));
|
||||
// fill old2new
|
||||
for (Toldnamemap::const_iterator it = as.old2new.begin();
|
||||
it != as.old2new.end(); ++it) {
|
||||
Tshiftnameset sset;
|
||||
for (Tshiftnameset::const_iterator itt = (*it).second.begin();
|
||||
itt != (*it).second.end(); ++itt)
|
||||
sset.insert(Tshiftname(ns.query((*itt).first), (*itt).second));
|
||||
old2new.insert(Toldnamemap::value_type(ns.query((*it).first), sset));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void AtomSubstitutions::add_substitution(const char* newname, const char* oldname, int tshift)
|
||||
{
|
||||
// make sure the storage is from the new_atoms
|
||||
newname = new_atoms.get_name_storage().query(newname);
|
||||
oldname = new_atoms.get_name_storage().query(oldname);
|
||||
if (newname == NULL || oldname == NULL)
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Bad newname or oldname in AtomSubstitutions::add_substitution");
|
||||
|
||||
// insert to new2old map
|
||||
new2old.insert(Tshiftmap::value_type(newname, Tshiftname(oldname, tshift)));
|
||||
// insert to old2new map
|
||||
Toldnamemap::iterator it = old2new.find(oldname);
|
||||
if (it != old2new.end())
|
||||
(*it).second.insert(Tshiftname(newname, -tshift));
|
||||
else {
|
||||
Tshiftnameset snset;
|
||||
snset.insert(Tshiftname(newname, -tshift));
|
||||
old2new.insert(Toldnamemap::value_type(oldname, snset));
|
||||
}
|
||||
|
||||
// put to info
|
||||
info.num_substs++;
|
||||
}
|
||||
|
||||
void AtomSubstitutions::substitutions_finished(VarOrdering::ord_type ot)
|
||||
{
|
||||
// create an external ordering of new_atoms from old_atoms
|
||||
const vector<const char*>& oa_ext = old_atoms.get_allvar();
|
||||
vector<const char*> na_ext;
|
||||
for (unsigned int i = 0; i < oa_ext.size(); i++) {
|
||||
const char* oname = oa_ext[i];
|
||||
// add the old name itself
|
||||
na_ext.push_back(oname);
|
||||
// add all new names derived from the old name
|
||||
Toldnamemap::const_iterator it = old2new.find(oname);
|
||||
if (it != old2new.end())
|
||||
for (Tshiftnameset::const_iterator itt = (*it).second.begin();
|
||||
itt != (*it).second.end(); ++itt)
|
||||
na_ext.push_back((*itt).first);
|
||||
}
|
||||
|
||||
// call parsing finished for the new_atoms
|
||||
new_atoms.parsing_finished(ot, na_ext);
|
||||
}
|
||||
|
||||
const char* AtomSubstitutions::get_new4old(const char* oldname, int tshift) const
|
||||
{
|
||||
Toldnamemap::const_iterator it = old2new.find(oldname);
|
||||
if (it != old2new.end()) {
|
||||
const Tshiftnameset& sset = (*it).second;
|
||||
for (Tshiftnameset::const_iterator itt = sset.begin();
|
||||
itt != sset.end(); ++itt)
|
||||
if ((*itt).second == - tshift)
|
||||
return (*itt).first;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void AtomSubstitutions::print() const
|
||||
{
|
||||
printf("Atom Substitutions:\nOld ==> New:\n");
|
||||
for (Toldnamemap::const_iterator it = old2new.begin(); it != old2new.end(); ++it)
|
||||
for (Tshiftnameset::const_iterator itt = (*it).second.begin();
|
||||
itt != (*it).second.end(); ++itt)
|
||||
printf(" %s ==> [%s, %d]\n", (*it).first, (*itt).first, (*itt).second);
|
||||
|
||||
printf("Old <== New:\n");
|
||||
for (Tshiftmap::const_iterator it = new2old.begin(); it != new2old.end(); ++it)
|
||||
printf(" [%s, %d] <== %s\n", (*it).second.first, (*it).second.second, (*it).first);
|
||||
}
|
||||
|
||||
void SAtoms::substituteAllLagsAndLeads(FormulaParser& fp, AtomSubstitutions& as)
|
||||
{
|
||||
const char* name;
|
||||
|
||||
int mlead, mlag;
|
||||
endovarspan(mlead, mlag);
|
||||
|
||||
// substitute all endo lagged more than 1
|
||||
while (NULL != (name = findEndoWithLeadInInterval(mlag, -2)))
|
||||
makeAuxVariables(name, -1, -2, mlag, fp, as);
|
||||
// substitute all endo leaded more than 1
|
||||
while (NULL != (name = findEndoWithLeadInInterval(2, mlead)))
|
||||
makeAuxVariables(name, 1, 2, mlead, fp, as);
|
||||
|
||||
exovarspan(mlead, mlag);
|
||||
|
||||
// substitute all lagged exo
|
||||
while (NULL != (name = findExoWithLeadInInterval(mlag, -1)))
|
||||
makeAuxVariables(name, -1, -1, mlag, fp, as);
|
||||
// substitute all leaded exo
|
||||
while (NULL != (name = findExoWithLeadInInterval(1, mlead)))
|
||||
makeAuxVariables(name, 1, 1, mlead, fp, as);
|
||||
|
||||
// notify that substitution have been finished
|
||||
as.substitutions_finished(order_type);
|
||||
}
|
||||
|
||||
void SAtoms::substituteAllLagsAndExo1Leads(FormulaParser& fp, AtomSubstitutions& as)
|
||||
{
|
||||
const char* name;
|
||||
|
||||
int mlead, mlag;
|
||||
endovarspan(mlead, mlag);
|
||||
|
||||
// substitute all endo lagged more than 1
|
||||
while (NULL != (name = findEndoWithLeadInInterval(mlag, -2)))
|
||||
makeAuxVariables(name, -1, -2, mlag, fp, as);
|
||||
|
||||
exovarspan(mlead, mlag);
|
||||
|
||||
// substitute all lagged exo
|
||||
while (NULL != (name = findExoWithLeadInInterval(mlag, -1)))
|
||||
makeAuxVariables(name, -1, -1, mlag, fp, as);
|
||||
// substitute all leaded exo by 1
|
||||
while (NULL != (name = findExoWithLeadInInterval(1,1)))
|
||||
makeAuxVariables(name, 1, 1, 1, fp, as);
|
||||
|
||||
// notify that substitution have been finished
|
||||
as.substitutions_finished(order_type);
|
||||
}
|
||||
|
||||
const char* SAtoms::findNameWithLeadInInterval(const vector<const char*>& names,
|
||||
int ll1, int ll2) const
|
||||
{
|
||||
for (unsigned int i = 0; i < names.size(); i++) {
|
||||
const char* name = names[i];
|
||||
DynamicAtoms::Tvarmap::const_iterator it = vars.find(name);
|
||||
if (it != vars.end()) {
|
||||
const DynamicAtoms::Tlagmap& lmap = (*it).second;
|
||||
for (DynamicAtoms::Tlagmap::const_iterator itt = lmap.begin();
|
||||
itt != lmap.end(); ++itt)
|
||||
if ((*itt).first >= ll1 && (*itt).first <= ll2)
|
||||
return name;
|
||||
}
|
||||
}
|
||||
|
||||
// nothing found
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void SAtoms::attemptAuxName(const char* str, int ll, string& out) const
|
||||
{
|
||||
char c = (ll >= 0)? ((ll == 0)? 'e' : 'p' ) : 'm';
|
||||
char absll[100];
|
||||
sprintf(absll, "%d", std::abs(ll));
|
||||
int iter = 1;
|
||||
do {
|
||||
out = string(str) + '_';
|
||||
for (int i = 0; i < iter; i++)
|
||||
out += c;
|
||||
if (ll != 0)
|
||||
out += absll;
|
||||
iter++;
|
||||
} while (varnames.query(out.c_str()));
|
||||
}
|
||||
|
||||
void SAtoms::makeAuxVariables(const char* name, int step, int start, int limit_lead,
|
||||
FormulaParser& fp, AtomSubstitutions& as)
|
||||
{
|
||||
if (! (step == 1 || step == -1))
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Wrong value of step in SAtoms::makeAuxVariables");
|
||||
if (step*start > step*limit_lead)
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Wrong value of start in SAtoms::makeAuxVariables");
|
||||
|
||||
// make sure that we do not go further than necessary, this is
|
||||
// that the limit lead is not behind maxlead or minlag
|
||||
int mlead, mlag;
|
||||
varspan(name, mlead, mlag);
|
||||
if (step == -1)
|
||||
limit_lead = std::max(limit_lead, mlag);
|
||||
else
|
||||
limit_lead = std::min(limit_lead, mlead);
|
||||
|
||||
// Comment to comments: name="a"; start=-3; step=-1;
|
||||
|
||||
char tmp[500];
|
||||
|
||||
// recover tree index of a previous atom, i.e. set tprev to a tree
|
||||
// index of atom "a(-2)"
|
||||
int tprev = index(name, start-step);
|
||||
if (tprev == -1) {
|
||||
sprintf(tmp, "%s(%d)", name, start-step);
|
||||
tprev = fp.add_nulary(tmp);
|
||||
}
|
||||
|
||||
int ll = start;
|
||||
do {
|
||||
// either create atom "a_m2(0)" with tree index taux and add
|
||||
// equation "a_m2(0)=a(-2)"
|
||||
// or
|
||||
// check if "a_m2(0)" has not been already created (with
|
||||
// different step), in this case do not add equation "a_m2(0)
|
||||
// = a(-2)"
|
||||
const char* newname;
|
||||
string newname_str;
|
||||
int taux;
|
||||
if (NULL == (newname=as.get_new4old(name, ll-step))) {
|
||||
attemptAuxName(name, ll-step, newname_str);
|
||||
newname = newname_str.c_str();
|
||||
register_uniq_endo(newname);
|
||||
newname = varnames.query(newname);
|
||||
sprintf(tmp, "%s(0)", newname);
|
||||
taux = fp.add_nulary(tmp);
|
||||
// add to substitutions
|
||||
as.add_substitution(newname, name, ll-step);
|
||||
|
||||
// add equation "a_m2(0) = a(-2)", this is taux = tprev
|
||||
fp.add_formula(fp.add_binary(MINUS, taux, tprev));
|
||||
} else {
|
||||
// example: exogenous EPS and occurrence at both EPS(-1)
|
||||
// EPS(+1)
|
||||
// first call makeAuxVariables("EPS",1,1,...) will make endo EPS_p0 = EPS
|
||||
// second call makeAuxVariables("EPS",-1,-1,...) will use this EPS_p0
|
||||
// to substitute for EPS(-1)
|
||||
taux = index(newname, 0);
|
||||
if (taux < 0)
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Couldn't find tree index of previously substituted variable");
|
||||
}
|
||||
|
||||
// create atom "a_m2(-1)" or turn "a(-3)" if any to "a_m2(-1)"; tree index t
|
||||
int t = index(name, ll);
|
||||
if (t == -1) {
|
||||
// no "a(-3)", make t <-> a_m2(-1)
|
||||
sprintf(tmp, "%s(%d)", newname, step);
|
||||
t = fp.add_nulary(tmp);
|
||||
} else {
|
||||
// turn a(-3) to a_m2(-1)
|
||||
unassign_variable(name, ll, t);
|
||||
assign_variable(newname, step, t);
|
||||
}
|
||||
|
||||
// next iteration starts with tprev <-> "a_m2(-1)" (this will be made equal to "a_m3(0)")
|
||||
tprev = t;
|
||||
|
||||
ll += step;
|
||||
} while (step*ll <= step*limit_lead);
|
||||
}
|
|
@ -0,0 +1,161 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: atom_substitutions.h 42 2007-01-22 21:53:24Z ondra $
|
||||
|
||||
#ifndef OGP_ATOM_SUBSTITUTIONS_H
|
||||
#define OGP_ATOM_SUBSTITUTIONS_H
|
||||
|
||||
#include "fine_atoms.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace ogp {
|
||||
|
||||
using std::string;
|
||||
using std::map;
|
||||
using std::pair;
|
||||
|
||||
/** This class tracts an information about the performed
|
||||
* substitutions. In fact, there is only one number to keep track
|
||||
* about, this is a number of substitutions. */
|
||||
struct SubstInfo {
|
||||
int num_substs;
|
||||
SubstInfo() : num_substs(0) {}
|
||||
};
|
||||
|
||||
/** This class tracks all atom substitutions during the job and
|
||||
* then builds structures when all substitutions are finished. */
|
||||
class AtomSubstitutions {
|
||||
public:
|
||||
typedef pair<const char*, int> Tshiftname;
|
||||
typedef map<const char*, Tshiftname, ltstr> Tshiftmap;
|
||||
typedef set<Tshiftname> Tshiftnameset;
|
||||
typedef map<const char*, Tshiftnameset, ltstr> Toldnamemap;
|
||||
protected:
|
||||
/** This maps a new name to a shifted old name. This is, one
|
||||
* entry looks as "a_m3 ==> a(-3)", saying that a variable
|
||||
* "a_m3" corresponds to a variable "a" lagged by 3. */
|
||||
Tshiftmap new2old;
|
||||
/** This is inverse to new2old, which is not unique. For old
|
||||
* name, say "a", it says what new names are derived with what
|
||||
* shifts from the "a". For example, it can map "a" to a two
|
||||
* element set {["a_m3", +3], ["a_p2", -2]}. This says that
|
||||
* leading "a_m3" by 3 one gets old "a" and lagging "a_p2" by
|
||||
* 2 one gets also old "a". */
|
||||
Toldnamemap old2new;
|
||||
/** This is a reference to old atoms with multiple leads and
|
||||
* lags. They are supposed to be used with parsing finished
|
||||
* being had called, so that the external ordering is
|
||||
* available. */
|
||||
const FineAtoms& old_atoms;
|
||||
/** This is a reference to new atoms. All name pointers point
|
||||
* to storage of these atoms. */
|
||||
FineAtoms& new_atoms;
|
||||
/** Substitutions information. */
|
||||
SubstInfo info;
|
||||
public:
|
||||
/** Create the object with reference to the old and new
|
||||
* atoms. In the beginning, old atoms are supposed to be with
|
||||
* parsing_finished() called, and new atoms a simple copy of
|
||||
* old atoms. The new atoms will be an instance of SAtoms. All
|
||||
* substitution job is done by a substitution method of the
|
||||
* new atoms. */
|
||||
AtomSubstitutions(const FineAtoms& oa, FineAtoms& na)
|
||||
: old_atoms(oa), new_atoms(na) {}
|
||||
/** Construct a copy of the object using a different instances
|
||||
* of old atoms and new atoms, which are supposed to be
|
||||
* semantically same as the atoms from as. */
|
||||
AtomSubstitutions(const AtomSubstitutions& as, const FineAtoms& oa, FineAtoms& na);
|
||||
virtual ~AtomSubstitutions() {}
|
||||
/** This is called during the substitution job from the
|
||||
* substitution method of the new atoms. This says that the
|
||||
* new name, say "a_m3" is a substitution of old name "a"
|
||||
* shifted by -3. */
|
||||
void add_substitution(const char* newname, const char* oldname, int tshift);
|
||||
/** This is called when all substitutions are finished. This
|
||||
* forms the new external ordering of the new atoms and calls
|
||||
* parsing_finished() for the new atoms with the given ordering type. */
|
||||
void substitutions_finished(VarOrdering::ord_type ot);
|
||||
/** Returns a new name for old name and given tshift. For "a"
|
||||
* and tshift=-3, it returns "a_m3". If there is no such
|
||||
* substitution, it returns NULL. */
|
||||
const char* get_new4old(const char* oldname, int tshift) const;
|
||||
/** Return new2old. */
|
||||
const Tshiftmap& get_new2old() const
|
||||
{return new2old;}
|
||||
/** Return old2new. */
|
||||
const Toldnamemap& get_old2new() const
|
||||
{return old2new;}
|
||||
/** Return substitution info. */
|
||||
const SubstInfo& get_info() const
|
||||
{return info;}
|
||||
/** Return old atoms. */
|
||||
const FineAtoms& get_old_atoms() const
|
||||
{return old_atoms;}
|
||||
/** Return new atoms. */
|
||||
const FineAtoms& get_new_atoms() const
|
||||
{return new_atoms;}
|
||||
/** Debug print. */
|
||||
void print() const;
|
||||
};
|
||||
|
||||
/** Atoms class adding lag/lead substitution facilities on top of
 * FineAtoms. Auxiliary variables and equations are created so that the
 * resulting model contains only the leads/lags a given solution method
 * can handle. */
class SAtoms : public FineAtoms {
public:
	SAtoms()
		: FineAtoms() {}
	SAtoms(const SAtoms& sa)
		: FineAtoms(sa) {}
	virtual ~SAtoms() {}
	/** This substitutes all lags and leads for all exogenous and
	 * all lags and leads greater than 1 for all endogenous
	 * variables. This is useful for perfect foresight problems
	 * where we can do that. */
	void substituteAllLagsAndLeads(FormulaParser& fp, AtomSubstitutions& as);
	/** This substitutes all lags of all endo and exo and one step
	 * leads of all exo variables. This is useful for stochastic
	 * models where we cannot solve leads more than 1. */
	void substituteAllLagsAndExo1Leads(FormulaParser& fp, AtomSubstitutions& as);
protected:
	/** This finds an endogenous variable name which occurs between
	 * ll1 and ll2 included. */
	const char* findEndoWithLeadInInterval(int ll1, int ll2) const
		{return findNameWithLeadInInterval(get_endovars(), ll1, ll2);}
	/** This finds an exogenous variable name which occurs between
	 * ll1 and ll2 included. */
	const char* findExoWithLeadInInterval(int ll1, int ll2) const
		{return findNameWithLeadInInterval(get_exovars(), ll1, ll2);}

	/** This attempts to find a non registered name of the form
	 * <str>_m<abs(ll)> or <str>_p<abs(ll)>. A letter 'p' is
	 * chosen if ll is positive, 'm' if negative. If a name of
	 * such form is already registered, one more character (either
	 * 'p' or 'm') is added and the test is performed again. The
	 * resulting name is returned in a string out. */
	void attemptAuxName(const char* str, int ll, string& out) const;

	/** This makes auxiliary variables to eliminate all leads/lags
	 * greater/less than or equal to start up to the limit_lead
	 * for a variable with the given name. If the limit_lead is
	 * greater/less than the maxlead/minlag of the variable, than
	 * maxlead/minlag is used. This process is recorded in
	 * AtomSubstitutions. The new auxiliary variables and their
	 * atoms are created in this object. The auxiliary equations
	 * are created in the given FormulaParser. The value of step
	 * is allowed to be either -1 (lags) or +1 (leads). */
	void makeAuxVariables(const char* name, int step, int start, int limit_lead,
						  FormulaParser& fp, AtomSubstitutions& as);
private:
	/** This is a worker routine for findEndoWithLeadInInterval
	 * and findExoWithLeadInInterval. */
	const char* findNameWithLeadInInterval(const vector<const char*>& names,
										   int ll1, int ll2) const;

};
|
||||
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,34 @@
|
|||
%{
// Flex scanner for simple CSV input. It produces only COMMA, NEWLINE
// and ITEM tokens; the position of each token is tracked in csv_lloc.
#include "location.h"
#include "csv_tab.hh"

extern YYLTYPE csv_lloc;

// update csv_lloc (offset/length) before every rule's action fires
#define YY_USER_ACTION SET_LLOC(csv_);
%}

%option nounput
%option noyy_top_state
%option stack
%option yylineno
%option prefix="csv_"
%option never-interactive

%%

, {return COMMA;}
\n {return NEWLINE;}
\r\n {return NEWLINE;}
[^,\n\r]+ {return ITEM;}

%%

// flex wrap hook: only one buffer is ever scanned, so stop at EOF
int csv_wrap()
{
	return 1;
}

// typed wrapper over flex's buffer disposal, so that callers do not
// need to see the YY_BUFFER_STATE type
void csv__destroy_buffer(void* p)
{
	csv__delete_buffer((YY_BUFFER_STATE)p);
}
|
|
@ -0,0 +1,46 @@
|
|||
%{
// Bison grammar for simple CSV input; it drives row/column/item
// callbacks on the global csv_parser object.
#include "location.h"
#include "csv_parser.h"
#include "csv_tab.hh"

void csv_error(const char*);
int csv_lex(void);
extern int csv_lineno;
extern ogp::CSVParser* csv_parser;
extern YYLTYPE csv_lloc;
%}

%union {
	char* string;
	int integer;
}

%token COMMA NEWLINE BOGUS
%token <string> ITEM

%name-prefix="csv_";

%locations
%error-verbose

%%

/* a file is a list of newline-terminated lines, possibly followed by a
 * final line without a terminating newline */
csv_file : line_list | line_list line;

line_list : line_list line newline | line newline | line_list newline | newline;

/* a line is any sequence of items and separating commas */
line : line comma | line item | item | comma;

comma : COMMA {csv_parser->nextcol();};

newline : NEWLINE {csv_parser->nextrow();};

/* @1 carries the offset/length filled in by the scanner */
item : ITEM {csv_parser->item(@1.off, @1.ll);};


%%

// forward a bison syntax error to the active parser object
void csv_error(const char* mes)
{
	csv_parser->csv_error(mes);
}
|
|
@ -0,0 +1,42 @@
|
|||
#include "csv_parser.h"
|
||||
#include "parser_exception.h"
|
||||
#include "location.h"
|
||||
#include "csv_tab.hh"
|
||||
#include <cstring>
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
/** A global symbol for passing info to the CSVParser from
|
||||
* csv_parse(). */
|
||||
CSVParser* csv_parser;
|
||||
|
||||
/** The declaration of functions defined in csv_ll.cc and
|
||||
* csv_tab.cc generated from csv.lex and csv.y. */
|
||||
void* csv__scan_buffer(char*, unsigned int);
|
||||
void csv__destroy_buffer(void*);
|
||||
void csv_parse();
|
||||
|
||||
extern ogp::location_type csv_lloc;
|
||||
|
||||
/** Translate a bison error report into a ParserException carrying the
 * current offset within the parsed string. */
void CSVParser::csv_error(const char* mes)
{
	throw ParserException(mes, csv_lloc.off);
}
|
||||
|
||||
void CSVParser::csv_parse(int length, const char* str)
|
||||
{
|
||||
// allocate temporary buffer and parse
|
||||
char* buffer = new char[length+2];
|
||||
strncpy(buffer, str, length);
|
||||
buffer[length] = '\0';
|
||||
buffer[length+1] = '\0';
|
||||
csv_lloc.off = 0;
|
||||
csv_lloc.ll = 0;
|
||||
parsed_string = buffer;
|
||||
void* p = csv__scan_buffer(buffer, (unsigned int)length+2);
|
||||
csv_parser = this;
|
||||
::csv_parse();
|
||||
delete [] buffer;
|
||||
csv__destroy_buffer(p);
|
||||
parsed_string = NULL;
|
||||
}
|
|
@ -0,0 +1,46 @@
|
|||
// Copyright (C) 2007, Ondra Kamenik
|
||||
|
||||
// $Id$
|
||||
|
||||
#ifndef OGP_CSV_PARSER
|
||||
#define OGP_CSV_PARSER
|
||||
|
||||
namespace ogp {
|
||||
|
||||
/** Callback interface: the CSVParser reports every parsed cell to a
 * peer implementing this interface. */
class CSVParserPeer {
public:
	virtual ~CSVParserPeer() {}
	/** Called for each cell: zero-based row/column and a pointer into
	 * the parsed buffer with the cell's length (not NUL-terminated). */
	virtual void item(int irow, int icol, const char* str, int length) = 0;
};
|
||||
|
||||
/** Driver for the flex/bison CSV scanner. It tracks the current
 * row/column while parsing and forwards each cell to the peer. */
class CSVParser {
private:
	/** Receiver of the item callbacks. */
	CSVParserPeer& peer;
	/** Zero-based row of the cell currently being parsed. */
	int row;
	/** Zero-based column of the cell currently being parsed. */
	int col;
	/** Buffer being parsed; non-NULL only during csv_parse(). */
	const char* parsed_string;
public:
	CSVParser(CSVParserPeer& p)
		: peer(p), row(0), col(0), parsed_string(0) {}
	CSVParser(const CSVParser& csvp)
		: peer(csvp.peer), row(csvp.row),
		  col(csvp.col), parsed_string(csvp.parsed_string) {}
	virtual ~CSVParser() {}

	/** Report a syntax error (throws ParserException). */
	void csv_error(const char* mes);
	/** Parse the first length characters of str. */
	void csv_parse(int length, const char* str);

	/** Advance to the next row, resetting the column counter. */
	void nextrow()
		{row++; col = 0;}
	/** Advance to the next column within the current row. */
	void nextcol()
		{col++;}
	/** Forward the cell at the given offset/length to the peer. */
	void item(int off, int length)
		{peer.item(row, col, parsed_string+off, length);}
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,609 @@
|
|||
// Copyright (C) 2005, Ondra Kamenik
|
||||
|
||||
// $Id: dynamic_atoms.cpp 1362 2007-07-10 11:50:18Z kamenik $
|
||||
|
||||
#include "utils/cc/exception.h"
|
||||
#include "dynamic_atoms.h"
|
||||
|
||||
#include <string.h>
|
||||
#include <limits.h>
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
/** Deep-copy constructor: every stored name is duplicated so the new
 * storage owns its own allocations, and the lookup set is rebuilt to
 * point at the copies. */
NameStorage::NameStorage(const NameStorage& stor)
{
	for (unsigned int idx = 0; idx < stor.name_store.size(); idx++) {
		const char* src = stor.name_store[idx];
		char* copy = new char[strlen(src)+1];
		strcpy(copy, src);
		name_store.push_back(copy);
		name_set.insert(copy);
	}
}
|
||||
|
||||
/** Destructor releasing all owned name allocations. */
NameStorage::~NameStorage()
{
	while (!name_store.empty()) {
		char* last = name_store.back();
		name_store.pop_back();
		delete [] last;
	}
}
|
||||
|
||||
const char* NameStorage::query(const char* name) const
|
||||
{
|
||||
set<const char*, ltstr>::const_iterator it = name_set.find(name);
|
||||
if (it == name_set.end())
|
||||
return NULL;
|
||||
else
|
||||
return (*it);
|
||||
}
|
||||
|
||||
const char* NameStorage::insert(const char* name)
|
||||
{
|
||||
set<const char*, ltstr>::const_iterator it = name_set.find(name);
|
||||
if (it == name_set.end()) {
|
||||
char* str = new char[strlen(name)+1];
|
||||
strcpy(str, name);
|
||||
name_store.push_back(str);
|
||||
name_set.insert(str);
|
||||
return str;
|
||||
} else {
|
||||
return (*it);
|
||||
}
|
||||
}
|
||||
|
||||
void NameStorage::print() const
|
||||
{
|
||||
for (unsigned int i = 0; i < name_store.size(); i++)
|
||||
printf("%s\n", name_store[i]);
|
||||
}
|
||||
|
||||
void Constants::import_constants(const Constants& c, OperationTree& otree, Tintintmap& tmap)
|
||||
{
|
||||
for (Tconstantmap::const_iterator it = c.cmap.begin();
|
||||
it != c.cmap.end(); ++it) {
|
||||
int told = (*it).first;
|
||||
int tnew = otree.add_nulary();
|
||||
tmap.insert(Tintintmap::value_type(told, tnew));
|
||||
add_constant(tnew, (*it).second);
|
||||
}
|
||||
}
|
||||
|
||||
/** AtomValues interface: push every registered constant value into the
 * evaluation tree. */
void Constants::setValues(EvalTree& et) const
{
	for (Tconstantmap::const_iterator it = cmap.begin();
		 it != cmap.end(); ++it) {
		et.set_nulary((*it).first, (*it).second);
	}
}
|
||||
|
||||
void Constants::add_constant(int t, double val)
|
||||
{
|
||||
cmap.insert(Tconstantmap::value_type(t, val));
|
||||
cinvmap.insert(Tconstantinvmap::value_type(val, t));
|
||||
}
|
||||
|
||||
bool Constants::is_constant(int t) const
|
||||
{
|
||||
if (t < OperationTree::num_constants)
|
||||
return true;
|
||||
Tconstantmap::const_iterator it = cmap.find(t);
|
||||
return (it != cmap.end());
|
||||
}
|
||||
|
||||
double Constants::get_constant_value(int t) const
|
||||
{
|
||||
Tconstantmap::const_iterator it = cmap.find(t);
|
||||
if (it != cmap.end())
|
||||
return (*it).second;
|
||||
else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Tree index is not constant in Constants::get_constant_value");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int Constants::check(const char* str) const
|
||||
{
|
||||
double d;
|
||||
sscanf(str, "%lf", &d);
|
||||
Tconstantinvmap::const_iterator it = cinvmap.find(d);
|
||||
if (it != cinvmap.end())
|
||||
return (*it).second;
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
|
||||
void Constants::print() const
|
||||
{
|
||||
Tconstantmap::const_iterator it;
|
||||
for (it = cmap.begin(); it != cmap.end(); ++it)
|
||||
printf("$%d: %8.4g\n", (*it).first, (*it).second);
|
||||
}
|
||||
|
||||
|
||||
/** Construct empty DynamicAtoms: no variables, with minlag/maxlead set
 * to sentinels that any real occurrence will tighten. */
DynamicAtoms::DynamicAtoms()
	: nv(0), minlag(INT_MAX), maxlead(INT_MIN)
{
}
|
||||
|
||||
/** Copy constructor. The name strings are owned by varnames, so vars
 * and indices cannot simply be copied: their const char* keys/values
 * must be re-pointed at this object's own NameStorage via query(). */
DynamicAtoms::DynamicAtoms(const DynamicAtoms& da)
	: Constants(da),
	  varnames(da.varnames), vars(), indices(),
	  nv(da.nv), minlag(da.minlag), maxlead(da.maxlead)
{
	// rebuild vars with keys pointing into our own storage
	Tvarmap::const_iterator vit = da.vars.begin();
	for (; vit != da.vars.end(); ++vit) {
		const char* own = varnames.query((*vit).first);
		vars.insert(Tvarmap::value_type(own, (*vit).second));
	}
	// rebuild indices the same way
	Tindexmap::const_iterator iit = da.indices.begin();
	for (; iit != da.indices.end(); ++iit) {
		indices.insert(Tindexmap::value_type((*iit).first,
											 varnames.query((*iit).second)));
	}
}
|
||||
|
||||
|
||||
int DynamicAtoms::check(const char* name) const
|
||||
{
|
||||
if (is_string_constant(name))
|
||||
return Constants::check(name);
|
||||
|
||||
return check_variable(name);
|
||||
}
|
||||
|
||||
int DynamicAtoms::check_variable(const char* name) const
|
||||
{
|
||||
string str;
|
||||
int ll;
|
||||
parse_variable(name, str, ll);
|
||||
Tvarmap::const_iterator it = vars.find(str.c_str());
|
||||
|
||||
if (it != vars.end()) {
|
||||
const Tlagmap& lmap = (*it).second;
|
||||
Tlagmap::const_iterator itt = lmap.find(ll);
|
||||
if (itt != lmap.end())
|
||||
return (*itt).second;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
void DynamicAtoms::assign(const char* name, int t)
|
||||
{
|
||||
if (is_string_constant(name))
|
||||
assign_constant(name, t);
|
||||
else
|
||||
assign_variable(name, t);
|
||||
}
|
||||
|
||||
void DynamicAtoms::assign_constant(const char* name, int t)
|
||||
{
|
||||
double val;
|
||||
sscanf(name, "%lf", &val);
|
||||
add_constant(t, val);
|
||||
}
|
||||
|
||||
// parse the name and then call assing_variable(varname, ll, t)
|
||||
|
||||
void DynamicAtoms::assign_variable(const char* name, int t)
|
||||
{
|
||||
int ll;
|
||||
string str;
|
||||
parse_variable(name, str, ll);
|
||||
// here str is just name without lead/lag
|
||||
const char* ss = varnames.insert(str.c_str());
|
||||
|
||||
assign_variable(ss, ll, t);
|
||||
}
|
||||
|
||||
void DynamicAtoms::assign_variable(const char* varname, int ll, int t)
|
||||
{
|
||||
if (indices.end() != indices.find(t))
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Attempt to assign already allocated tree index");
|
||||
|
||||
Tvarmap::iterator it = vars.find(varname);
|
||||
if (it != vars.end()) {
|
||||
Tlagmap& lmap = (*it).second;
|
||||
if (lmap.end() != lmap.find(ll))
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Attempt to assign already allocated variable");
|
||||
lmap.insert(Tlagmap::value_type(ll, t));
|
||||
} else {
|
||||
Tlagmap lmap;
|
||||
lmap.insert(Tlagmap::value_type(ll, t));
|
||||
vars.insert(Tvarmap::value_type(varname, lmap));
|
||||
}
|
||||
indices.insert(Tindexmap::value_type(t, varname));
|
||||
|
||||
nv++;
|
||||
if (ll < minlag)
|
||||
minlag = ll;
|
||||
if (ll > maxlead)
|
||||
maxlead = ll;
|
||||
}
|
||||
|
||||
void DynamicAtoms::unassign_variable(const char* varname, int ll, int t)
|
||||
{
|
||||
Tvarmap::iterator it = vars.find(varname);
|
||||
if (it != vars.end()) {
|
||||
Tlagmap& lmap = (*it).second;
|
||||
Tlagmap::iterator itt = lmap.find(ll);
|
||||
if (itt != lmap.end()) {
|
||||
if ((*itt).second == t) {
|
||||
// erase it from the lagmap; if it becomes empty,
|
||||
// erase the lagmap from varmap
|
||||
lmap.erase(itt);
|
||||
if (lmap.size() == 0)
|
||||
vars.erase(it);
|
||||
// erase it from the indices
|
||||
Tindexmap::iterator ittt = indices.find(t);
|
||||
if (ittt != indices.end())
|
||||
indices.erase(ittt);
|
||||
|
||||
nv--;
|
||||
if (ll == minlag || ll == maxlead)
|
||||
update_minmaxll();
|
||||
} else
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Tree index inconsistent in DynamicAtoms::unassign_variable");
|
||||
} else
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Lead/lag of the variable not found in DynamicAtoms::unassign_variable");
|
||||
} else
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Variable not found in DynamicAtoms::unassign_variable");
|
||||
}
|
||||
|
||||
void DynamicAtoms::update_minmaxll()
|
||||
{
|
||||
minlag = INT_MAX;
|
||||
maxlead =INT_MIN;
|
||||
for (Tvarmap::const_iterator it = vars.begin(); it != vars.end(); ++it) {
|
||||
const Tlagmap& lmap = (*it).second;
|
||||
for (Tlagmap::const_iterator itt = lmap.begin(); itt != lmap.end(); ++itt) {
|
||||
int ll = (*itt).first;
|
||||
if (ll < minlag)
|
||||
minlag = ll;
|
||||
if (ll > maxlead)
|
||||
maxlead = ll;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Collect the tree indices of all variable occurrences (every name at
 * every lead/lag) into a vector. */
vector<int> DynamicAtoms::variables() const
{
	vector<int> out;
	for (Tvarmap::const_iterator vit = vars.begin(); vit != vars.end(); ++vit) {
		const Tlagmap& lmap = (*vit).second;
		for (Tlagmap::const_iterator lit = lmap.begin(); lit != lmap.end(); ++lit)
			out.push_back((*lit).second);
	}
	return out;
}
|
||||
|
||||
void DynamicAtoms::varspan(int t, int& mlead, int& mlag) const
|
||||
{
|
||||
Tindexmap::const_iterator it = indices.find(t);
|
||||
if (indices.end() == it) {
|
||||
mlead = INT_MIN;
|
||||
mlag = INT_MAX;
|
||||
return;
|
||||
}
|
||||
varspan((*it).second, mlead, mlag);
|
||||
}
|
||||
|
||||
void DynamicAtoms::varspan(const char* name, int& mlead, int& mlag) const
|
||||
{
|
||||
Tvarmap::const_iterator it = vars.find(name);
|
||||
if (vars.end() == it) {
|
||||
mlead = INT_MIN;
|
||||
mlag = INT_MAX;
|
||||
return;
|
||||
}
|
||||
const Tlagmap& lmap = (*it).second;
|
||||
Tlagmap::const_iterator beg = lmap.begin();
|
||||
Tlagmap::const_reverse_iterator end = lmap.rbegin();
|
||||
mlag = (*beg).first;
|
||||
mlead = (*end).first;
|
||||
}
|
||||
|
||||
void DynamicAtoms::varspan(const vector<const char*>& names, int& mlead, int& mlag) const
|
||||
{
|
||||
mlead = INT_MIN;
|
||||
mlag = INT_MAX;
|
||||
for (unsigned int i = 0; i < names.size(); i++) {
|
||||
int lag, lead;
|
||||
varspan(names[i], lead, lag);
|
||||
if (lead > mlead)
|
||||
mlead = lead;
|
||||
if (lag < mlag)
|
||||
mlag = lag;
|
||||
}
|
||||
}
|
||||
|
||||
bool DynamicAtoms::is_named_atom(int t) const
|
||||
{
|
||||
return (indices.end() != indices.find(t));
|
||||
}
|
||||
|
||||
int DynamicAtoms::index(const char* name, int ll) const
|
||||
{
|
||||
Tvarmap::const_iterator it = vars.find(name);
|
||||
if (vars.end() != it) {
|
||||
const Tlagmap& lmap = (*it).second;
|
||||
Tlagmap::const_iterator itt = lmap.find(ll);
|
||||
if (lmap.end() != itt)
|
||||
return (*itt).second;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
/** Return the lead/lag -> tree index map for the named variable;
 * throws if the name is unknown. */
const DynamicAtoms::Tlagmap& DynamicAtoms::lagmap(const char* name) const
{
	Tvarmap::const_iterator pos = vars.find(name);
	if (pos == vars.end())
		throw ogu::Exception(__FILE__,__LINE__,
							 std::string("Couldn't find the name ")
							 + name + " in DynamicAtoms::lagmap");
	return (*pos).second;
}
|
||||
|
||||
const char* DynamicAtoms::name(int t) const
|
||||
{
|
||||
Tindexmap::const_iterator it = indices.find(t);
|
||||
if (indices.end() == it)
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Couldn't find tree index in DynamicAtoms::name");
|
||||
return (*it).second;
|
||||
}
|
||||
|
||||
int DynamicAtoms::lead(int t) const
|
||||
{
|
||||
const char* nam = name(t);
|
||||
const Tlagmap& lmap = lagmap(nam);
|
||||
Tlagmap::const_iterator it = lmap.begin();
|
||||
while (it != lmap.end() && (*it).second != t)
|
||||
++it;
|
||||
if (lmap.end() == it)
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Couldn't find the three index in DynamicAtoms::lead");
|
||||
return (*it).first;
|
||||
}
|
||||
|
||||
void DynamicAtoms::print() const
|
||||
{
|
||||
printf("names:\n");
|
||||
varnames.print();
|
||||
printf("constants:\n");
|
||||
Constants::print();
|
||||
printf("variables:\n");
|
||||
for (Tvarmap::const_iterator it = vars.begin();
|
||||
it != vars.end(); ++it) {
|
||||
const Tlagmap& lmap = (*it).second;
|
||||
for (Tlagmap::const_iterator itt = lmap.begin();
|
||||
itt != lmap.end(); ++itt)
|
||||
printf("$%d: %s(%d)\n", (*itt).second, (*it).first, (*itt).first);
|
||||
}
|
||||
printf("indices:\n");
|
||||
for (Tindexmap::const_iterator it = indices.begin();
|
||||
it != indices.end(); ++it)
|
||||
printf("t=%d ==> %s\n", (*it).first, (*it).second);
|
||||
}
|
||||
|
||||
/** Note that the str has been parsed by the lexicographic
|
||||
* analyzer. It can be either a variable or a double. So it is easy to
|
||||
* recognize it by the first character. */
|
||||
bool DynamicAtoms::is_string_constant(const char* str)
|
||||
{
|
||||
return str[0] == '.' || str[0] == '-' || (str[0] >= '0' && str[0] <= '9');
|
||||
}
|
||||
|
||||
/** Copy an existing ordering, but attach it to a different name vector
 * and DynamicAtoms instance; the counts and maps are taken over
 * verbatim from vo. */
VarOrdering::VarOrdering(const VarOrdering& vo, const vector<const char*>& vnames,
						 const DynamicAtoms& a)
	: n_stat(vo.n_stat), n_pred(vo.n_pred), n_both(vo.n_both), n_forw(vo.n_forw),
	  der_atoms(vo.der_atoms), positions(vo.positions),
	  outer2y(vo.outer2y), y2outer(vo.y2outer), varnames(vnames), atoms(a)
{
}
|
||||
|
||||
bool VarOrdering::check(int t) const
|
||||
{
|
||||
map<int,int>::const_iterator it = positions.find(t);
|
||||
return it != positions.end();
|
||||
}
|
||||
|
||||
int VarOrdering::get_pos_of(int t) const
|
||||
{
|
||||
map<int,int>::const_iterator it = positions.find(t);
|
||||
if (it != positions.end()) {
|
||||
return (*it).second;
|
||||
} else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Couldn't find the tree index in VarOrdering::get_pos_of");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/** Build the ordering (der_atoms, positions, n_stat/n_pred/n_both/
 * n_forw and the y2outer/outer2y maps) by classifying each variable
 * according to its lead/lag span and concatenating the categories in
 * the order requested by the ordering argument. The exception message
 * for a bad lead/lag used to name a non-existent function
 * "VarOrdering::do_pbspbfbf"; it now names do_general. */
void VarOrdering::do_general(ord_type ordering)
{
	// auxiliary vectors for setting der_atoms and map; "minus" holds
	// lagged occurrences, "pad" current ones, "plus" led ones
	vector<int> pred_minus;
	vector<int> both_minus;
	vector<int> stat;
	vector<int> pred_pad;
	vector<int> both_pad;
	vector<int> forw_pad;
	vector<int> both_plus;
	vector<int> forw_plus;

	// auxiliary vectors for setting y2outer and outer2y
	vector<int> y2o_stat;
	vector<int> y2o_pred;
	vector<int> y2o_both;
	vector<int> y2o_forw;

	for (unsigned int i = 0; i < varnames.size(); i++) {
		const char* ss = varnames[i];
		int lead;
		int lag;
		atoms.varspan(ss, lead, lag);
		if (lag == 0 && lead == 0) {
			// static: occurs only at the current period
			stat.push_back(atoms.index(ss, 0));
			y2o_stat.push_back(i);
		} else if (lag == -1 && lead < 1) {
			// predetermined: lagged but never led
			pred_minus.push_back(atoms.index(ss, -1));
			pred_pad.push_back(atoms.index(ss, 0));
			y2o_pred.push_back(i);
		} else if (lag > -1 && lead == 1) {
			// forward-looking: led but never lagged
			forw_pad.push_back(atoms.index(ss, 0));
			forw_plus.push_back(atoms.index(ss, 1));
			y2o_forw.push_back(i);
		} else if (lag == -1 && lead == 1) {
			// occurs both lagged and led
			both_minus.push_back(atoms.index(ss, -1));
			both_pad.push_back(atoms.index(ss, 0));
			both_plus.push_back(atoms.index(ss, 1));
			y2o_both.push_back(i);
		} else {
			throw ogu::Exception(__FILE__,__LINE__,
								 "A wrong lag/lead of a variable in VarOrdering::do_general");
		}
	}

	// here we fill ords according to ordering
	vector<int>* ords[8];
	if (ordering == pbspbfbf) {
		ords[0] = &pred_minus;
		ords[1] = &both_minus;
		ords[2] = &stat;
		ords[3] = &pred_pad;
		ords[4] = &both_pad;
		ords[5] = &forw_pad;
		ords[6] = &both_plus;
		ords[7] = &forw_plus;
	} else if (ordering == bfspbfpb) {
		ords[0] = &both_plus;
		ords[1] = &forw_plus;
		ords[2] = &stat;
		ords[3] = &pred_pad;
		ords[4] = &both_pad;
		ords[5] = &forw_pad;
		ords[6] = &pred_minus;
		ords[7] = &both_minus;
	} else { // BEWARE: when implementing a new ordering, check also a
		// code below setting y2outer
		throw ogu::Exception(__FILE__,__LINE__,
							 "Ordering not implemented in VarOrdering::do_general");
	}

	// make der_atoms and positions; off counts every slot, including
	// the -1 ones for occurrences that do not exist in the tree
	int off = 0;
	for (unsigned int i = 0; i < 8; i++)
		for (unsigned int j = 0; j < (ords[i])->size(); j++, off++)
			if ((*(ords[i]))[j] != -1) {
				der_atoms.push_back((*(ords[i]))[j]);
				positions.insert(std::pair<int,int>((*(ords[i]))[j], off));
			}

	// set integer constants
	n_stat = stat.size();
	n_pred = pred_pad.size();
	n_both = both_pad.size();
	n_forw = forw_pad.size();

	// make y2outer mapping; this fixed order is what the two
	// implemented orderings above both assume
	y2outer.insert(y2outer.end(), y2o_stat.begin(), y2o_stat.end());
	y2outer.insert(y2outer.end(), y2o_pred.begin(), y2o_pred.end());
	y2outer.insert(y2outer.end(), y2o_both.begin(), y2o_both.end());
	y2outer.insert(y2outer.end(), y2o_forw.begin(), y2o_forw.end());
	// make outer2y mapping as the inverse permutation
	outer2y.resize(y2outer.size(), -1);
	for (unsigned int i = 0; i < y2outer.size(); i++)
		outer2y[y2outer[i]] = i;
}
|
||||
|
||||
/** Build the ordering by increasing time: all occurrences at the
 * minimal lag come first, then the next period, and so on; within a
 * period, variables follow the order of varnames. y2outer/outer2y are
 * identities, and n_stat/n_pred/n_both/n_forw are set from each
 * variable's individual span. */
void VarOrdering::do_increasing_time()
{
	// get maxlead and minlag of the variables
	int mlag, mlead;
	atoms.varspan(varnames, mlead, mlag);
	// setup the matrix of tree indices (period x variable); if there
	// is no occurrence, the index is set to -1
	vector<int> ll_init(varnames.size(), -1);
	vector<vector<int> > tree_ind(mlead-mlag+1, ll_init);
	for (unsigned int iv = 0; iv < varnames.size(); iv++) {
		try {
			const DynamicAtoms::Tlagmap& lmap = atoms.lagmap(varnames[iv]);
			for (DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
				 it != lmap.end(); ++it) {
				int ll = (*it).first;
				int t = (*it).second;
				tree_ind[ll-mlag][iv] = t;
			}
		} catch (const ogu::Exception& e) {
			// ignore the error of not found variable in the tree
		}
	}

	// setup der_atoms and positions; a position is the slot in the
	// conceptual (period x variable) grid, skipping nothing
	for (int ll = mlag; ll <= mlead; ll++)
		for (unsigned int iv = 0; iv < varnames.size(); iv++) {
			int t = tree_ind[ll-mlag][iv];
			if (t != -1) {
				der_atoms.push_back(t);
				int pos = (ll-mlag)*varnames.size() + iv;
				positions.insert(map<int,int>::value_type(t, pos));
			}
		}

	// set outer2y and y2outer to identities
	for (unsigned int iv = 0; iv < varnames.size(); iv++) {
		outer2y.push_back(iv);
		y2outer.push_back(iv);
	}

	// set n_stat, n_pred, n_both, and n_forw from each variable's
	// own lead/lag span
	for (unsigned int iv = 0; iv < varnames.size(); iv++) {
		int mmlag, mmlead;
		atoms.varspan(varnames[iv], mmlead, mmlag);
		if (mmlead == 0 && mmlag == 0) {
			n_stat++;
		} else if (mmlead <= 0 && mmlag < 0) {
			n_pred++;
		} else if (mmlead > 0 && mmlag >=0) {
			n_forw++;
		} else if (mmlead > 0 && mmlag < 0) {
			n_both++;
		} else if (mmlead < mmlag) {
			// variable does not occur in the tree, counted as static
			n_stat++;
		} else {
			throw ogu::Exception(__FILE__,__LINE__,
								 "A wrong lag/lead of a variable in VarOrdering::do_increasing_time");
		}
	}
}
|
||||
|
||||
void VarOrdering::print() const
|
||||
{
|
||||
printf("nstat=%d, npred=%d, nboth=%d, nforw=%d\n", n_stat, n_pred, n_both, n_forw);
|
||||
printf("der_atoms:\n");
|
||||
for (unsigned int i = 0; i < der_atoms.size(); i++)
|
||||
printf(" %d", der_atoms[i]);
|
||||
printf("\nmap:\n");
|
||||
for (map<int,int>::const_iterator it = positions.begin(); it != positions.end(); ++it)
|
||||
printf(" [%d->%d]", (*it).first, (*it).second);
|
||||
printf("\ny2outer:\n");
|
||||
for (unsigned int i = 0; i < y2outer.size(); i++)
|
||||
printf(" %d", y2outer[i]);
|
||||
printf("\nouter2y:\n");
|
||||
for (unsigned int i = 0; i < outer2y.size(); i++)
|
||||
printf(" %d", outer2y[i]);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,403 @@
|
|||
// Copyright (C) 2005, Ondra Kamenik
|
||||
|
||||
// $Id: dynamic_atoms.h 2269 2008-11-23 14:33:22Z michel $
|
||||
|
||||
#ifndef OGP_DYNAMIC_ATOMS_H
|
||||
#define OGP_DYNAMIC_ATOMS_H
|
||||
|
||||
#include "formula_parser.h"
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include <cstring>
|
||||
|
||||
namespace ogp {
|
||||
using std::vector;
|
||||
using std::map;
|
||||
using std::set;
|
||||
using std::string;
|
||||
|
||||
/** Strict weak ordering on C strings, for use as a comparator in
 * maps/sets keyed by const char*. */
struct ltstr {
	bool operator()(const char* a1, const char* a2) const
		{ return std::strcmp(a1, a2) < 0; }
};
|
||||
|
||||
/** Class storing names. We will keep names of variables in
|
||||
* various places, and all these pointers will point to one
|
||||
* storage, which will be responsible for allocation and
|
||||
* deallocation. The main function of the class is to allocate
|
||||
* space for names, and return a pointer of the stored name if
|
||||
* required. */
|
||||
/** Class storing names. We will keep names of variables in
 * various places, and all these pointers will point to one
 * storage, which will be responsible for allocation and
 * deallocation. The main function of the class is to allocate
 * space for names, and return a pointer of the stored name if
 * required. */
class NameStorage {
protected:
	/** Vector of names allocated, this is the storage. */
	vector<char*> name_store;
	/** Set useful to quickly decide if the name is already
	 * allocated or not. */
	set<const char*, ltstr> name_set;
public:
	NameStorage() {}
	/** Deep copy: the new storage owns its own copies of the names. */
	NameStorage(const NameStorage& stor);
	virtual ~NameStorage();
	/** Query for the name. If the name has been stored, it
	 * returns its address, otherwise 0. */
	const char* query(const char* name) const;
	/** Insert the name if it has not been inserted yet, and
	 * return its new or old allocation. */
	const char* insert(const char* name);
	/** Number of stored names. */
	int num() const
		{return (int)name_store.size();}
	/** i-th stored name, in insertion order. */
	const char* get_name(int i) const
		{return name_store[i];}
	/** Debug print. */
	void print() const;
};
|
||||
|
||||
/** Storage of numerical constants identified by tree indices; also
 * implements the AtomValues interface for pushing the values into an
 * evaluation tree. */
class Constants : public AtomValues {
public:
	/** Type for a map mapping tree indices to double values. */
	typedef map<int,double> Tconstantmap;
	typedef map<int,int> Tintintmap;
protected:
	/** Map mapping a tree index of a constant to its double value. */
	Tconstantmap cmap;
public:
	Constants() {}
	/** Copy constructor. */
	Constants(const Constants& c)
		: cmap(c.cmap), cinvmap(c.cinvmap) {}
	/** Copy constructor registering the constants in the given
	 * tree. The mapping from old tree indices to new ones is
	 * traced in tmap. */
	Constants(const Constants& c, OperationTree& otree, Tintintmap& tmap)
		{import_constants(c, otree, tmap);}
	/** Import constants registering their tree indices in the
	 * given tree. The mapping from old tree indices to new ones
	 * is traced in tmap. */
	void import_constants(const Constants& c, OperationTree& otree, Tintintmap& tmap);
	/** Implements AtomValues interface. This sets the values to
	 * the evaluation tree EvalTree. */
	void setValues(EvalTree& et) const;
	/** This adds a constant with the given tree index. The
	 * constant must be checked previously and asserted that it
	 * does not exist. */
	void add_constant(int t, double val);
	/** Returns true if the tree index is either a hardwired
	 * constant (initial number OperationTree:num_constants in
	 * OperationTree) or the tree index is a registered constant
	 * by add_constant method. */
	bool is_constant(int t) const;
	/** Value of a registered constant; throws if t is not one. */
	double get_constant_value(int t) const;
	/** Return -1 if the given string representation of a constant
	 * is not among the constants (double representations). If it
	 * is, its tree index is returned. */
	int check(const char* str) const;
	/** Debug print. */
	void print() const;
	const Tconstantmap& get_constantmap() const
		{return cmap;}
private:
	/** Inverse map to Tconstantmap. */
	typedef map<double,int> Tconstantinvmap;
	/** This is an inverse map to cmap. This is only used for fast
	 * queries for the existing double constants in check
	 * method and add_constant. */
	Tconstantinvmap cinvmap;
};
|
||||
|
||||
/** This class is a parent to Atoms classes which distinguish between
|
||||
* constants (numerical literals), and variables with lags and
|
||||
* leads. This abstraction does not distinguish between a parameter
|
||||
* and a variable without lag or lead. In this sense, everything is a
|
||||
* variable.*/
|
||||
class DynamicAtoms : public Atoms, public Constants {
|
||||
public:
|
||||
/** Definition of a type mapping lags to the indices of the variables. */
|
||||
typedef map<int,int> Tlagmap;
|
||||
protected:
|
||||
/** Definition of a type mapping names of the atoms to Tlagmap. */
|
||||
typedef map<const char*, Tlagmap, ltstr> Tvarmap;
|
||||
/** Definition of a type mapping indices of variables to the variable names. */
|
||||
typedef map<int, const char*> Tindexmap;
|
||||
/** This is just a storage for variable names, since all other
|
||||
* instances of a variable name just point to the memory
|
||||
* allocated by this object. */
|
||||
NameStorage varnames;
|
||||
/** This is the map for variables. Each variable name is
|
||||
* mapped to the Tlagmap, which maps lags/leads to the nulary
|
||||
* term indices in the tree. */
|
||||
Tvarmap vars;
|
||||
/** This is almost inverse map to the vars. It maps variable
|
||||
* indices to the names. A returned name can be in turn used
|
||||
* as a key in vars. */
|
||||
Tindexmap indices;
|
||||
|
||||
/** Number of variables. */
|
||||
int nv;
|
||||
/** Minimum lag, if there is at least one lag, than this is a negative number. */
|
||||
int minlag;
|
||||
/** Maximum lead, if there is at least one lead, than this is a positive number. */
|
||||
int maxlead;
|
||||
public:
|
||||
/** Construct empty DynamicAtoms. */
|
||||
DynamicAtoms();
|
||||
DynamicAtoms(const DynamicAtoms& da);
|
||||
virtual ~DynamicAtoms() {}
|
||||
/** Check the nulary term identified by its string
|
||||
* representation. The nulary term can be either a constant or
|
||||
* a variable. If constant, -1 is returned so that it could be
|
||||
* assigned regardless if the same constant has already
|
||||
* appeared or not. If variable, then -1 is returned only if
|
||||
* the variable has not been assigned an index, otherwise the
|
||||
* assigned index is returned. */
|
||||
int check(const char* name) const;
|
||||
/** Assign the nulary term identified by its string
|
||||
* representation. This method should be called when check()
|
||||
* returns -1. */
|
||||
void assign(const char* name, int t);
|
||||
/** Return a number of all variables. */
|
||||
int nvar() const
|
||||
{ return nv; }
|
||||
/** Return the vector of variable indices. */
|
||||
vector<int> variables() const;
|
||||
/** Return max lead and min lag for a variable given by the
|
||||
* index. If a variable cannot be found, the method retursn
|
||||
* the smallest integer as maxlead and the largest integer as
|
||||
* minlag. */
|
||||
void varspan(int t, int& mlead, int& mlag) const;
|
||||
/** Return max lead and min lag for a variable given by the
|
||||
* name (without lead, lag). The same is valid if the variable
|
||||
* name cannot be found. */
|
||||
void varspan(const char* name, int& mlead, int& mlag) const;
|
||||
/** Return max lead and min lag for a vector of variables given by the names. */
|
||||
void varspan(const vector<const char*>& names, int& mlead, int& mlag) const;
|
||||
/** Return true for all tree indices corresponding to a
|
||||
* variable in the sense of this class. (This is parameters,
|
||||
* exo and endo). Since the semantics of 'variable' will be
|
||||
* changed in subclasses, we use name 'named atom'. These are
|
||||
* all atoms but constants. */
|
||||
bool is_named_atom(int t) const;
|
||||
/** Return index of the variable described by the variable
|
||||
* name and lag/lead. If it doesn't exist, return -1. */
|
||||
int index(const char* name, int ll) const;
|
||||
/** Return the lag map for the variable name. */
|
||||
const Tlagmap& lagmap(const char* name) const;
|
||||
/** Return the variable name for the tree index. It throws an
|
||||
* exception if the tree index t is not a named atom. */
|
||||
const char* name(int t) const;
|
||||
/** Return the lead/lag for the tree index. It throws an
|
||||
* exception if the tree index t is not a named atom. */
|
||||
int lead(int t) const;
|
||||
/** Return maximum lead. */
|
||||
int get_maxlead() const
|
||||
{return maxlead;}
|
||||
/** Return minimum lag. */
|
||||
int get_minlag() const
|
||||
{return minlag;}
|
||||
/** Return the name storage to allow querying to other
|
||||
* classes. */
|
||||
const NameStorage& get_name_storage() const
|
||||
{return varnames;}
|
||||
/** Assign the variable with a given lead. The varname must be
|
||||
* from the varnames storage. The method checks if the
|
||||
 * variable with the given lead/lag is already assigned. If so, an
|
||||
* exception is thrown. */
|
||||
void assign_variable(const char* varname, int ll, int t);
|
||||
/** Unassign the variable with a given lead and given tree
|
||||
* index. The tree index is only provided as a check. An
|
||||
* exception is thrown if the name, ll, and the tree index t
|
||||
* are not consistent. The method also updates nv, indices,
|
||||
* maxlead and minlag. The varname must be from the varnames
|
||||
* storage. */
|
||||
void unassign_variable(const char* varname, int ll, int t);
|
||||
/** Debug print. */
|
||||
void print() const;
|
||||
protected:
|
||||
/** Do the check for the variable. A subclass may need to
|
||||
* reimplement this so that it could raise an error if the
|
||||
* variable is not among a given list. */
|
||||
virtual int check_variable(const char* name) const;
|
||||
/** Assign the constant. */
|
||||
void assign_constant(const char* name, int t);
|
||||
/** Assign the variable. */
|
||||
void assign_variable(const char* name, int t);
|
||||
/** The method just updates minlag or/and maxlead. Note that
|
||||
* when assigning variables, the update is done when inserting
|
||||
* to the maps, however, if removing a variable, we need to
|
||||
* call this method. */
|
||||
void update_minmaxll();
|
||||
/** The method parses the string to recover a variable name
|
||||
* and lag/lead ll. The variable name doesn't contain a lead/lag. */
|
||||
virtual void parse_variable(const char* in, string& out, int& ll) const = 0;
|
||||
public:
|
||||
/** Return true if the str represents a double.*/
|
||||
static bool is_string_constant(const char* str);
|
||||
};
|
||||
|
||||
|
||||
/** This class is a parent of all orderings of the dynamic atoms
|
||||
* of variables which can appear before t, at t, or after t. It
|
||||
* encapsulates the ordering, and the information about the number
|
||||
* of static (appearing only at time t) predetermined (appearing
|
||||
* before t and possibly at t), both (appearing before t and after
|
||||
* t and possibly at t) and forward looking (appearing after t and
|
||||
* possibly at t).
|
||||
*
|
||||
* The constructor takes a list of variable names. The class also
|
||||
* provides mapping from the ordering of the variables in the list
|
||||
* (outer) to the new ordering (at time t) and back.
|
||||
*
|
||||
* The user of the subclass must call do_ordering() after
|
||||
* initialization.
|
||||
*
|
||||
* The class contains a few preimplemented methods for
|
||||
* ordering. The class is used in this way: Make a subclass, and
|
||||
* implement pure virtual do_ordering() by just plugging a
|
||||
* preimplemented method, or plugging your own implementation. The
|
||||
* method do_ordering() is called by the user after the constructor.
|
||||
*/
|
||||
class VarOrdering {
protected:
	/** Number of static variables. */
	int n_stat;
	/** Number of predetermined variables. */
	int n_pred;
	/** Number of both variables. */
	int n_both;
	/** Number of forward looking variables. */
	int n_forw;
	/** This is a set of tree indices corresponding to the
	 * variables at all times as they occur in the formulas. In
	 * fact, since this is used only for derivatives, the ordering
	 * of this vector is only important for ordering of the
	 * derivatives, in other contexts the ordering is not
	 * important, so it is rather a set of indices.*/
	vector<int> der_atoms;
	/** This maps tree index of the variable to the position in
	 * the row of the ordering. One should be careful with making
	 * space in the positions for variables not appearing at time
	 * t. For instance in the pred(t-1), both(t-1), stat(t),
	 * pred(t), both(t), forw(t), both(t+1), forw(t+1) ordering,
	 * the variables x(t-1), y(t-1), x(t+1), z(t-1), z(t), and
	 * z(t+1) having tree indices 6,5,4,3,2,1 will be ordered as
	 * follows: y(t-1), x(t-1), z(t-1), [y(t)], [x(t)], z(t),
	 * x(t+1), where a bracketed expression means non-existent but
	 * occupying a space. The map thus will look as follows:
	 * {5->0, 6->1, 3->2, 2->5, 4->6}. Note that nothing is mapped
	 * to positions 3 and 4. */
	map<int,int> positions;
	/** This maps an ordering of the list of variables in
	 * constructor to the new ordering (at time t). The length is
	 * the number of variables. */
	vector<int> outer2y;
	/** This maps a new ordering to the ordering of the list of
	 * variables in constructor (at time t). The length is the
	 * number of variables. */
	vector<int> y2outer;
	/** This is just a reference for variable names to keep it
	 * from constructor to do_ordering() implementations. */
	const vector<const char*>& varnames;
	/** This is just a reference to atoms to keep it from
	 * constructor to do_ordering() implementations. */
	const DynamicAtoms& atoms;
public:
	/** This is an enum type for an ordering type implemented by
	 * do_general. */
	enum ord_type {pbspbfbf, bfspbfpb};
	/** Construct the ordering of the variables given by the names
	 * with their dynamic occurrences defined by the atoms. It
	 * calls the virtual method do_ordering which can be
	 * reimplemented. */
	VarOrdering(const vector<const char*>& vnames, const DynamicAtoms& a)
		: n_stat(0), n_pred(0), n_both(0), n_forw(0), varnames(vnames), atoms(a)
		{}
	/** Copy-like constructor rebinding the ordering to new name
	 * storage and atoms. */
	VarOrdering(const VarOrdering& vo, const vector<const char*>& vnames,
				const DynamicAtoms& a);
	/** Virtual copy constructor; subclasses return a copy of the
	 * proper dynamic type bound to the given names/atoms. */
	virtual VarOrdering* clone(const vector<const char*>& vnames,
							   const DynamicAtoms& a) const = 0;
	/** Destructor does nothing here. */
	virtual ~VarOrdering() {}
	/** This is the method setting the ordering and the map. A
	 * subclass must reimplement it, possibly using a
	 * preimplemented ordering. This method must be called by the
	 * user after the class has been created. */
	virtual void do_ordering() = 0;
	/** Return number of static. */
	int nstat() const
		{return n_stat;}
	/** Return number of predetermined. */
	int npred() const
		{return n_pred;}
	/** Return number of both. */
	int nboth() const
		{return n_both;}
	/** Return number of forward looking. */
	int nforw() const
		{return n_forw;}
	/** Return the set of tree indices for derivatives. */
	const vector<int>& get_der_atoms() const
		{return der_atoms;}
	/** Return the y2outer. */
	const vector<int>& get_y2outer() const
		{return y2outer;}
	/** Return the outer2y. */
	const vector<int>& get_outer2y() const
		{return outer2y;}
	/** Query the atom given by the tree index. True is returned
	 * if the atom is one of the variables in the object. */
	bool check(int t) const;
	/** Return the position of the atom (nulary term) given by a
	 * tree index. It is a lookup to the map. If the atom cannot
	 * be found, the exception is raised. */
	int get_pos_of(int t) const;
	/** This returns a length of ordered row of atoms. In all
	 * cases so far, it does not depend on the ordering and it is
	 * as follows. */
	int length() const
		{return n_stat+2*n_pred+3*n_both+2*n_forw;}
	/** Debug print. */
	void print() const;
protected:
	/** This is a general ordering method which orders the
	 * variables by the given ordering ord_type. See documentation
	 * for respective do_ methods. */
	void do_general(ord_type ordering);
	/** This is a preimplemented ordering for do_ordering()
	 * method. It assumes that the variables appear only at time
	 * t-1, t, t+1. It orders the atoms as pred(t-1), both(t-1),
	 * stat(t), pred(t), both(t), forw(t), both(t+1),
	 * forw(t+1). It builds the der_atoms, the map of positions,
	 * as well as y2outer and outer2y. */
	void do_pbspbfbf()
		{do_general(pbspbfbf);}
	/** This is a preimplemented ordering for do_ordering()
	 * method. It assumes that the variables appear only at time
	 * t-1, t, t+1. It orders the atoms as both(t+1), forw(t+1),
	 * stat(t), pred(t), both(t), forw(t), pred(t-1),
	 * both(t-1). It builds the der_atoms, the map of positions,
	 * as well as y2outer and outer2y. */
	void do_bfspbfpb()
		{do_general(bfspbfpb);}
	/** This is a preimplemented ordering for do_ordering()
	 * method. It makes no assumptions about occurrences of
	 * variables at different times. It orders the atoms with
	 * increasing time keeping the given ordering within one
	 * time. This implies that y2outer and outer2y will be
	 * identities. The der_atoms will be just a sequence of atoms
	 * from the least to the most time preserving the order of atoms
	 * within one time. */
	void do_increasing_time();
private:
	/** Declare this copy constructor as private to hide it. */
	VarOrdering(const VarOrdering& vo);
};
|
||||
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,482 @@
|
|||
// Copyright (C) 2005, Ondra Kamenik
|
||||
|
||||
// $Id: fine_atoms.cpp 1759 2008-03-31 14:25:20Z kamenik $
|
||||
|
||||
#include "utils/cc/exception.h"
|
||||
|
||||
#include "parser_exception.h"
|
||||
#include "fine_atoms.h"
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
/** Build the outer ordering of all (endo and exo) variables from the
 * user-supplied name sequence. Names are translated to the storage of
 * the atoms; an exception is thrown if a name is not a declared
 * symbol, if a name is neither endogenous nor exogenous, or if the
 * list does not cover every endogenous and exogenous variable. */
AllvarOuterOrdering::AllvarOuterOrdering(const vector<const char*>& allvar_outer,
										 const FineAtoms& a)
	: atoms(a), allvar(),
	  endo2all(a.get_endovars().size(), -1),
	  exo2all(a.get_exovars().size(), -1)
{
	// fill in the allvar from allvar_outer
	for (unsigned int i = 0; i < allvar_outer.size(); i++) {
		const char* s = atoms.varnames.query(allvar_outer[i]);
		if (s)
			allvar.push_back(s);
		else
			throw ogu::Exception(__FILE__, __LINE__,
								 string("Variable ") + allvar_outer[i] + " is not a declared symbol in AllvarOuterOrdering constructor");
	}

	// fill in endo2all and exo2all
	for (unsigned int i = 0; i < allvar.size(); i++) {
		Tvarintmap::const_iterator it = atoms.endo_outer_map.find(allvar[i]);
		if (it != atoms.endo_outer_map.end())
			endo2all[(*it).second] = i;
		else {
			it = atoms.exo_outer_map.find(allvar[i]);
			if (it != atoms.exo_outer_map.end())
				exo2all[(*it).second] = i;
			else
				throw ogu::Exception(__FILE__, __LINE__,
									 string("Name ") + allvar[i] + " is neither endogenous nor exogenous variable in AllvarOuterOrdering constructor");
		}
	}

	// check whether everything has been filled: scan each mapping for
	// the first slot still holding the -1 sentinel
	unsigned int iendo = 0;
	while (iendo < endo2all.size() && endo2all[iendo] != -1) iendo++;
	unsigned int iexo = 0;
	while (iexo < exo2all.size() && exo2all[iexo] != -1) iexo++;
	if (iendo < endo2all.size())
		throw ogu::Exception(__FILE__, __LINE__,
							 string("Endogenous variable ") + atoms.get_endovars()[iendo] +
							 " not found in outer all ordering in AllvarOuterOrdering constructor");
	if (iexo < exo2all.size())
		throw ogu::Exception(__FILE__, __LINE__,
							 string("Exogenous variable ") + atoms.get_exovars()[iexo] +
							 " not found in outer all ordering in AllvarOuterOrdering constructor");
}

/** Copy constructor re-anchoring the name pointers to the storage of
 * the provided atoms; the index mappings are copied verbatim.
 * NOTE(review): the result of varnames.query() is pushed back without
 * a NULL check — this assumes `a` declares every name occurring in
 * avo.allvar; TODO confirm. */
AllvarOuterOrdering::AllvarOuterOrdering(const AllvarOuterOrdering& avo,
										 const FineAtoms& a)
	: atoms(a), allvar(),
	  endo2all(avo.endo2all),
	  exo2all(avo.exo2all)
{
	// fill in the allvar from avo.allvar
	for (unsigned int i = 0; i < avo.allvar.size(); i++) {
		const char* s = atoms.varnames.query(avo.allvar[i]);
		allvar.push_back(s);
	}
}
|
||||
|
||||
|
||||
/** Copy constructor. All name vectors and their outer maps are
 * rebuilt so that the pointers refer to this object's own name
 * storage (inherited from the DynamicAtoms copy). The internal
 * endo/exo orderings are cloned if they were created, and the allvar
 * outer ordering is copied if it was created.
 * Throws ogu::Exception if a name from the source is missing in the
 * copied name storage (would indicate an inconsistent source object).
 * NOTE(review): the order_type member does not appear to be copied
 * here — confirm whether that is intentional. */
FineAtoms::FineAtoms(const FineAtoms& fa)
	: DynamicAtoms(fa), params(), endovars(), exovars(),
	  endo_order(NULL), exo_order(NULL), allvar_order(NULL),
	  der_atoms(fa.der_atoms),
	  endo_atoms_map(fa.endo_atoms_map),
	  exo_atoms_map(fa.exo_atoms_map)
{
	// fill in params
	for (unsigned int i = 0; i < fa.params.size(); i++) {
		const char* s = varnames.query(fa.params[i]);
		if (! s)
			throw ogu::Exception(__FILE__, __LINE__,
								 string("Parameter ") + fa.params[i] + " does not exist in FineAtoms copy constructor");
		params.push_back(s);
		param_outer_map.insert(Tvarintmap::value_type(s, params.size()-1));
	}
	// fill in endovars
	for (unsigned int i = 0; i < fa.endovars.size(); i++) {
		const char* s = varnames.query(fa.endovars[i]);
		if (! s)
			throw ogu::Exception(__FILE__, __LINE__,
								 string("Endo variable ") + fa.endovars[i] + " does not exist in FineAtoms copy constructor");
		endovars.push_back(s);
		endo_outer_map.insert(Tvarintmap::value_type(s, endovars.size()-1));
	}
	// fill in exovars
	for (unsigned int i = 0; i < fa.exovars.size(); i++) {
		const char* s = varnames.query(fa.exovars[i]);
		if (! s)
			throw ogu::Exception(__FILE__, __LINE__,
								 string("Exo variable ") + fa.exovars[i] + " does not exist in FineAtoms copy constructor");
		exovars.push_back(s);
		exo_outer_map.insert(Tvarintmap::value_type(s, exovars.size()-1));
	}

	// clone the internal orderings, binding them to this object
	if (fa.endo_order)
		endo_order = fa.endo_order->clone(endovars, *this);

	if (fa.exo_order)
		exo_order = fa.exo_order->clone(exovars, *this);

	if (fa.allvar_order)
		allvar_order = new AllvarOuterOrdering(*(fa.allvar_order), *this);
}
|
||||
|
||||
/** Check a variable string. The base name (the string with the
 * lead/lag stripped by parse_variable) must have been declared,
 * otherwise a ParserException is thrown; on success the check is
 * delegated to DynamicAtoms::check_variable. */
int FineAtoms::check_variable(const char* name) const
{
	string str;
	int ll;
	parse_variable(name, str, ll);
	if (varnames.query(str.c_str()))
		return DynamicAtoms::check_variable(name);
	else {
		throw ParserException(string("Variable <")+str+"> not declared.",0);
		return -1; // unreachable; keeps all control paths returning a value
	}
}

/** Return the number of periods spanned by the exogenous variables,
 * i.e. max lead - min lag + 1. */
int FineAtoms::num_exo_periods() const
{
	int mlead, mlag;
	exovarspan(mlead, mlag);
	return mlead-mlag+1;
}

/** Finish parsing: build the internal endo/exo orderings and set the
 * default allvar outer ordering, which is the concatenation of the
 * outer endogenous and outer exogenous orderings. */
void FineAtoms::parsing_finished(VarOrdering::ord_type ot)
{
	make_internal_orderings(ot);

	// by default, concatenate outer endo and outer exo and make it as
	// allvar outer:
	vector<const char*> allvar_tmp;
	allvar_tmp.insert(allvar_tmp.end(), endovars.begin(), endovars.end());
	allvar_tmp.insert(allvar_tmp.end(), exovars.begin(), exovars.end());

	if (allvar_order)
		delete allvar_order;
	allvar_order = new AllvarOuterOrdering(allvar_tmp, *this);
}

/** Finish parsing with an explicit, user-supplied allvar outer
 * ordering (must exhaustively list all endo and exo variables).
 * NOTE(review): `allvar` is taken by value — a const reference would
 * avoid a copy, but the definition must match the declaration in
 * fine_atoms.h. */
void FineAtoms::parsing_finished(VarOrdering::ord_type ot,
								 const vector<const char*> allvar)
{
	make_internal_orderings(ot);
	if (allvar_order)
		delete allvar_order;
	allvar_order = new AllvarOuterOrdering(allvar, *this);
}
|
||||
|
||||
/** Return the outer ordering of all (endo and exo) variables.
 * Throws ogu::Exception if called before parsing_finished().
 * (Fixed: the error message previously said "get_allvars".) */
const vector<const char*>& FineAtoms::get_allvar() const
{
	if (! allvar_order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::get_allvar called before parsing_finished");

	return allvar_order->get_allvar();
}
|
||||
|
||||
/** Return the mapping from the outer endogenous ordering to the
 * allvar outer ordering. Throws if called before parsing_finished(). */
const vector<int>& FineAtoms::outer_endo2all() const
{
	if (! allvar_order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::outer_endo2all called before parsing_finished");

	return allvar_order->get_endo2all();
}

/** Return the mapping from the outer exogenous ordering to the
 * allvar outer ordering. Throws if called before parsing_finished(). */
const vector<int>& FineAtoms::outer_exo2all() const
{
	if (! allvar_order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::outer_exo2all called before parsing_finished");

	return allvar_order->get_exo2all();
}


/** Return the tree indices of the variables with respect to which
 * derivatives are taken (the der_atoms set built by
 * make_internal_orderings). Throws if called before
 * parsing_finished(). */
vector<int> FineAtoms::variables() const
{
	if (endo_order) {
		return der_atoms;
	} else {
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::variables called before parsing_finished");
		return vector<int>(); // unreachable
	}
}
|
||||
|
||||
int FineAtoms::nstat() const
|
||||
{
|
||||
if (endo_order) {
|
||||
return endo_order->nstat();
|
||||
} else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"FineAtoms::nstat called before parsing_finished");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int FineAtoms::npred() const
|
||||
{
|
||||
if (endo_order) {
|
||||
return endo_order->npred();
|
||||
} else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"FineAtoms::npred called before parsing_finished");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int FineAtoms::nboth() const
|
||||
{
|
||||
if (endo_order) {
|
||||
return endo_order->nboth();
|
||||
} else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"FineAtoms::nboth called before parsing_finished");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int FineAtoms::nforw() const
|
||||
{
|
||||
if (endo_order) {
|
||||
return endo_order->nforw();
|
||||
} else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"FineAtoms::nforw called before parsing_finished");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int FineAtoms::get_pos_of_endo(int t) const
|
||||
{
|
||||
if (endo_order) {
|
||||
return endo_order->get_pos_of(t);
|
||||
} else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"FineAtoms::get_pos_of_endo called before parsing_finished");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int FineAtoms::get_pos_of_exo(int t) const
|
||||
{
|
||||
if (exo_order) {
|
||||
return exo_order->get_pos_of(t);
|
||||
} else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"FineAtoms::get_pos_of_exo called before parsing_finished");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int FineAtoms::get_pos_of_all(int t) const
|
||||
{
|
||||
if (endo_order && exo_order) {
|
||||
if (endo_order->check(t))
|
||||
return endo_order->get_pos_of(t);
|
||||
else if (exo_order->check(t))
|
||||
return endo_order->length() + exo_order->get_pos_of(t);
|
||||
else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Atom is not endo nor exo in FineAtoms::get_pos_of_all");
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"FineAtoms::get_pos_of_exo called before parsing_finished");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/** Map from the internal (at time t) endogenous ordering to the
 * outer endogenous ordering. Throws if called before
 * parsing_finished(). */
const vector<int>& FineAtoms::y2outer_endo() const
{
	if (! endo_order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::y2outer_endo called before parsing_finished");
	return endo_order->get_y2outer();
}

/** Map from the outer endogenous ordering to the internal (at time
 * t) endogenous ordering. Throws if called before
 * parsing_finished(). */
const vector<int>& FineAtoms::outer2y_endo() const
{
	if (! endo_order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::outer2y_endo called before parsing_finished");
	return endo_order->get_outer2y();
}
|
||||
|
||||
/** Map from the internal exogenous ordering to the outer exogenous
 * ordering. Throws if called before parsing_finished().
 * (Fixed: the error message previously named y2outer_endo.) */
const vector<int>& FineAtoms::y2outer_exo() const
{
	if (! exo_order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::y2outer_exo called before parsing_finished");
	return exo_order->get_y2outer();
}
|
||||
|
||||
/** Map from the outer exogenous ordering to the internal exogenous
 * ordering. Throws if called before parsing_finished(). */
const vector<int>& FineAtoms::outer2y_exo() const
{
	if (! exo_order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::outer2y_exo called before parsing_finished");
	return exo_order->get_outer2y();
}

/** Map from positions of endo atoms within the endo derivative atoms
 * to positions within all der_atoms. Throws if called before
 * parsing_finished(). */
const vector<int>& FineAtoms::get_endo_atoms_map() const
{
	if (! endo_order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::get_endo_atoms_map called before parsing_finished");
	return endo_atoms_map;
}

/** Map from positions of exo atoms within the exo derivative atoms
 * to positions within all der_atoms. Throws if called before
 * parsing_finished(). */
const vector<int>& FineAtoms::get_exo_atoms_map() const
{
	if (! exo_order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "FineAtoms::get_exo_atoms_map called before parsing_finished");
	return exo_atoms_map;
}

/** Return the index of the given name in the outer parameter
 * ordering; throws if the name is not a registered parameter. */
int FineAtoms::name2outer_param(const char* name) const
{
	Tvarintmap::const_iterator it = param_outer_map.find(name);
	if (it == param_outer_map.end())
		throw ogu::Exception(__FILE__,__LINE__,
							 "Name is not a parameter in FineAtoms::name2outer_param");
	return (*it).second;
}

/** Return the index of the given name in the outer endogenous
 * ordering; throws if the name is not a registered endogenous
 * variable. */
int FineAtoms::name2outer_endo(const char* name) const
{
	Tvarintmap::const_iterator it = endo_outer_map.find(name);
	if (it == endo_outer_map.end())
		throw ogu::Exception(__FILE__,__LINE__,
							 "Name is not an endogenous variable in FineAtoms::name2outer_endo");
	return (*it).second;
}

/** Return the index of the given name in the outer exogenous
 * ordering; throws if the name is not a registered exogenous
 * variable. */
int FineAtoms::name2outer_exo(const char* name) const
{
	Tvarintmap::const_iterator it = exo_outer_map.find(name);
	if (it == exo_outer_map.end())
		throw ogu::Exception(__FILE__,__LINE__,
							 "Name is not an exogenous variable in FineAtoms::name2outer_exo");
	return (*it).second;
}
|
||||
|
||||
int FineAtoms::name2outer_allvar(const char* name) const
|
||||
{
|
||||
if (! allvar_order)
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"FineAtoms::name2outer_allvar called beore parsing_finished");
|
||||
|
||||
Tvarintmap::const_iterator it = endo_outer_map.find(name);
|
||||
if (it != endo_outer_map.end())
|
||||
return allvar_order->get_endo2all()[(*it).second];
|
||||
else {
|
||||
it = exo_outer_map.find(name);
|
||||
if (it != exo_outer_map.end())
|
||||
return allvar_order->get_exo2all()[(*it).second];
|
||||
}
|
||||
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
string("Name ") + name + " is neither endo nor exo variable in FineAtoms::name2outer_allvar");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/** Register an endogenous variable name. The name must not have been
 * declared before (in any category), otherwise a ParserException is
 * thrown. The name is copied into the shared name storage and
 * appended to the outer endogenous ordering. */
void FineAtoms::register_uniq_endo(const char* name)
{
	if (varnames.query(name))
		throw ogp::ParserException(string("Endogenous variable <")+name+"> is not unique.",0);
	const char* ss = varnames.insert(name);
	endovars.push_back(ss);
	endo_outer_map.insert(Tvarintmap::value_type(ss, endovars.size()-1));
}

/** Register an exogenous variable name; analogous to
 * register_uniq_endo. */
void FineAtoms::register_uniq_exo(const char* name)
{
	if (varnames.query(name))
		throw ogp::ParserException(string("Exogenous variable <")+name+"> is not unique.",0);
	const char* ss = varnames.insert(name);
	exovars.push_back(ss);
	exo_outer_map.insert(Tvarintmap::value_type(ss, exovars.size()-1));
}

/** Register a parameter name; analogous to register_uniq_endo. */
void FineAtoms::register_uniq_param(const char* name)
{
	if (varnames.query(name))
		throw ogp::ParserException(string("Parameter <")+name+"> is not unique.",0);
	const char* ss = varnames.insert(name);
	params.push_back(ss);
	param_outer_map.insert(Tvarintmap::value_type(ss, params.size()-1));
}
|
||||
|
||||
/** Create the internal orderings. The endogenous ordering is created
 * only if all endogenous occurrences lie within t-1..t+1; the
 * exogenous ordering only if all exogenous variables occur at time t
 * only. The der_atoms vector and the endo/exo atom maps are rebuilt
 * only when BOTH orderings could be created. */
void FineAtoms::make_internal_orderings(VarOrdering::ord_type ot)
{
	bool endo_ordering_done = false;
	bool exo_ordering_done = false;

	order_type = ot; // remember the requested ordering type

	int mlead, mlag;
	endovarspan(mlead, mlag);
	if (mlag >= -1 && mlead <= 1) {
		// make endo ordering (replacing any previous one)
		if (endo_order)
			delete endo_order;
		if (ot == VarOrdering::pbspbfbf)
			endo_order = new EndoVarOrdering1(endovars, *this);
		else
			endo_order = new EndoVarOrdering2(endovars, *this);
		endo_order->do_ordering();
		endo_ordering_done = true;
	}

	exovarspan(mlead, mlag);
	if (mlag == 0 && mlead == 0) {
		// make exo ordering (replacing any previous one)
		if (exo_order)
			delete exo_order;
		exo_order = new ExoVarOrdering(exovars, *this);
		exo_order->do_ordering();
		exo_ordering_done = true;
	}

	if (endo_ordering_done && exo_ordering_done) {
		// concatenate der atoms from endo_order and exo_order
		der_atoms.clear();
		der_atoms.insert(der_atoms.end(),
						 endo_order->get_der_atoms().begin(),
						 endo_order->get_der_atoms().end());
		der_atoms.insert(der_atoms.end(),
						 exo_order->get_der_atoms().begin(),
						 exo_order->get_der_atoms().end());

		// create endo_atoms_map; der_atoms is a concatenation, so it is easy
		int endo_atoms = endo_order->get_der_atoms().size();
		endo_atoms_map.clear();
		for (int i = 0; i < endo_atoms; i++)
			endo_atoms_map.push_back(i);
		// create exo_atoms_map: exo positions follow the endo block
		int exo_atoms = exo_order->get_der_atoms().size();
		exo_atoms_map.clear();
		for (int i = 0; i < exo_atoms; i++)
			exo_atoms_map.push_back(endo_atoms + i);
	}
}
|
||||
|
||||
void FineAtoms::print() const
|
||||
{
|
||||
DynamicAtoms::print();
|
||||
if (endo_order) {
|
||||
printf("Endo ordering:\n");
|
||||
endo_order->print();
|
||||
} else {
|
||||
printf("Endo ordering not created.\n");
|
||||
}
|
||||
if (exo_order) {
|
||||
printf("Exo ordering:\n");
|
||||
exo_order->print();
|
||||
} else {
|
||||
printf("Exo ordering not created.\n");
|
||||
}
|
||||
printf("endo atoms map:\n");
|
||||
for (unsigned int i = 0; i < endo_atoms_map.size(); i++)
|
||||
printf("%d --> %d\n", i, endo_atoms_map[i]);
|
||||
printf("exo atoms map:\n");
|
||||
for (unsigned int i = 0; i < exo_atoms_map.size(); i++)
|
||||
printf("%d --> %d\n", i, exo_atoms_map[i]);
|
||||
}
|
|
@ -0,0 +1,350 @@
|
|||
// Copyright (C) 2005, Ondra Kamenik
|
||||
|
||||
// $Id: fine_atoms.h 1759 2008-03-31 14:25:20Z kamenik $
|
||||
|
||||
#ifndef OGP_FINE_ATOMS_H
|
||||
#define OGP_FINE_ATOMS_H
|
||||
|
||||
#include "dynamic_atoms.h"
|
||||
|
||||
#include <vector>
|
||||
#include <string>
|
||||
|
||||
namespace ogp {
|
||||
|
||||
using std::vector;
|
||||
using std::string;
|
||||
|
||||
/** This is just ordering used for endogenous variables. It
 * assumes that we have only time t-1, t, and t+1, orders them as
 * pred(t-1), both(t-1), stat(t), pred(t), both(t), forw(t),
 * both(t+1), forw(t+1). */
class EndoVarOrdering1 : public VarOrdering {
public:
	/** Construct over the given outer list of endogenous names. */
	EndoVarOrdering1(const vector<const char*>& vnames, const DynamicAtoms& a)
		: VarOrdering(vnames, a) {}
	/** Copy-like constructor rebinding to new names and atoms. */
	EndoVarOrdering1(const EndoVarOrdering1& vo, const vector<const char*>& vnames,
					 const DynamicAtoms& a)
		: VarOrdering(vo, vnames, a) {}
	/** Virtual copy constructor. */
	VarOrdering* clone(const vector<const char*>& vnames, const DynamicAtoms& a) const
		{return new EndoVarOrdering1(*this, vnames, a);}
	/** Plug in the preimplemented pbspbfbf ordering. */
	void do_ordering()
		{do_pbspbfbf();}
};

/** This is just another ordering used for endogenous
 * variables. It assumes that we have only time t-1, t, and t+1,
 * orders them as both(t+1), forw(t+1), pred(t-1), both(t-1),
 * stat(t), pred(t), both(t), forw(t). */
class EndoVarOrdering2 : public VarOrdering {
public:
	/** Construct over the given outer list of endogenous names. */
	EndoVarOrdering2(const vector<const char*>& vnames, const DynamicAtoms& a)
		: VarOrdering(vnames, a) {}
	/** Copy-like constructor rebinding to new names and atoms. */
	EndoVarOrdering2(const EndoVarOrdering2& vo, const vector<const char*>& vnames,
					 const DynamicAtoms& a)
		: VarOrdering(vo, vnames, a) {}
	/** Virtual copy constructor. */
	VarOrdering* clone(const vector<const char*>& vnames, const DynamicAtoms& a) const
		{return new EndoVarOrdering2(*this, vnames, a);}
	/** Plug in the preimplemented bfspbfpb ordering. */
	void do_ordering()
		{do_bfspbfpb();}
};

/** This is just ordering used for exogenous variables. It makes
 * no assumptions about their timing. It orders them from the
 * least time to the latest time. */
class ExoVarOrdering : public VarOrdering {
public:
	/** Construct over the given outer list of exogenous names. */
	ExoVarOrdering(const vector<const char*>& vnames, const DynamicAtoms& a)
		: VarOrdering(vnames, a) {}
	/** Copy-like constructor rebinding to new names and atoms. */
	ExoVarOrdering(const ExoVarOrdering& vo, const vector<const char*>& vnames,
				   const DynamicAtoms& a)
		: VarOrdering(vo, vnames, a) {}
	/** Virtual copy constructor. */
	VarOrdering* clone(const vector<const char*>& vnames, const DynamicAtoms& a) const
		{return new ExoVarOrdering(*this, vnames, a);}
	/** Plug in the increasing-time ordering. */
	void do_ordering()
		{do_increasing_time();}
};
|
||||
|
||||
class FineAtoms;
|
||||
|
||||
/** This class provides an outer ordering of all variables (endo
 * and exo). It maps the ordering to the particular outer
 * orderings of endo and exo. It works tightly with the FineAtoms
 * class. */
class AllvarOuterOrdering {
protected:
	/** Type for a map mapping a variable name to an integer. */
	typedef map<const char*, int, ltstr> Tvarintmap;
	/** Reference to atoms. */
	const FineAtoms& atoms;
	/** The vector of all endo and exo variables in outer
	 * ordering. The pointers point to storage in atoms. */
	vector<const char*> allvar;
	/** The mapping from outer endogenous to outer all. For
	 * example endo2all[0] is the order of the first outer
	 * endogenous variable in the allvar ordering. */
	vector<int> endo2all;
	/** The mapping from outer exogenous to outer all. For example
	 * exo2all[0] is the order of the first outer exogenous
	 * variables in the allvar ordering. */
	vector<int> exo2all;
public:
	/** Construct the allvar outer ordering from the provided
	 * sequence of endo and exo names. The names can have an
	 * arbitrary storage, the storage is transformed to the atoms
	 * storage. An exception is thrown if either the list is not
	 * exhaustive, or some string is not a variable. */
	AllvarOuterOrdering(const vector<const char*>& allvar_outer, const FineAtoms& a);
	/** Copy constructor using the storage of provided atoms. */
	AllvarOuterOrdering(const AllvarOuterOrdering& allvar_outer, const FineAtoms& a);
	/** Return endo2all mapping. */
	const vector<int>& get_endo2all() const
		{return endo2all;}
	/** Return exo2all mapping. */
	const vector<int>& get_exo2all() const
		{return exo2all;}
	/** Return the allvar ordering. */
	const vector<const char*>& get_allvar() const
		{return allvar;}
};
|
||||
|
||||
/** This class refines the DynamicAtoms by distinguishing among
|
||||
* parameters (no lag and leads) and endogenous and exogenous
|
||||
* variables (with lags and leads). For parameters, endogenous and
|
||||
* exogenous, it defines outer orderings and internal
|
||||
* orderings. The internal orderings are created by
|
||||
* parsing_finished() method when it is sure that no new variables
|
||||
* would be registered. The outer orderings are given by the order
|
||||
* of calls of registering methods.
|
||||
*
|
||||
* In addition, the class also defines outer ordering of
|
||||
* endogenous and exogenous variables. This is input as a
|
||||
* parameter to parsing_finished(). By default, this whole outer
|
||||
* ordering is just a concatenation of outer ordering of
|
||||
* endogenous and exogenous variables.
|
||||
*
|
||||
* The internal ordering of all endo and exo variables is just a
|
||||
* concatenation of endo and exo variables in their internal
|
||||
* orderings. This is the ordering with respect to which all
|
||||
* derivatives are taken. */
|
||||
class FineAtoms : public DynamicAtoms {
    friend class AllvarOuterOrdering;
protected:
    typedef map<const char*, int, ltstr> Tvarintmap;
private:
    /** The vector of parameters names. The order gives the order
     * the data is communicated with outside world. */
    vector<const char*> params;
    /** A map mapping a name of a parameter to an index in the outer
     * ordering. */
    Tvarintmap param_outer_map;
    /** The vector of endogenous variables. This defines the order
     * like parameters. */
    vector<const char*> endovars;
    /** A map mapping a name of an endogenous variable to an index
     * in the outer ordering. */
    Tvarintmap endo_outer_map;
    /** The vector of exogenous variables. Also defines the order
     * like parameters and endovars. */
    vector<const char*> exovars;
    /** A map mapping a name of an exogenous variable to an index
     * in the outer ordering. */
    Tvarintmap exo_outer_map;

protected:
    /** This is the internal ordering of all atoms corresponding
     * to endogenous variables. It is constructed by
     * parsing_finished() method, which should be called after all
     * parsing jobs have been finished. */
    VarOrdering* endo_order;
    /** This is the internal ordering of all atoms corresponding
     * to exogenous variables. It has the same handling as
     * endo_order. */
    VarOrdering* exo_order;
    /** This is the all variables outer ordering. It is
     * constructed by parsing_finished(). */
    AllvarOuterOrdering* allvar_order;
    /** This vector defines a set of atoms as tree indices used
     * for differentiation. The order of the atoms in this vector
     * defines ordering of the derivative tensors. The ordering is
     * a concatenation of atoms from endo_order and then
     * exo_order. This vector is setup by parsing_finished() and
     * is returned by variables(). */
    vector<int> der_atoms;
    /** This is a mapping from endogenous atoms to all atoms in
     * der_atoms member. The mapping maps index in endogenous atom
     * ordering to index (not value) in der_atoms. It is useful if
     * one wants to evaluate derivatives wrt only endogenous
     * variables. It is set by parsing_finished(). By definition,
     * it is monotone. */
    vector<int> endo_atoms_map;
    /** This is a mapping from exogenous atoms to all atoms in
     * der_atoms member. It is the same as endo_atoms_map for
     * atoms of exogenous variables. */
    vector<int> exo_atoms_map;
public:
    /** Default constructor; the orderings are built later by
     * parsing_finished(). */
    FineAtoms()
        : endo_order(NULL), exo_order(NULL), allvar_order(NULL) {}
    FineAtoms(const FineAtoms& fa);
    /** Deletes endo_order, exo_order and allvar_order (owned by
     * this object). */
    virtual ~FineAtoms()
        {
            if (endo_order) delete endo_order;
            if (exo_order) delete exo_order;
            if (allvar_order) delete allvar_order;
        }
    /** Overrides DynamicAtoms::check_variable so that the error
     * would be raised if the variable name is not declared. A
     * variable is declared by inserting it to
     * DynamicAtoms::varnames. This is a responsibility of a
     * subclass. */
    int check_variable(const char* name) const;
    /** This calculates min lag and max lead of endogenous variables. */
    void endovarspan(int& mlead, int& mlag) const
        {varspan(endovars, mlead, mlag);}
    /** This calculates min lag and max lead of exogenous variables. */
    void exovarspan(int& mlead, int& mlag) const
        {varspan(exovars, mlead, mlag);}
    /** This calculates the number of periods in which at least
     * one exogenous variable occurs. */
    int num_exo_periods() const;
    /** Return an (external) ordering of parameters. */
    const vector<const char*>& get_params() const
        {return params;}
    /** Return an external ordering of endogenous variables. */
    const vector<const char*>& get_endovars() const
        {return endovars;}
    /** Return an external ordering of exogenous variables. */
    const vector<const char*>& get_exovars() const
        {return exovars;}
    /** This constructs internal orderings and makes the indices
     * returned by variables method available. Further it
     * constructs outer ordering of all variables by a simple
     * concatenation of outer endogenous and outer exogenous. In
     * addition, it makes nstat, npred, nboth, nforw available. */
    void parsing_finished(VarOrdering::ord_type ot);
    /** This does the same thing as
     * parsing_finished(VarOrdering::ord_type) plus it allows for
     * inputing a different outer ordering of all variables. The
     * ordering is input as a list of strings, their storage can
     * be arbitrary. */
    void parsing_finished(VarOrdering::ord_type ot, const vector<const char*> avo);
    /** Return the external ordering of all variables (endo and
     * exo). This is either the second argument to
     * parsing_finished or the default external ordering. This
     * must be called only after parsing_finished. */
    const vector<const char*>& get_allvar() const;
    /** Return the map from outer ordering of endo variables to
     * the allvar ordering. This must be called only after
     * parsing_finished. */
    const vector<int>& outer_endo2all() const;
    /** Return the map from outer ordering of exo variables to
     * the allvar ordering. This must be called only after
     * parsing_finished. */
    const vector<int>& outer_exo2all() const;
    /** Return the atoms with respect to which we are going to
     * differentiate. This must be called after
     * parsing_finished. */
    vector<int> variables() const;
    /** Return the number of static. */
    int nstat() const;
    /** Return the number of predetermined. */
    int npred() const;
    /** Return the number of both. */
    int nboth() const;
    /** Return the number of forward looking. */
    int nforw() const;
    /** Return the index of an endogenous atom given by tree index in
     * the endo ordering. This must be also called only after
     * parsing_finished(). */
    int get_pos_of_endo(int t) const;
    /** Return the index of an exogenous atom given by tree index in
     * the exo ordering. This must be also called only after
     * parsing_finished(). */
    int get_pos_of_exo(int t) const;
    /** Return the index of either endogenous or exogenous atom
     * given by tree index in the concatenated ordering of
     * endogenous and exogenous atoms. This must be also called
     * only after parsing_finished(). */
    int get_pos_of_all(int t) const;
    /** Return the mapping from endogenous at time t to outer
     * ordering of endogenous. */
    const vector<int>& y2outer_endo() const;
    /** Return the mapping from the outer ordering of endogenous to endogenous
     * at time t. */
    const vector<int>& outer2y_endo() const;
    /** Return the mapping from exogenous at time t to outer
     * ordering of exogenous. */
    const vector<int>& y2outer_exo() const;
    /** Return the mapping from the outer ordering of exogenous to exogenous
     * at time t. */
    const vector<int>& outer2y_exo() const;
    /** Return the endo_atoms_map. */
    const vector<int>& get_endo_atoms_map() const;
    /** Return the exo_atoms_map. */
    const vector<int>& get_exo_atoms_map() const;
    /** Return an index in the outer ordering of a given
     * parameter. An exception is thrown if the name is not a
     * parameter. */
    int name2outer_param(const char* name) const;
    /** Return an index in the outer ordering of a given
     * endogenous variable. An exception is thrown if the name is not
     * an endogenous variable. */
    int name2outer_endo(const char* name) const;
    /** Return an index in the outer ordering of a given
     * exogenous variable. An exception is thrown if the name is not
     * an exogenous variable. */
    int name2outer_exo(const char* name) const;
    /** Return an index in the outer ordering of all variables
     * (endo and exo) for a given name. An exception is thrown if
     * the name is not a variable. This must be called only after
     * parsing_finished(). */
    int name2outer_allvar(const char* name) const;
    /** Return the number of endogenous variables at time t-1, these are state
     * variables. */
    int nys() const
        {return npred()+nboth();}
    /** Return the number of endogenous variables at time t+1. */
    int nyss() const
        {return nboth()+nforw();}
    /** Return the number of endogenous variables. */
    int ny() const
        {return endovars.size();}
    /** Return the number of exogenous variables. */
    int nexo() const
        {return (int)exovars.size();}
    /** Return the number of parameters. */
    int np() const
        {return (int)(params.size());}
    /** Register unique endogenous variable name. The order of
     * calls defines the endo outer ordering. The method is
     * virtual, since a superclass may want to do some additional
     * action. */
    virtual void register_uniq_endo(const char* name);
    /** Register unique exogenous variable name. The order of
     * calls defines the exo outer ordering. The method is
     * virtual, since a superclass may want to do some additional
     * action. */
    virtual void register_uniq_exo(const char* name);
    /** Register unique parameter name. The order of calls defines
     * the param outer ordering. The method is
     * virtual, since a superclass may want to do some additional
     * action. */
    virtual void register_uniq_param(const char* name);
    /** Debug print. */
    void print() const;
private:
    /** This performs the common part of parsing_finished(), which
     * is a construction of internal orderings. */
    void make_internal_orderings(VarOrdering::ord_type ot);
protected:
    /** This remembers the ordering type of the last call of
     * make_internal_orderings. */
    VarOrdering::ord_type order_type;
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,72 @@
|
|||
%{
/* Lexer for the ogp formula language; token types are defined by the
 * bison-generated formula_tab.hh. */
#include "location.h"
#include "formula_tab.hh"

extern YYLTYPE fmla_lloc;

/* Update the current token location (fmla_lloc) on every match. */
#define YY_USER_ACTION SET_LLOC(fmla_);
%}

%option nounput
%option noyy_top_state
%option stack
%option yylineno
%option prefix="fmla_"
%option never-interactive
%x CMT

%%

 /* comments: C-style comments in any state enter the exclusive CMT state */
<*>"/*"            {yy_push_state(CMT);}
<CMT>[^*\n]*
<CMT>"*"+[^*/\n]*
<CMT>"*"+"/"       {yy_pop_state();}
<CMT>[\n]
 /* C++-style line comments are discarded */
"//".*\n

 /* initial spaces or tabs are ignored */

[ \t\r\n]
 /* arithmetic operators */
[+]                {return YPLUS;}
[-]                {return YMINUS;}
[*]                {return YTIMES;}
[/]                {return YDIVIDE;}
[\^]               {return YPOWER;}
 /* function keywords */
exp                {return YEXP;}
log                {return YLOG;}
sin                {return YSIN;}
cos                {return YCOS;}
tan                {return YTAN;}
sqrt               {return YSQRT;}
erf                {return YERF;}
erfc               {return YERFC;}
diff               {return YDIFF;}

 /* names: parameters, variables (lagged/leaded), e.g. x, x(-1), x{+2} */
[A-Za-z_][A-Za-z0-9_]*([\(\{][+-]?[0-9]+[\)\}])? {
	fmla_lval.string=fmla_text;
	return NAME;
}

 /* floating point numbers, with optional e/d exponent */
(([0-9]*\.?[0-9]+)|([0-9]+\.))([edED][-+]?[0-9]+)? {
	fmla_lval.string=fmla_text;
	return DNUMBER;
}

= {return EQUAL_SIGN;}

 /* any other character is returned as itself (lets the parser report it) */
. {return fmla_text[0];}

%%
|
||||
|
||||
/* Called by flex at end of buffer; returning 1 means there is no
 * further input to scan. */
int fmla_wrap()
{
	return 1;
}
|
||||
|
||||
/* Type-erased wrapper around fmla__delete_buffer, so callers
 * (FormulaParser::parse) need not know the flex YY_BUFFER_STATE type. */
void fmla__destroy_buffer(void* p)
{
	fmla__delete_buffer((YY_BUFFER_STATE)p);
}
|
|
@ -0,0 +1,87 @@
|
|||
%{
/* Copyright 2006, Ondra Kamenik */

/* $Id: formula.y 1749 2008-03-28 11:59:29Z kamenik $ */

/* Grammar for the ogp formula language. Semantic values of
 * expressions are integer indices into the parser's operation tree;
 * actions build the tree through the global fparser. */
#include "location.h"
#include "formula_parser.h"
#include "formula_tab.hh"

int fmla_error(char*);
int fmla_lex(void);
extern int fmla_lineno;
extern ogp::FormulaParser* fparser;
extern YYLTYPE fmla_lloc;

static void print_token_value (FILE *, int, YYSTYPE);
#define YYPRINT(file, type, value) print_token_value (file, type, value)

%}

%union {
	char* string;
	double dvalue;
	int integer;
}

/* Operator precedences: +,- lowest; unary sign binds tighter than
 * *,/ but looser than ^ (right associative). */
%token EQUAL_SIGN
%left YPLUS YMINUS
%left YTIMES YDIVIDE
%left YUMINUS YUPLUS
%right YPOWER
%token YEXP YLOG YSIN YCOS YTAN YSQRT YERF YERFC YDIFF
%token <string> DNUMBER NAME
%type <integer> expression

%name-prefix="fmla_"

%locations
%error-verbose

%%
/* Input is either a list of equations or a single expression. */
root : equation_list
     | expression
       {fparser->add_formula($1);}
     ;

equation_list : equation_list equation | equation ;

/* An equation lhs = rhs is stored as the formula lhs - rhs. */
equation : expression EQUAL_SIGN expression ';'
           {fparser->add_formula(fparser->add_binary(ogp::MINUS,$1,$3));}
         | expression ';'
           {fparser->add_formula($1);}
         ;

expression : '(' expression ')' { $$ = $2;}
    | expression YPLUS expression {$$=fparser->add_binary(ogp::PLUS,$1,$3);}
    | expression YMINUS expression {$$=fparser->add_binary(ogp::MINUS,$1,$3);}
    | expression YTIMES expression {$$=fparser->add_binary(ogp::TIMES,$1,$3);}
    | expression YDIVIDE expression {$$=fparser->add_binary(ogp::DIVIDE,$1,$3);}
    | expression YPOWER expression {$$=fparser->add_binary(ogp::POWER,$1,$3);}
    | YMINUS expression %prec YUMINUS {$$=fparser->add_unary(ogp::UMINUS,$2);}
    | YPLUS expression %prec YUPLUS {$$ = $2;}
    | YSIN '(' expression ')' {$$=fparser->add_unary(ogp::SIN,$3);}
    | YCOS '(' expression ')' {$$=fparser->add_unary(ogp::COS,$3);}
    | YTAN '(' expression ')' {$$=fparser->add_unary(ogp::TAN,$3);}
    | YEXP '(' expression ')' {$$=fparser->add_unary(ogp::EXP,$3);}
    | YLOG '(' expression ')' {$$=fparser->add_unary(ogp::LOG,$3);}
    | YSQRT '(' expression ')' {$$=fparser->add_unary(ogp::SQRT,$3);}
    | YERF '(' expression ')' {$$=fparser->add_unary(ogp::ERF,$3);}
    | YERFC '(' expression ')' {$$=fparser->add_unary(ogp::ERFC,$3);}
    | YDIFF '(' expression ',' NAME ')' {$$=fparser->add_derivative($3, fparser->add_nulary($5));}
    | NAME {$$=fparser->add_nulary($1);}
    | DNUMBER {$$=fparser->add_nulary($1);}
    ;

%%
|
||||
|
||||
/** Error callback invoked by the bison parser. It forwards the
 * message to the FormulaParser, which throws a ParserException with
 * the current location, so control never actually returns here.
 *
 * Bug fix: the function is declared to return int but had no return
 * statement; flowing off the end of a value-returning function is
 * undefined behavior in C++. Return 0 to make it well-formed. */
int fmla_error(char* s)
{
	fparser->error(s);
	return 0;
}
|
||||
|
||||
/* Used by YYPRINT for bison debug traces: prints the lexeme of NAME
 * tokens; other token types carry no printable value here. */
static void print_token_value(FILE* file, int type, YYSTYPE value)
{
	if (type == NAME)
		fprintf(file, "%s", value.string);
}
|
|
@ -0,0 +1,517 @@
|
|||
// Copyright (C) 2005, Ondra Kamenik
|
||||
|
||||
// $Id: formula_parser.cpp 2268 2008-11-22 10:38:03Z michel $
|
||||
|
||||
#include "utils/cc/pascal_triangle.h"
|
||||
#include "utils/cc/exception.h"
|
||||
|
||||
#include "parser_exception.h"
|
||||
#include "location.h"
|
||||
#include "formula_parser.h"
|
||||
#include "formula_tab.hh"
|
||||
|
||||
#include <cmath>
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
extern location_type fmla_lloc;
|
||||
|
||||
/** Copy constructor rebinding the copied parser to a different Atoms
 * object. The operation tree and formula indices are copied by value;
 * the derivatives are deep-copied since the parser owns them (they
 * are deleted in the destructor). The caller is responsible for `a`
 * being consistent with the atoms the original parser used. */
FormulaParser::FormulaParser(const FormulaParser& fp, Atoms& a)
	: otree(fp.otree), atoms(a), formulas(fp.formulas), ders()
{
	// create derivatives (deep copy: ders holds owning pointers)
	for (unsigned int i = 0; i < fp.ders.size(); i++)
		ders.push_back(new FormulaDerivatives(*(fp.ders[i])));
}
|
||||
|
||||
/** Releases the FormulaDerivatives objects owned by this parser. */
FormulaParser::~FormulaParser()
{
	destroy_derivatives();
}
|
||||
|
||||
/** Calculate derivatives of all formulas up to max_order with respect
 * to atoms.variables(). Any previously calculated derivatives are
 * discarded first, so after the call ders is parallel to formulas. */
void FormulaParser::differentiate(int max_order)
{
	destroy_derivatives();
	vector<int> vars;
	vars = atoms.variables();
	for (unsigned int i = 0; i < formulas.size(); i++)
		ders.push_back(new FormulaDerivatives(otree, vars, formulas[i], max_order));
}
|
||||
|
||||
/** Range-checked access to the derivatives of the i-th formula.
 * Throws if i is out of range (or if differentiate() has not been
 * called, since ders is then empty). */
const FormulaDerivatives& FormulaParser::derivatives(int i) const
{
	if (i < (int)ders.size())
		return *(ders[i]);
	else
		throw ogu::Exception(__FILE__,__LINE__,
							 "Wrong formula index in FormulaParser::derivatives");
	return *(ders[0]); // just because of compiler (unreachable)
}
|
||||
|
||||
|
||||
|
||||
/** Record tree index t as a finished formula. */
void FormulaParser::add_formula(int t)
{
	formulas.push_back(t);
}

/** Add a binary operation to the tree; returns its tree index. */
int FormulaParser::add_binary(code_t code, int t1, int t2)
{
	return otree.add_binary(code, t1, t2);
}

/** Add a unary operation to the tree; returns its tree index. */
int FormulaParser::add_unary(code_t code, int t)
{
	return otree.add_unary(code, t);
}
|
||||
|
||||
/** Return the tree index of the nulary term (variable, parameter or
 * number) named str, allocating a new leaf in the tree and assigning
 * it in atoms if the name has not been seen yet. If atoms rejects the
 * name, the ParserException is rethrown decorated with the current
 * input offset so the caller can point at the offending token. */
int FormulaParser::add_nulary(const char* str)
{
	int t = -1;
	try {
		t = atoms.check(str);
	} catch (const ParserException& e) {
		// attach the lexer's current offset for error reporting
		throw ParserException(e, fmla_lloc.off);
	}
	if (t == -1) {
		// not known yet: allocate a leaf and register the name
		t = otree.add_nulary();
		atoms.assign(str, t);
	}
	return t;
}
|
||||
|
||||
/** Append all formulas of fp to this parser, applying the
 * substitution map subst (old tree index -> new tree index) while
 * translating them into this parser's tree. */
void FormulaParser::add_subst_formulas(const map<int,int>& subst, const FormulaParser& fp)
{
	for (int i = 0; i < fp.nformulas(); i++) {
		int f = add_substitution(fp.formula(i), subst, fp);
		add_formula(f);
	}
}
|
||||
|
||||
/** Apply the substitution map smap to every formula in place. If
 * derivatives exist for a formula, they are recalculated (to the same
 * order as before) for the substituted formula. */
void FormulaParser::substitute_formulas(const map<int,int>& smap)
{
	for (int i = 0; i < nformulas(); i++) {
		// make substitution and replace the formula for it
		int f = add_substitution(formulas[i], smap);
		formulas[i] = f;
		// update the derivatives if any
		if (i < (int)ders.size() && ders[i]) {
			int order = ders[i]->get_order();
			delete ders[i];
			ders[i] = new FormulaDerivatives(otree, atoms.variables(), formulas[i], order);
		}
	}
}
|
||||
|
||||
/** Global symbols for passing info to parser. NOTE(review): a single
 * global means parsing is not reentrant/thread-safe — one parse at a
 * time. */
FormulaParser* fparser;

/** The declarations of functions defined in formula_ll.cc and
 * formula_tab.cc generated from formula.lex and formula.y */
void* fmla__scan_buffer(char*, size_t);
void fmla__destroy_buffer(void*);
void fmla_parse();
extern location_type fmla_lloc;
|
||||
|
||||
/** This makes own copy of provided data, sets the buffer for the
|
||||
* parser with fmla_scan_buffer, and launches fmla_parse(). Note that
|
||||
* the pointer returned from fmla_scan_buffer must be freed at the
|
||||
* end. */
|
||||
/** This makes own copy of provided data, sets the buffer for the
 * parser with fmla_scan_buffer, and launches fmla_parse(). Note that
 * the pointer returned from fmla_scan_buffer must be freed at the
 * end. */
void FormulaParser::parse(int length, const char* stream)
{
	// flex's yy_scan_buffer requires the buffer to end with TWO
	// NUL bytes, hence length+2
	char* buffer = new char[length+2];
	strncpy(buffer, stream, length);
	buffer[length] = '\0';
	buffer[length+1] = '\0';
	// reset location tracking for error messages
	fmla_lloc.off = 0;
	fmla_lloc.ll = 0;
	void* p = fmla__scan_buffer(buffer, (unsigned int)length+2);
	// publish this parser to the bison actions via the global
	fparser = this;
	fmla_parse();
	delete [] buffer;
	fmla__destroy_buffer(p);
}
|
||||
|
||||
/** Report a parse error by throwing a ParserException carrying the
 * current input offset. Called from the bison error callback. */
void FormulaParser::error(const char* mes) const
{
	throw ParserException(mes, fmla_lloc.off);
}
|
||||
|
||||
/** Return the highest tree index among the formulas and the last
 * nulary term of the tree; -1 only if there is neither. */
int FormulaParser::last_formula() const
{
	int res = -1;
	for (unsigned int i = 0; i < formulas.size(); i++)
		if (res < formulas[i])
			res = formulas[i];
	return std::max(res, otree.get_last_nulary());
}
|
||||
|
||||
/** Remove and return the tree index of the last formula, or -1 if
 * there is none. If derivatives exist for the popped formula (ders
 * parallel to formulas), they are destroyed as well. */
int FormulaParser::pop_last_formula()
{
	if (formulas.size() == 0)
		return -1;
	int t = formulas.back();
	if (formulas.size() == ders.size()) {
		delete ders.back();
		ders.pop_back();
	}
	formulas.pop_back();
	return t;
}
|
||||
|
||||
/** Debug print of atoms, all formulas and (if present) their
 * derivatives. NOTE(review): the second loop indexes formulas[i]
 * while iterating over ders — this assumes ders never grows longer
 * than formulas, which differentiate() guarantees. */
void FormulaParser::print() const
{
	atoms.print();
	for (unsigned int i = 0; i < formulas.size(); i++) {
		printf("formula %d:\n", formulas[i]);
		otree.print_operation(formulas[i]);
	}
	for (unsigned int i = 0; i < ders.size(); i++) {
		printf("derivatives for the formula %d:\n", formulas[i]);
		ders[i]->print(otree);
	}
}
|
||||
|
||||
void FormulaParser::destroy_derivatives()
|
||||
{
|
||||
while (ders.size() > 0) {
|
||||
delete ders.back();
|
||||
ders.pop_back();
|
||||
}
|
||||
}
|
||||
|
||||
/** This constructor makes a vector of indices for formulas
 * corresponding to derivatives of the given formula. The formula is
 * supposed to belong to the provided tree, the created derivatives
 * are added to the tree.
 *
 * The algorithm proceeds order by order. Order 0 is the formula
 * itself with the empty multi-index. To build order k, every stored
 * derivative of order k-1 (the interval <last_order_beg,
 * last_order_end) of tder) is differentiated with respect to the
 * variables starting from the last variable of its folded
 * multi-index; by symmetry of partial derivatives this generates each
 * folded multi-index exactly once. Derivatives which are identically
 * zero are not stored. If a whole order produces no non-zero
 * derivative, higher orders are skipped (they are zero too). Finally
 * ind2der is built mapping each multi-index to its position in
 * tder/indices. */
FormulaDerivatives::FormulaDerivatives(OperationTree& otree,
									   const vector<int>& vars, int f, int max_order)
	: nvar(vars.size()), order(max_order)
{
	FoldMultiIndex fmi_zero(nvar);
	tder.push_back(f);
	indices.push_back(fmi_zero);
	unsigned int last_order_beg = 0;
	unsigned int last_order_end = tder.size();

	for (int k = 1; k <= order; k++) {
		// interval <last_order_beg,last_order_end) is guaranteed
		// here to contain at least one item
		for (unsigned int run = last_order_beg; run < last_order_end; run++) {
			// shift one order from the run
			FoldMultiIndex fmi(indices[run], 1);
			// set starting variable from the run, note that if k=1,
			// the shift order ctor of fmi will set it to zero
			int ivar_start = fmi[k-1];
			for (int ivar = ivar_start; ivar < nvar; ivar++, fmi.increment()) {
				int der = otree.add_derivative(tder[run], vars[ivar]);
				if (der != OperationTree::zero) {
					tder.push_back(der);
					indices.push_back(fmi);
				}
			}
		}

		// set new last_order_beg and last_order_end
		last_order_beg = last_order_end;
		last_order_end = tder.size();
		// if there was no new derivative, break out from the loop
		if (last_order_beg >= last_order_end)
			break;
	}

	// build ind2der map
	for (unsigned int i = 0; i < indices.size(); i++)
		ind2der.insert(Tfmiintmap::value_type(indices[i], i));

}
|
||||
|
||||
/** Copy constructor; all members have value semantics, so a plain
 * member-wise copy is sufficient. */
FormulaDerivatives::FormulaDerivatives(const FormulaDerivatives& fd)
	: tder(fd.tder), indices(fd.indices), ind2der(fd.ind2der),
	  nvar(fd.nvar), order(fd.order)
{
}
|
||||
|
||||
/** Return the tree index of the derivative for the given folded
 * multi-index, or OperationTree::zero if that derivative is
 * identically zero (i.e. it was not stored). Throws if mi has a
 * higher order than was calculated or a different variable count. */
int FormulaDerivatives::derivative(const FoldMultiIndex& mi) const
{
	if (mi.order() > order)
		throw ogu::Exception(__FILE__,__LINE__,
							 "Wrong order of multi-index in FormulaDerivatives::derivative");
	if (mi.nv() != nvar)
		throw ogu::Exception(__FILE__,__LINE__,
							 "Wrong multi-index variables in FormulaDerivatives::derivative");

	Tfmiintmap::const_iterator it = ind2der.find(mi);
	if (it == ind2der.end())
		return OperationTree::zero;
	else
		return tder[(*it).second];
}
|
||||
|
||||
/** Debug print of all stored derivatives: each multi-index with its
 * formula index and the formula's operation in the given tree. */
void FormulaDerivatives::print(const OperationTree& otree) const
{
	for (Tfmiintmap::const_iterator it = ind2der.begin();
		 it != ind2der.end(); ++it) {
		printf("derivative ");
		(*it).first.print();
		printf(" is formula %d\n", tder[(*it).second]);
		otree.print_operation(tder[(*it).second]);
	}
}
|
||||
|
||||
/** Evaluate all terms with atom values taken from av, passing the
 * i-th result to loader.load(i, result). The evaluation tree is reset
 * first, so stale cached values cannot leak between calls. */
void FormulaCustomEvaluator::eval(const AtomValues& av, FormulaEvalLoader& loader)
{
	etree.reset_all();
	av.setValues(etree);
	for (unsigned int i = 0; i < terms.size(); i++) {
		double res = etree.eval(terms[i]);
		loader.load((int)i, res);
	}
}
|
||||
|
||||
/** Construct the zero-order (empty) multi-index over nv variables.
 * new int[0] is a valid zero-length allocation, kept so the
 * destructor can delete[] unconditionally. NOTE(review): data(new
 * int[ord]) relies on ord being declared before data in the class —
 * confirm against the header. */
FoldMultiIndex::FoldMultiIndex(int nv)
	: nvar(nv), ord(0), data(new int[ord])
{
}
|
||||
|
||||
/** Construct a multi-index of order ordd over nv variables with all
 * entries set to ii (a constant index, which is trivially
 * non-decreasing as the folded representation requires). */
FoldMultiIndex::FoldMultiIndex(int nv, int ordd, int ii)
	: nvar(nv), ord(ordd), data(new int[ord])
{
	for (int i = 0; i < ord; i++)
		data[i] = ii;
}
|
||||
|
||||
/** Note that a monotone sequence mapped by monotone mapping yields a
 * monotone sequence, so the folded invariant (non-decreasing data) is
 * preserved when translating mi through mp into a space of nv
 * variables. NOTE(review): the monotonicity check compares mp[i] and
 * mp[i+1] for i up to ord-2, i.e. only over the first ord entries of
 * mp — presumably callers pass a mapping monotone everywhere; confirm
 * against call sites. */
FoldMultiIndex::FoldMultiIndex(int nv, const FoldMultiIndex& mi, const vector<int>& mp)
	: nvar(nv), ord(mi.ord), data(new int[ord])
{
	for (int i = 0; i < ord; i++) {
		if (i < ord-1 && mp[i+1] < mp[i])
			throw ogu::Exception(__FILE__,__LINE__,
								 "Mapping not monotone in FoldMultiIndex constructor");
		if (mp[mi[i]] >= nv || mp[mi[i]] < 0)
			throw ogu::Exception(__FILE__,__LINE__,
								 "Mapping out of bounds in FoldMultiIndex constructor");
		data[i] = mp[mi[i]];
	}
}
|
||||
|
||||
/** Construct a multi-index extending fmi by new_orders entries. The
 * new entries repeat the last entry of fmi (or 0 if fmi is of order
 * zero), which keeps the sequence non-decreasing. */
FoldMultiIndex::FoldMultiIndex(const FoldMultiIndex& fmi, int new_orders)
	: nvar(fmi.nvar),
	  ord(fmi.ord+new_orders),
	  data(new int[ord])
{
	memcpy(data, fmi.data, fmi.ord*sizeof(int));
	int new_item = (fmi.ord > 0)? fmi.data[fmi.ord-1] : 0;
	for (int i = fmi.ord; i < ord; i++) {
		data[i] = new_item;
	}
}
|
||||
|
||||
/** Copy constructor; allocates its own data buffer (deep copy). */
FoldMultiIndex::FoldMultiIndex(const FoldMultiIndex& fmi)
	: nvar(fmi.nvar),
	  ord(fmi.ord),
	  data(new int[fmi.ord])
{
	memcpy(data, fmi.data, ord*sizeof(int));
}
|
||||
|
||||
/** Assignment, reusing the existing buffer when the orders match.
 *
 * Bug fix: the original performed memcpy on self-assignment, and
 * memcpy with overlapping (here identical) source and destination is
 * undefined behavior; a self-assignment guard makes x = x a no-op. */
const FoldMultiIndex& FoldMultiIndex::operator=(const FoldMultiIndex& fmi)
{
	if (this != &fmi) {
		if (ord != fmi.ord) {
			delete [] data;
			data = new int[fmi.ord];
		}

		ord = fmi.ord;
		nvar = fmi.nvar;
		memcpy(data, fmi.data, ord*sizeof(int));
	}

	return *this;
}
|
||||
|
||||
bool FoldMultiIndex::operator<(const FoldMultiIndex& fmi) const
|
||||
{
|
||||
if (nvar != fmi.nvar)
|
||||
ogu::Exception(__FILE__,__LINE__,
|
||||
"Different nvar in FoldMultiIndex::operator<");
|
||||
|
||||
if (ord < fmi.ord)
|
||||
return true;
|
||||
if (ord > fmi.ord)
|
||||
return false;
|
||||
|
||||
int i = 0;
|
||||
while (i < ord && data[i] == fmi.data[i])
|
||||
i++;
|
||||
if (i == ord)
|
||||
return false;
|
||||
else
|
||||
return data[i] < fmi.data[i];
|
||||
}
|
||||
|
||||
bool FoldMultiIndex::operator==(const FoldMultiIndex& fmi) const
|
||||
{
|
||||
bool res = true;
|
||||
res = res && (nvar == fmi.nvar) && (ord == fmi.ord);
|
||||
if (res)
|
||||
for (int i = 0; i < ord; i++)
|
||||
if (data[i] != fmi.data[i])
|
||||
return false;
|
||||
return res;
|
||||
}
|
||||
|
||||
/** Advance to the next folded multi-index: increment the last entry
 * with carry, then restore the non-decreasing invariant by raising
 * any entry below its predecessor. A zero-order index has a single
 * (empty) value and is left unchanged. Incrementing the last index
 * leaves data[0] == nvar, which past_the_end() presumably detects —
 * confirm against the header. */
void FoldMultiIndex::increment()
{
	if (ord == 0)
		return;

	// increment the last entry, carrying overflow to the left
	int k = ord-1;
	data[k]++;
	while (k > 0 && data[k] == nvar) {
		data[k] = 0;
		data[--k]++;
	}
	// re-establish the non-decreasing (folded) invariant
	for (int kk = 1; kk < ord; kk++)
		if (data[kk-1] > data[kk])
			data[kk] = data[kk-1];
}
|
||||
|
||||
|
||||
// For description of an algorithm for calculation of folded offset,
// see Tensor Library Documentation, Ondra Kamenik, 2005, description
// of FTensor::getOffsetRecurse().
/** Return the position of this multi-index within the folded tensor
 * of its order. The recursion mutates its working array, so the index
 * data is copied first. */
int FoldMultiIndex::offset() const
{
	// make copy for the recursions (offset_recurse modifies it)
	int* tmp = new int[ord];
	for (int i = 0; i < ord; i++)
		tmp[i] = data[i];
	// call the recursive algorithm
	int res = offset_recurse(tmp, ord, nvar);

	delete [] tmp;
	return res;
}
|
||||
|
||||
/** Debug print of the index data as "[i0 i1 ... ]" (no newline). */
void FoldMultiIndex::print() const
{
	printf("[");
	for (int i = 0; i < ord; i++)
		printf("%d ", data[i]);
	printf("]");
}
|
||||
|
||||
/** Recursive worker for offset(). data (a local working array, not
 * the member) holds the remaining index entries, len their count, nv
 * the number of variables in the current subspace. The offset is the
 * count of indices preceding the leading run of equal entries
 * (computed via the global Pascal-triangle table ptriang) plus the
 * offset of the tail in the reduced subspace. The working array is
 * modified in place. */
int FoldMultiIndex::offset_recurse(int* data, int len, int nv)
{
	if (len == 0)
		return 0;
	// calculate length of initial constant indices
	int prefix = 1;
	while (prefix < len && data[0] == data[prefix])
		prefix++;

	int m = data[0];
	// number of indices preceding those starting with m
	int s1 = ptriang.noverk(nv+len-1, len) - ptriang.noverk(nv-m+len-1,len);

	// cancel m from the rest of the sequence
	for (int i = prefix; i < len; i++)
		data[i] -= m;

	// calculate offset of the remaining sequence
	int s2 = offset_recurse(data+prefix, len-prefix, nv-m);
	// return the sum
	return s1+s2;
}
|
||||
|
||||
|
||||
/** Comparison functor for maps keyed by FoldMultiIndex; delegates to
 * FoldMultiIndex::operator<. */
bool ltfmi::operator()(const FoldMultiIndex& i1, const FoldMultiIndex& i2) const
{
	return i1 < i2;
}
|
||||
|
||||
|
||||
/** Construct an evaluator over the parser's tree, holding non-owning
 * pointers to the parser's derivatives and a copy of its
 * differentiation atoms. The evaluator must not outlive fp. */
FormulaDerEvaluator::FormulaDerEvaluator(const FormulaParser& fp)
	: etree(fp.otree, -1)
{
	// non-owning references to the parser's derivative objects
	for (unsigned int i = 0; i < fp.ders.size(); i++)
		ders.push_back((const FormulaDerivatives*)(fp.ders[i]));

	der_atoms = fp.atoms.variables();
}
|
||||
|
||||
/** Evaluate all stored (non-zero) derivatives of exactly the given
 * order for every formula, with atom values from av. For each
 * derivative, loader.load receives the formula index, the order, the
 * tree indices of the differentiation variables (as an array of
 * length order) and the value. Throws if order exceeds the order the
 * derivatives were calculated to. */
void FormulaDerEvaluator::eval(const AtomValues& av, FormulaDerEvalLoader& loader, int order)
{
	if (ders.size() == 0)
		return;
	int maxorder = ders[0]->order;

	if (order > maxorder)
		throw ogu::Exception(__FILE__,__LINE__,
							 "Wrong order in FormulaDerEvaluator::eval");

	etree.reset_all();
	av.setValues(etree);

	// scratch array reused for every derivative's variable list
	int* vars = new int[order];

	for (unsigned int i = 0; i < ders.size(); i++) {
		for (FormulaDerivatives::Tfmiintmap::const_iterator it = ders[i]->ind2der.begin();
			 it != ders[i]->ind2der.end(); ++it) {
			const FoldMultiIndex& mi = (*it).first;
			if (mi.order() == order) {
				// set vars from multiindex mi and variables
				for (int k = 0; k < order; k++)
					vars[k] = der_atoms[mi[k]];
				// evaluate
				double res = etree.eval(ders[i]->tder[(*it).second]);
				// load
				loader.load(i, order, vars, res);
			}
		}
	}

	delete [] vars;
}
|
||||
|
||||
/** Evaluate derivatives of the given order restricted to the subset
 * of variables selected by mp (mapping local variable index to index
 * in der_atoms). All folded multi-indices over the subset are
 * enumerated; each is translated to the global variable space, and
 * only non-zero derivatives are evaluated and passed to the
 * loader. */
void FormulaDerEvaluator::eval(const vector<int>& mp, const AtomValues& av,
							   FormulaDerEvalLoader& loader, int order)
{
	etree.reset_all();
	av.setValues(etree);

	int nvar_glob = der_atoms.size();
	int nvar = mp.size();
	int* vars = new int[order];

	for (unsigned int i = 0; i < ders.size(); i++) {
		// enumerate all folded multi-indices of the given order
		// over the restricted variable set
		FoldMultiIndex mi(nvar, order);
		do {
			// find index of the derivative in the tensor
			FoldMultiIndex mi_glob(nvar_glob, mi, mp);
			int der = ders[i]->derivative(mi_glob);
			if (der != OperationTree::zero) {
				// set vars from the global multiindex
				for (int k = 0; k < order; k++)
					vars[k] = der_atoms[mi_glob[k]];
				// evaluate derivative
				double res = etree.eval(der);
				// load
				loader.load(i, order, vars, res);
			}
			mi.increment();
		} while (! mi.past_the_end());
	}

	delete [] vars;
}
|
||||
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,418 @@
|
|||
// Copyright (C) 2005, Ondra Kamenik
|
||||
|
||||
// $Id: formula_parser.h 1760 2008-03-31 14:26:35Z kamenik $
|
||||
|
||||
#ifndef OGP_FORMULA_PARSER_H
|
||||
#define OGP_FORMULA_PARSER_H
|
||||
|
||||
#include "tree.h"
|
||||
|
||||
namespace ogp {
|
||||
using std::vector;
|
||||
|
||||
/** Pure virtual class defining a minimal interface for
 * representation of nulary terms within FormulaParser. */
class Atoms {
public:
	Atoms() {}
	virtual ~Atoms() {}
	/** This returns previously assigned internal index to the
	 * given atom, or returns -1 if the atom has not been assigned
	 * yet. The method can raise an exception, if the Atoms
	 * implementation is strict and the name is not among
	 * prescribed possible values. */
	virtual int check(const char* name) const = 0;
	/** This method assigns an internal index to the nulary term
	 * described by the name. The internal index is allocated by
	 * the OperationTree class. */
	virtual void assign(const char* name, int t) = 0;
	/** Returns the number of variables which will be used for
	 * differentiations. */
	virtual int nvar() const = 0;
	/** Returns a vector of the variables' internal indices which
	 * will be used for differentiations. */
	virtual vector<int> variables() const = 0;
	/** Debug print. */
	virtual void print() const = 0;
};
|
||||
|
||||
/** Pure virtual class defining an interface for all classes able
 * to set nulary terms to the evaluation tree EvalTree. The
 * implementations of this class will have to be connected with
 * Atoms to have knowledge about the atoms and their indices in
 * the tree, and will call EvalTree::set_nulary. */
class AtomValues {
public:
	virtual ~AtomValues() {}
	/** Set the values of all nulary terms in the given evaluation
	 * tree (via EvalTree::set_nulary). */
	virtual void setValues(EvalTree& et) const = 0;
};
|
||||
|
||||
class FormulaDerEvaluator;
class FoldMultiIndex;
/** Strict weak ordering functor for FoldMultiIndex keys in the
 * std::map (delegates to FoldMultiIndex::operator<). */
struct ltfmi {
	bool operator()(const FoldMultiIndex& i1, const FoldMultiIndex& i2) const;
};
|
||||
|
||||
/** This class stores derivatives (tree indices) of one formula
 * for all orders up to a given one. It stores the derivatives as a
 * sequence (vector) of these tree indices and a sequence of the
 * multidimensional indices of variables wrt which the derivatives
 * were taken. In order to speed up querying for a derivative
 * given the variables, we have a map mapping the multidimensional
 * index to the order of the derivative in the sequence.
 *
 * The only reason we do not have only this map is that the
 * iterators of the map do not survive the insertions to the map,
 * and implementation of the constructor has to be very difficult.
 */
class FormulaDerivatives {
	friend class FormulaDerEvaluator;
protected:
	/** Vector of derivatives. This is a list of derivatives (tree
	 * indices), the ordering is given by the algorithm used to
	 * create it. Currently, it starts with the zero-th derivative,
	 * the formula itself, and carries on with first order, second,
	 * etc. */
	vector<int> tder;
	/** Vector of multiindices corresponding to the vector of
	 * derivatives. */
	vector<FoldMultiIndex> indices;
	/** For retrieving derivatives via a multiindex, we have a map
	 * mapping a multiindex to a derivative in the tder
	 * ordering. This means that indices[ind2der[index]] == index. */
	typedef map<FoldMultiIndex, int, ltfmi> Tfmiintmap;
	Tfmiintmap ind2der;
	/** The number of variables. */
	int nvar;
	/** The maximum order of derivatives. */
	int order;
public:
	/** The constructor allocates and fills the sequence of the
	 * indices of derivatives for a formula.
	 * @param otree the OperationTree for which all work is done
	 * and to which the derivatives are added.
	 * @param vars the vector of nulary terms in the tree; the
	 * derivatives are taken with respect to these variables in
	 * the ordering given by the vector.
	 * @param f the index of the formula being differentiated. The
	 * zero derivative is set to f.
	 * @param max_order the maximum order of differentiation.
	 */
	FormulaDerivatives(OperationTree& otree, const vector<int>& vars, int f, int max_order);
	/** Copy constructor. */
	FormulaDerivatives(const FormulaDerivatives& fd);
	virtual ~FormulaDerivatives(){}
	/** Random access to the derivatives via multiindex. */
	int derivative(const FoldMultiIndex& mi) const;
	/** Return the order. */
	int get_order() const
		{return order;}
	/** Debug print. */
	void print(const OperationTree& otree) const;
};
|
||||
|
||||
class FormulaEvaluator;

/** This class is able to parse a number of formulas and
 * differentiate them. The life cycle of the object is as follows:
 * After it is created, a few calls to parse will add formulas
 * (zero derivatives) to the object. Then a method differentiate()
 * can be called and a vector of pointers to derivatives for each
 * formula is created. After this, no one should call other
 * parse() or differentiate(). A const reference of the object can
 * be used in constructors of FormulaEvaluator and
 * FormulaDerEvaluator in order to evaluate formulas (zero
 * derivatives) and higher derivatives resp. */
class FormulaParser {
	friend class FormulaCustomEvaluator;
	friend class FormulaDerEvaluator;
protected:
	/** The OperationTree of all formulas, including derivatives. */
	OperationTree otree;
	/** Reference to Atoms. The Atoms are filled with nulary terms
	 * during execution of parse(). */
	Atoms& atoms;
	/** Vector of formulas (zero derivatives) in the order as they
	 * have been parsed. */
	vector<int> formulas;
	/** The vector of derivatives, each element corresponds to a
	 * formula in the vector formulas. */
	vector<FormulaDerivatives*> ders;
public:
	/** Construct an empty formula parser. */
	FormulaParser(Atoms& a)
		: atoms(a) {}
	/** Copy constructor using a different instance of Atoms. */
	FormulaParser(const FormulaParser& fp, Atoms& a);
	virtual ~FormulaParser();

	/** Requires an addition of the formula; called from the
	 * parser. */
	void add_formula(int t);
	/** Requires an addition of the binary operation; called from
	 * the parser. */
	int add_binary(code_t code, int t1, int t2);
	/** Requires an addition of the unary operation; called from
	 * the parser. */
	int add_unary(code_t code, int t);
	/** Requires an addition of the nulary operation given by the
	 * string. The Atoms are consulted for uniqueness and are given
	 * an internal index generated by the OperationTree. This is
	 * the channel through which the Atoms are filled. */
	int add_nulary(const char* str);

	/** Adds a derivative to the tree. This just calls
	 * OperationTree::add_derivative. */
	int add_derivative(int t, int v)
		{return otree.add_derivative(t, v);}
	/** Adds a substitution. This just calls
	 * OperationTree::add_substitution. */
	int add_substitution(int t, const map<int,int>& subst)
		{return otree.add_substitution(t, subst);}
	/** Add the substitution given by the map where left sides of
	 * substitutions come from another parser. The right sides are
	 * from this object. The given t is from the given parser fp. */
	int add_substitution(int t, const map<int,int>& subst,
						 const FormulaParser& fp)
		{return otree.add_substitution(t, subst, fp.otree);}
	/** This adds formulas from the given parser with (possibly)
	 * different atoms applying substitutions from the given map
	 * mapping atoms from fp to atoms of the object. */
	void add_subst_formulas(const map<int,int>& subst, const FormulaParser& fp);
	/** Substitute formulas. For each i from 1 through all
	 * formulas, it adds a substitution of the i-th formula and
	 * makes it to be the i-th formula. */
	void substitute_formulas(const std::map<int,int>& subst);
	/** This method turns the given term to a nulary operation. It
	 * should be used with caution, since this method does not do
	 * anything with atoms, but usually some action is also
	 * needed (at least to assign the tree index t to some
	 * atom). */
	void nularify(int t)
		{otree.nularify(t);}
	/** Returns a set of nulary terms of the given term. Just
	 * calls OperationTree::nulary_of_term. */
	const hash_set<int>& nulary_of_term(int t) const
		{return otree.nulary_of_term(t);}

	/** Parse a given string containing one or more formulas. The
	 * formulas are parsed and added to the OperationTree and to
	 * the formulas vector. */
	void parse(int length, const char* stream);
	/** Processes a syntax error from bison. */
	void error(const char* mes) const;
	/** Differentiate all the formulas up to the given order. The
	 * variables with respect to which the derivatives are taken
	 * are obtained by Atoms::variables(). If the derivatives exist,
	 * they are destroyed and created again (with possibly
	 * different order). */
	void differentiate(int max_order);
	/** Return i-th formula derivatives. */
	const FormulaDerivatives& derivatives(int i) const;

	/** This returns a maximum index of zero derivative formulas
	 * including all nulary terms. This is a minimum length of the
	 * tree for which it is safe to evaluate zero derivatives of
	 * the formulas. */
	int last_formula() const;
	/** This returns a tree index of the i-th formula in the
	 * vector. */
	int formula(int i) const
		{return formulas[i];}


	/** This returns a tree index of the last formula and pops its
	 * item from the formulas vector. The number of formulas is
	 * then less by one. Returns -1 if there is no formula. If
	 * there are derivatives of the last formula, they are
	 * destroyed and the vector ders is popped from the back. */
	int pop_last_formula();

	/** This returns a number of formulas. */
	int nformulas() const
		{return (int)(formulas.size());}

	/** This returns a reference to atoms. */
	const Atoms& getAtoms() const
		{return atoms;}
	Atoms& getAtoms()
		{return atoms;}
	/** This returns the tree. */
	const OperationTree& getTree() const
		{return otree;}
	OperationTree& getTree()
		{return otree;}

	/** Debug print. */
	void print() const;
private:
	/** Hide this copy constructor declaration by declaring it as
	 * private. */
	FormulaParser(const FormulaParser& fp);
	/** Destroy all derivatives. */
	void destroy_derivatives();
};
|
||||
|
||||
/** This is a pure virtual class defining an interface for all
 * classes which will load the results of formula (zero
 * derivative) evaluations. A primitive implementation of this
 * class can be a vector of doubles. */
class FormulaEvalLoader {
public:
	virtual ~FormulaEvalLoader() {}
	/** Set the value res for the given formula. The formula is
	 * identified by an index corresponding to the ordering in
	 * which the formulas have been parsed (starting from
	 * zero). */
	virtual void load(int i, double res) = 0;
};
|
||||
|
||||
/** This class evaluates a selected subset of terms of the
 * tree. In the protected constructor, one can constrain the
 * initialization of the evaluation tree to a given number of
 * terms in the beginning. Using this constructor, one has to make
 * sure that the terms in the beginning do not refer to terms
 * behind the initial part. */
class FormulaCustomEvaluator {
protected:
	/** The evaluation tree. */
	EvalTree etree;
	/** The custom tree indices to be evaluated. */
	vector<int> terms;
public:
	/** Construct from FormulaParser and given list of terms. */
	FormulaCustomEvaluator(const FormulaParser& fp, const vector<int>& ts)
		: etree(fp.otree), terms(ts)
		{}
	/** Construct from OperationTree and given list of terms. */
	FormulaCustomEvaluator(const OperationTree& ot, const vector<int>& ts)
		: etree(ot), terms(ts)
		{}
	/** Evaluate the terms using the given AtomValues and load the
	 * results using the given loader. The loader is called for
	 * each term in the order of the terms. */
	void eval(const AtomValues& av, FormulaEvalLoader& loader);
protected:
	/** Evaluate all parsed formulas; the evaluation tree is
	 * restricted to the initial part safe for zero derivatives
	 * (see FormulaParser::last_formula). Used by FormulaEvaluator. */
	FormulaCustomEvaluator(const FormulaParser& fp)
		: etree(fp.otree, fp.last_formula()), terms(fp.formulas)
		{}
};
|
||||
|
||||
/** This class evaluates zero derivatives of the FormulaParser,
 * i.e. the formulas themselves. */
class FormulaEvaluator : public FormulaCustomEvaluator {
public:
	/** Construct from FormulaParser. */
	FormulaEvaluator(const FormulaParser& fp)
		: FormulaCustomEvaluator(fp) {}
};
|
||||
|
||||
/** This is a pure virtual class defining an interface for all
 * classes which will load the results of formula derivative
 * evaluations. */
class FormulaDerEvalLoader {
public:
	virtual ~FormulaDerEvalLoader() {}
	/** This loads the result of the derivative of the given
	 * order. The semantics of i is the same as in
	 * FormulaEvalLoader::load. The indices of variables with
	 * respect to which the derivative was taken are stored in
	 * memory pointed to by vars. These are the tree indices of the
	 * variables. */
	virtual void load(int i, int order, const int* vars, double res) = 0;
};
|
||||
|
||||
/** This class is a utility class representing the tensor
 * multiindex. It can basically increment itself, and calculate
 * its offset in the folded tensor. */
class FoldMultiIndex {
	/** Number of variables. */
	int nvar;
	/** Dimension. */
	int ord;
	/** The multiindex. */
	int* data;
public:
	/** Initializes to the zero derivative. Order is 0, data is
	 * empty. */
	FoldMultiIndex(int nv);
	/** Initializes the multiindex to zeros or given i. */
	FoldMultiIndex(int nv, int order, int i = 0);
	/** Makes a new multiindex of the same order applying a given
	 * mapping to the indices. The mapping is supposed to be monotone. */
	FoldMultiIndex(int nv, const FoldMultiIndex& mi, const vector<int>& mp);
	/** Shifting constructor. This adds a given number of orders
	 * to the end, copying the last item to the newly added items,
	 * keeping the index ordered. If the index was empty (zero-th
	 * dimension), then zeros are added. */
	FoldMultiIndex(const FoldMultiIndex& fmi, int new_orders);
	/** Copy constructor. */
	FoldMultiIndex(const FoldMultiIndex& fmi);
	/** Destructor. */
	virtual ~FoldMultiIndex()
		{delete [] data;}
	/** Assignment operator. */
	const FoldMultiIndex& operator=(const FoldMultiIndex& fmi);
	/** Operator < implementing lexicographic ordering within one
	 * order, increasing order across orders. */
	bool operator<(const FoldMultiIndex& fmi) const;
	bool operator==(const FoldMultiIndex& fmi) const;
	/** Increment the multiindex. */
	void increment();
	/** Return offset of the multiindex in the folded tensor. */
	int offset() const;
	const int& operator[](int i) const
		{return data[i];}
	/** Return order of the multiindex, i.e. dimension of the
	 * tensor. */
	int order() const
		{return ord;}
	/** Return the number of variables. */
	int nv() const
		{return nvar;}
	/** Return the data. */
	const int* ind() const
		{return data;}
	/** Return true if the end of the tensor is reached. The
	 * result of a subsequent increment should be considered
	 * unpredictable. */
	bool past_the_end() const
		{return (ord == 0) || (data[0] == nvar);}
	/** Prints the multiindex in the brackets. */
	void print() const;
private:
	static int offset_recurse(int* data, int len, int nv);
};
|
||||
|
||||
/** This class evaluates derivatives of the FormulaParser. */
class FormulaDerEvaluator {
	/** Its own instance of EvalTree. */
	EvalTree etree;
	/** The indices of derivatives for each formula. This is a
	 * const copy of FormulaParser::ders. We do not allocate nor
	 * deallocate anything here. */
	vector<const FormulaDerivatives*> ders;
	/** A copy of tree indices corresponding to atoms with
	 * respect to which the derivatives were taken. */
	vector<int> der_atoms;
public:
	/** Construct the object from FormulaParser. */
	FormulaDerEvaluator(const FormulaParser& fp);
	/** Evaluate the derivatives from the FormulaParser wrt to all
	 * atoms in the variables vector at the given AtomValues. The
	 * given loader is used for output. */
	void eval(const AtomValues& av, FormulaDerEvalLoader& loader, int order);
	/** Evaluate the derivatives from the FormulaParser wrt to a
	 * selection of atoms of the atoms in der_atoms vector at the
	 * given AtomValues. The selection is given by a monotone
	 * mapping to the indices (not values) of the der_atoms. */
	void eval(const vector<int>& mp, const AtomValues& av, FormulaDerEvalLoader& loader,
			  int order);
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,46 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: location.h 762 2006-05-22 13:00:07Z kamenik $
|
||||
|
||||
// Purpose: This file defines macros for lex and bison so that the
|
||||
// very primitive location tracking would be enabled. The location of
|
||||
// a token is given by offset of its first character. The offset is
|
||||
// relative to the number which is (and must be) initialized before
|
||||
// parsing. This file is to be included to the top of bison and lex
|
||||
// sources.
|
||||
|
||||
// How to use: in preamble of bison and flex, you must include this
|
||||
// file and declare extern YYLTYPE prefix##lloc. In addition, in flex,
|
||||
// you must define int prefix##ll =0; and use macro SET_LLOC(prefix)
|
||||
// in EVERY action consuming material (this can be done with #define
|
||||
// YY_USER_ACTION) and in bison you must use option %locations.
|
||||
|
||||
|
||||
#ifndef OG_LOCATION_H
|
||||
#define OG_LOCATION_H
|
||||
|
||||
namespace ogp {
|
||||
|
||||
/** Primitive token location: the token is identified by its byte
 * offset in the parsed stream and its length. */
struct location_type {
	int off; // offset of the token
	int ll; // length of the token
	location_type() : off(0), ll(0) {}
};
|
||||
|
||||
};
|
||||
|
||||
#define YYLTYPE ogp::location_type
|
||||
|
||||
// set current off to the first off and add all lengths
|
||||
#define YYLLOC_DEFAULT(Current, Rhs, N) \
|
||||
{(Current).off = (Rhs)[1].off; \
|
||||
(Current).ll = 0; \
|
||||
for (int i = 1; i <= N; i++) (Current).ll += (Rhs)[i].ll;}
|
||||
|
||||
#define SET_LLOC(prefix) (prefix##lloc.off += prefix##lloc.ll, prefix##lloc.ll = prefix##leng)
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,59 @@
|
|||
%{
|
||||
#include "location.h"
|
||||
#include "matrix_tab.hh"
|
||||
|
||||
extern YYLTYPE matrix_lloc;
|
||||
extern void matrix_error(char*);
|
||||
|
||||
#define YY_USER_ACTION SET_LLOC(matrix_);
|
||||
%}
|
||||
|
||||
%option nounput
|
||||
%option noyy_top_state
|
||||
%option stack
|
||||
%option yylineno
|
||||
%option prefix="matrix_"
|
||||
%option never-interactive
|
||||
%x CMT
|
||||
|
||||
%%
|
||||
|
||||
/* comments */
|
||||
<*>"/*" {yy_push_state(CMT);}
|
||||
<CMT>[^*\n]*
|
||||
<CMT>"*"+[^*/\n]*
|
||||
<CMT>"*"+"/" {yy_pop_state();}
|
||||
<CMT>[\n]
|
||||
"//".*\n
|
||||
|
||||
/* ignore spaces and commas */
|
||||
[ \t,]
|
||||
/* new row */
|
||||
\r\n {return NEW_ROW;}
|
||||
\n {return NEW_ROW;}
|
||||
;[ \t]*\n {return NEW_ROW;}
|
||||
;[ \t]*\r\n {return NEW_ROW;}
|
||||
; {return NEW_ROW;}
|
||||
|
||||
[+-]?(([0-9]*\.?[0-9]+)|([0-9]+\.))([edED][-+]?[0-9]+)? {
|
||||
matrix_lval.val = strtod(matrix_text, NULL);
|
||||
return DNUMBER;
|
||||
}
|
||||
|
||||
. {
|
||||
char mes[300];
|
||||
sprintf(mes, "Unrecognized character %s", matrix_text);
|
||||
matrix_error(mes);
|
||||
}
|
||||
|
||||
%%
|
||||
|
||||
// Flex wrap hook: returning 1 tells the scanner there is no further
// input once the current buffer is exhausted.
int matrix_wrap()
{
	return 1;
}
|
||||
|
||||
// Type-erased wrapper around matrix__delete_buffer() so that callers
// outside this lexer need not see flex's YY_BUFFER_STATE type.
void matrix__destroy_buffer(void* p)
{
	matrix__delete_buffer((YY_BUFFER_STATE)p);
}
|
|
@ -0,0 +1,66 @@
|
|||
%{
|
||||
#include "location.h"
|
||||
#include "matrix_parser.h"
|
||||
#include "matrix_tab.hh"
|
||||
|
||||
void matrix_error(char*);
|
||||
int matrix_lex(void);
|
||||
extern int matrix_lineno;
|
||||
extern ogp::MatrixParser* mparser;
|
||||
extern YYLTYPE matrix_lloc;
|
||||
|
||||
// static void print_token_value (FILE *, int, YYSTYPE);
|
||||
//#define YYPRINT(file, type, value) print_token_value (file, type, value)
|
||||
|
||||
%}
|
||||
|
||||
%union {
|
||||
double val;
|
||||
int integer;
|
||||
}
|
||||
|
||||
%token NEW_ROW
|
||||
%token <val> DNUMBER
|
||||
|
||||
%name-prefix="matrix_";
|
||||
|
||||
%locations
|
||||
%error-verbose
|
||||
|
||||
%%
|
||||
|
||||
matrix : first_row other_rows
|
||||
| first_row other_rows empty_rows
|
||||
| first_row empty_rows other_rows empty_rows
|
||||
| first_row empty_rows other_rows
|
||||
| empty_rows first_row other_rows
|
||||
| empty_rows first_row other_rows empty_rows
|
||||
| empty_rows first_row empty_rows other_rows empty_rows
|
||||
| empty_rows first_row empty_rows
|
||||
| first_row empty_rows
|
||||
| empty_rows first_row
|
||||
| first_row
|
||||
| empty_rows
|
||||
;
|
||||
|
||||
empty_rows : empty_rows NEW_ROW | NEW_ROW;
|
||||
|
||||
lod : DNUMBER {mparser->add_item($1);}
|
||||
| lod DNUMBER {mparser->add_item($2);}
|
||||
;
|
||||
|
||||
first_row : lod;
|
||||
|
||||
other_rows : other_rows one_row | other_rows empty_rows one_row |one_row ;
|
||||
|
||||
one_row : NEW_ROW {mparser->start_row();} lod;
|
||||
|
||||
|
||||
%%
|
||||
|
||||
// Bison error callback: forward the message to the MatrixParser
// instance currently registered in the global mparser pointer.
void matrix_error(char* s)
{
	mparser->error(s);
}
|
||||
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: matrix_parser.cpp 2269 2008-11-23 14:33:22Z michel $
|
||||
|
||||
#include "parser_exception.h"
|
||||
#include "matrix_parser.h"
|
||||
#include "location.h"
|
||||
#include "matrix_tab.hh"
|
||||
#include <cstring>
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
/** A global symbol for passing info to the MatrixParser from
|
||||
* matrix_parse(). */
|
||||
MatrixParser* mparser;
|
||||
|
||||
/** The declaration of functions defined in matrix_ll.cc and
|
||||
* matrix_tab.cc generated from matrix.lex and matrix.y. */
|
||||
void* matrix__scan_buffer(char*, size_t);
|
||||
void matrix__destroy_buffer(void*);
|
||||
void matrix_parse();
|
||||
extern ogp::location_type matrix_lloc;
|
||||
|
||||
void MatrixParser::parse(int length, const char* stream)
|
||||
{
|
||||
// reinitialize the object
|
||||
data.clear();
|
||||
row_lengths.clear();
|
||||
nc = 0;
|
||||
// allocate temporary buffer and parse
|
||||
char* buffer = new char[length+2];
|
||||
strncpy(buffer, stream, length);
|
||||
buffer[length] = '\0';
|
||||
buffer[length+1] = '\0';
|
||||
matrix_lloc.off = 0;
|
||||
matrix_lloc.ll = 0;
|
||||
void* p = matrix__scan_buffer(buffer, (unsigned int)length+2);
|
||||
mparser = this;
|
||||
matrix_parse();
|
||||
delete [] buffer;
|
||||
matrix__destroy_buffer(p);
|
||||
}
|
||||
|
||||
void MatrixParser::add_item(double v)
|
||||
{
|
||||
data.push_back(v);
|
||||
if (row_lengths.size() == 0)
|
||||
row_lengths.push_back(0);
|
||||
(row_lengths.back())++;
|
||||
if (row_lengths.back() > nc)
|
||||
nc = row_lengths.back();
|
||||
}
|
||||
|
||||
// Open a new (initially empty) row; called from the bison parser on
// each row delimiter.
void MatrixParser::start_row()
{
	row_lengths.push_back(0);
}
|
||||
|
||||
// Report a parse error by throwing ParserException carrying the
// current character offset from the scanner's location tracking.
void MatrixParser::error(const char* mes) const
{
	throw ParserException(mes, matrix_lloc.off);
}
|
||||
|
||||
int MatrixParser::find_first_non_empty_row(int start) const
|
||||
{
|
||||
int r = start;
|
||||
while (r < (int)row_lengths.size() && row_lengths[r] == 0)
|
||||
r++;
|
||||
return r;
|
||||
}
|
||||
|
||||
/** Return an iterator pointing at the first parsed item (in the
 * first non-empty row). */
MPIterator MatrixParser::begin() const
{
	return MPIterator(*this);
}
|
||||
|
||||
/** Return a past-the-end iterator over the parsed items. */
MPIterator MatrixParser::end() const
{
	return MPIterator(*this, "end");
}
|
||||
|
||||
// Begin iterator: item 0, column 0, positioned in the first non-empty row.
MPIterator::MPIterator(const MatrixParser& mp)
	: p(&mp), i(0), c(0), r(mp.find_first_non_empty_row())
{}
|
||||
|
||||
// Past-the-end iterator: item index equals the number of parsed items;
// the dummy argument only selects this overload.
MPIterator::MPIterator(const MatrixParser& mp, const char* dummy)
	: p(&mp), i(mp.data.size()), c(0), r(mp.row_lengths.size())
{}
|
||||
|
||||
/** Advance to the next parsed item, skipping empty rows. Must not be
 * called on the past-the-end iterator. */
MPIterator& MPIterator::operator++()
{
	++i;
	++c;
	// current row exhausted: restart the column counter and move to
	// the next non-empty row
	if (c >= p->row_lengths[r]) {
		c = 0;
		r = p->find_first_non_empty_row(r+1);
	}
	return *this;
}
|
|
@ -0,0 +1,118 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: matrix_parser.h 762 2006-05-22 13:00:07Z kamenik $
|
||||
|
||||
#ifndef OGP_MATRIX_PARSER
|
||||
#define OGP_MATRIX_PARSER
|
||||
|
||||
#include <vector>
|
||||
|
||||
namespace ogp {
|
||||
using std::vector;
|
||||
|
||||
/** This class reads the given string and parses it as a
 * matrix. The matrix is read row by row. The row delimiter is
 * either a newline character or semicolon (the first newline
 * character after the semicolon is ignored), the column delimiter
 * is either a blank character or comma. A different number of items
 * in the row is not reconciled, we do not construct a matrix
 * here. The class provides only an iterator to go through all
 * read items, the iterator provides information on row number and
 * column number of the item. */
class MPIterator;
class MatrixParser {
	friend class MPIterator;
protected:
	/** Raw data as they were read. */
	vector<double> data;
	/** Number of items in each row. */
	vector<int> row_lengths;
	/** Maximum of the row lengths. */
	int nc;
public:
	MatrixParser()
		: nc(0) {}
	MatrixParser(const MatrixParser& mp)
		: data(mp.data), row_lengths(mp.row_lengths), nc(mp.nc) {}
	virtual ~MatrixParser() {}
	/** Return the number of read rows. */
	int nrows() const
		{return (int) row_lengths.size();}
	/** Return the maximum number of items in the rows. */
	int ncols() const
		{return nc;}
	/** Parses the given data. This initializes the object data. */
	void parse(int length, const char* stream);
	/** Adds a newly read item. This should be called from the bison
	 * parser. */
	void add_item(double v);
	/** Starts a new row. This should be called from the bison
	 * parser. */
	void start_row();
	/** Process a parse error from the parser. */
	void error(const char* mes) const;
	/** Return begin iterator. */
	MPIterator begin() const;
	/** Return end iterator. */
	MPIterator end() const;
protected:
	/** Returns an index of the first non-empty row starting at
	 * start. If the start row is non-empty, returns the start. If
	 * there is no other non-empty row, returns
	 * row_lengths.size(). */
	int find_first_non_empty_row(int start = 0) const;
};
|
||||
|
||||
/** This is an iterator intended to iterate through a matrix parsed
 * by MatrixParser. The iterator provides only read-only access. */
class MPIterator {
	friend class MatrixParser;
protected:
	/** Reference to the matrix parser. */
	const MatrixParser* p;
	/** The index of the pointed item in the matrix parser. */
	unsigned int i;
	/** The column number of the pointed item starting from zero. */
	int c;
	/** The row number of the pointed item starting from zero. */
	int r;

public:
	MPIterator() : p(NULL), i(0), c(0), r(0) {}
	/** Constructs an iterator pointing to the beginning of the
	 * parsed matrix. */
	MPIterator(const MatrixParser& mp);
	/** Constructs an iterator pointing to the past-the-end of the
	 * parsed matrix. */
	MPIterator(const MatrixParser& mp, const char* dummy);
	/** Return read-only reference to the pointed item. */
	const double& operator*() const
		{return p->data[i];}
	/** Return a row index of the pointed item. */
	int row() const
		{return r;}
	/** Return a column index of the pointed item. */
	int col() const
		{return c;}
	/** Assignment operator. */
	const MPIterator& operator=(const MPIterator& it)
		{p = it.p; i = it.i; c = it.c; r = it.r; return *this;}
	/** Return true if the iterators are the same, this is if they
	 * have the same underlying object and the same item index. */
	bool operator==(const MPIterator& it) const
		{return it.p == p && it.i == i;}
	/** Negation of the operator==. */
	bool operator!=(const MPIterator& it) const
		{return ! (it == *this);}
	/** Increment operator. */
	MPIterator& operator++();
};
|
||||
};
|
||||
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,30 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: namelist.cpp 42 2007-01-22 21:53:24Z ondra $
|
||||
|
||||
#include "namelist.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
/** A global symbol for passing info to NameListParser from its
|
||||
* parser. */
|
||||
NameListParser* name_list_parser;
|
||||
|
||||
void* namelist__scan_buffer(char*, unsigned int);
|
||||
void namelist__destroy_buffer(void*);
|
||||
void namelist_parse();
|
||||
|
||||
void NameListParser::namelist_parse(int length, const char* stream)
|
||||
{
|
||||
char* buffer = new char[length+2];
|
||||
strncpy(buffer, stream, length);
|
||||
buffer[length] = '\0';
|
||||
buffer[length+1] = '\0';
|
||||
void* p = namelist__scan_buffer(buffer, (unsigned int)length+2);
|
||||
name_list_parser = this;
|
||||
::namelist_parse();
|
||||
delete [] buffer;
|
||||
namelist__destroy_buffer(p);
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
// Copyright (C) 2007, Ondra Kamenik
|
||||
|
||||
// $Id: namelist.h 107 2007-05-10 22:35:04Z ondra $
|
||||
|
||||
#ifndef OGP_NAMELIST
|
||||
#define OGP_NAMELIST
|
||||
|
||||
namespace ogp {
|
||||
|
||||
/** Parent class of all parsers parsing a namelist. They must
|
||||
* implement add_name() method and error() method, which is called
|
||||
* when an parse error occurs.
|
||||
*
|
||||
* Parsing a name list is done as follows: implement
|
||||
* NameListParser interface, create the object, and call
|
||||
* NameListParser::namelist_parse(int lengt, const char*
|
||||
* text). When implementing error(), one may consult global
|
||||
* location_type namelist_lloc. */
|
||||
class NameListParser {
public:
	virtual ~NameListParser() {}
	/** Called by the parser for each name found in the list. */
	virtual void add_name(const char* name) = 0;
	/** Called by the parser when a syntax error occurs; mes is
	 * the bison-generated error message. */
	virtual void namelist_error(const char* mes) = 0;
	/** Parse the first length characters of text, reporting each
	 * name through add_name(). */
	void namelist_parse(int length, const char* text);
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,52 @@
|
|||
%{
|
||||
#include "location.h"
|
||||
#include "namelist_tab.hh"
|
||||
|
||||
extern YYLTYPE namelist_lloc;
|
||||
|
||||
#define YY_USER_ACTION SET_LLOC(namelist_);
|
||||
%}
|
||||
|
||||
%option nounput
|
||||
%option noyy_top_state
|
||||
%option stack
|
||||
%option prefix="namelist_"
|
||||
%option never-interactive
|
||||
%x CMT
|
||||
|
||||
%%
|
||||
|
||||
/* comments */
|
||||
<*>"/*" {yy_push_state(CMT);}
|
||||
<CMT>[^*\n]*
|
||||
<CMT>"*"+[^*/\n]*
|
||||
<CMT>"*"+"/" {yy_pop_state();}
|
||||
<CMT>[\n]
|
||||
"//".*\n
|
||||
|
||||
/* initial spaces or tabs are ignored */
|
||||
[ \t\r\n\0]
|
||||
|
||||
/* names */
|
||||
[A-Za-z_][A-Za-z0-9_]* {
|
||||
namelist_lval.string = namelist_text;
|
||||
return NAME;
|
||||
}
|
||||
|
||||
, {return COMMA;}
|
||||
. {
|
||||
namelist_lval.character = namelist_text[0];
|
||||
return CHARACTER;
|
||||
}
|
||||
|
||||
%%
|
||||
|
||||
/** Flex wrap hook; returning 1 tells the scanner there is no
 * further input once the current buffer is exhausted. */
int namelist_wrap()
{
	return 1;
}
|
||||
|
||||
/** Dispose of a scan buffer created by namelist__scan_buffer; the
 * opaque pointer is really a YY_BUFFER_STATE. */
void namelist__destroy_buffer(void* p)
{
	namelist__delete_buffer((YY_BUFFER_STATE)p);
}
|
|
@ -0,0 +1,38 @@
|
|||
%{
|
||||
#include "location.h"
|
||||
#include "namelist.h"
|
||||
#include "namelist_tab.hh"
|
||||
|
||||
int namelist_error(char*);
|
||||
int namelist_lex(void);
|
||||
extern ogp::NameListParser* name_list_parser;
|
||||
|
||||
%}
|
||||
|
||||
%union {
|
||||
int integer;
|
||||
char *string;
|
||||
char character;
|
||||
}
|
||||
|
||||
%token COMMA CHARACTER
|
||||
%token <string> NAME;
|
||||
|
||||
%name-prefix="namelist_"
|
||||
|
||||
%locations
|
||||
%error-verbose
|
||||
|
||||
%%
|
||||
|
||||
namelist : namelist NAME {name_list_parser->add_name($2);}
|
||||
| namelist COMMA NAME {name_list_parser->add_name($3);}
|
||||
| NAME {name_list_parser->add_name($1);}
|
||||
;
|
||||
|
||||
%%
|
||||
|
||||
int namelist_error(char* mes)
|
||||
{
|
||||
name_list_parser->namelist_error(mes);
|
||||
}
|
|
@ -0,0 +1,116 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: parser_exception.cpp 2269 2008-11-23 14:33:22Z michel $
|
||||
|
||||
#include "parser_exception.h"
|
||||
#include <cstring>
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
ParserException::ParserException(const char* m, int offset)
|
||||
: mes(new char[strlen(m)+1]), off(offset),
|
||||
aux_i1(-1), aux_i2(-1), aux_i3(-1)
|
||||
{
|
||||
strcpy(mes, m);
|
||||
}
|
||||
|
||||
ParserException::ParserException(const string& m, int offset)
|
||||
: mes(new char[m.size()+1]), off(offset),
|
||||
aux_i1(-1), aux_i2(-1), aux_i3(-1)
|
||||
{
|
||||
strncpy(mes, m.c_str(), m.size());
|
||||
mes[m.size()] = '\0';
|
||||
}
|
||||
|
||||
ParserException::ParserException(const string& m, const char* dum, int i1)
|
||||
: mes(new char[m.size()+1]), off(0),
|
||||
aux_i1(i1), aux_i2(-1), aux_i3(-1)
|
||||
{
|
||||
strncpy(mes, m.c_str(), m.size());
|
||||
mes[m.size()] = '\0';
|
||||
}
|
||||
|
||||
ParserException::ParserException(const string& m, const char* dum, int i1, int i2)
|
||||
: mes(new char[m.size()+1]), off(0),
|
||||
aux_i1(i1), aux_i2(i2), aux_i3(-1)
|
||||
{
|
||||
strncpy(mes, m.c_str(), m.size());
|
||||
mes[m.size()] = '\0';
|
||||
}
|
||||
|
||||
ParserException::ParserException(const string& m, const char* dum, int i1, int i2, int i3)
|
||||
: mes(new char[m.size()+1]), off(0),
|
||||
aux_i1(i1), aux_i2(i2), aux_i3(i3)
|
||||
{
|
||||
strncpy(mes, m.c_str(), m.size());
|
||||
mes[m.size()] = '\0';
|
||||
}
|
||||
|
||||
ParserException::ParserException(const ParserException& m, int plus_offset)
	: mes(NULL),
	  aux_i1(-1), aux_i2(-1), aux_i3(-1)
{
	// copy() fills in mes and off from m; off is shifted afterwards
	copy(m);
	off += plus_offset;
}
|
||||
|
||||
ParserException::ParserException(const ParserException& m, const char* dum, int i)
|
||||
: mes(NULL),
|
||||
aux_i1(-1), aux_i2(-1), aux_i3(-1)
|
||||
{
|
||||
copy(m);
|
||||
aux_i3 = m.aux_i2;
|
||||
aux_i2 = m.aux_i1;
|
||||
aux_i1 = i;
|
||||
}
|
||||
|
||||
ParserException::ParserException(const ParserException& m, const char* dum, int i1, int i2)
|
||||
: mes(NULL),
|
||||
aux_i1(-1), aux_i2(-1), aux_i3(-1)
|
||||
{
|
||||
copy(m);
|
||||
aux_i3 = m.aux_i1;
|
||||
aux_i2 = i2;
|
||||
aux_i1 = i1;
|
||||
}
|
||||
|
||||
ParserException::ParserException(const ParserException& m, const char* dum, int i1, int i2, int i3)
|
||||
: mes(NULL),
|
||||
aux_i1(-1), aux_i2(-1), aux_i3(-1)
|
||||
{
|
||||
copy(m);
|
||||
aux_i3 = i3;
|
||||
aux_i2 = i2;
|
||||
aux_i1 = i1;
|
||||
}
|
||||
|
||||
|
||||
ParserException::ParserException(const ParserException& e)
|
||||
: mes(NULL),
|
||||
aux_i1(-1), aux_i2(-1), aux_i3(-1)
|
||||
{
|
||||
copy(e);
|
||||
}
|
||||
|
||||
ParserException::~ParserException()
|
||||
{
|
||||
delete [] mes;
|
||||
}
|
||||
|
||||
/** Deep-copy all state from e, freeing any previously owned
 * message buffer first. Called by the copy constructors. */
void ParserException::copy(const ParserException& e)
{
	// release the old message before overwriting the pointer
	if (mes)
		delete [] mes;
	mes = new char[strlen(e.mes)+1];
	strcpy(mes, e.mes);
	off = e.off;
	aux_i1 = e.aux_i1;
	aux_i2 = e.aux_i2;
	aux_i3 = e.aux_i3;
}
|
||||
|
||||
void ParserException::print(FILE* fd) const
|
||||
{
|
||||
// todo: to be refined
|
||||
fprintf(fd, "%s: offset %d\n", mes, off);
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: parser_exception.h 1761 2008-03-31 14:27:13Z kamenik $
|
||||
|
||||
#ifndef OG_FORMULA_PARSER_H
|
||||
#define OG_FORMULA_PARSER_H
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace ogp {
|
||||
using std::string;
|
||||
|
||||
/** This is an easy exception, which, besides the message, stores
|
||||
* also an offset of the parse error. Since we might need to track
|
||||
* the argument number and for example the filed in the argument
|
||||
* which caused the error, we add three integers, which have no
|
||||
* semantics here. They should be documented in the function which
|
||||
* throws an exception and sets them. Their default value is -1,
|
||||
* which means they have not been set. */
|
||||
class ParserException {
|
||||
protected:
|
||||
char* mes;
|
||||
int off;
|
||||
int aux_i1;
|
||||
int aux_i2;
|
||||
int aux_i3;
|
||||
public:
|
||||
ParserException(const char* m, int offset);
|
||||
ParserException(const string& m, int offset);
|
||||
ParserException(const string& m, const char* dum, int i1);
|
||||
ParserException(const string& m, const char* dum, int i1, int i2);
|
||||
ParserException(const string& m, const char* dum, int i1, int i2, int i3);
|
||||
ParserException(const ParserException& e, int plus_offset);
|
||||
/** Makes a copy and pushes given integer to aux_i1 shuffling
|
||||
* others and forgetting the last. */
|
||||
ParserException(const ParserException& e, const char* dum, int i);
|
||||
/** Makes a copy and pushes given two integers to aux_i1 and aux_i2 shuffling
|
||||
* others and forgetting the last two. */
|
||||
ParserException(const ParserException& e, const char* dum, int i1, int i2);
|
||||
/** Makes a copy and pushes given three integers to aux_i1, aux_i2, aus_i3 shuffling
|
||||
* others and forgetting the last three. */
|
||||
ParserException(const ParserException& e, const char* dum, int i1, int i2, int i3);
|
||||
ParserException(const ParserException& e);
|
||||
virtual ~ParserException();
|
||||
void print(FILE* fd) const;
|
||||
const char* message() const
|
||||
{return mes;}
|
||||
int offset() const
|
||||
{return off;}
|
||||
const int& i1() const
|
||||
{return aux_i1;}
|
||||
int& i1()
|
||||
{return aux_i1;}
|
||||
const int& i2() const
|
||||
{return aux_i2;}
|
||||
int& i2()
|
||||
{return aux_i2;}
|
||||
const int& i3() const
|
||||
{return aux_i3;}
|
||||
int& i3()
|
||||
{return aux_i3;}
|
||||
protected:
|
||||
void copy(const ParserException& e);
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,122 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: static_atoms.cpp 1360 2007-07-10 11:44:20Z kamenik $
|
||||
|
||||
#include "static_atoms.h"
|
||||
#include "utils/cc/exception.h"
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
StaticAtoms::StaticAtoms(const StaticAtoms& a)
|
||||
: Atoms(), Constants(a), varnames(a.varnames),
|
||||
varorder(), vars(), indices()
|
||||
{
|
||||
// fill varorder
|
||||
for (unsigned int i = 0; i < a.varorder.size(); i++) {
|
||||
const char* s = varnames.query(a.varorder[i]);
|
||||
varorder.push_back(s);
|
||||
}
|
||||
|
||||
// fill vars
|
||||
for (Tvarmap::const_iterator it = a.vars.begin();
|
||||
it != a.vars.end(); ++it) {
|
||||
const char* s = varnames.query((*it).first);
|
||||
vars.insert(Tvarmap::value_type(s, (*it).second));
|
||||
}
|
||||
|
||||
// fill indices
|
||||
for (Tinvmap::const_iterator it = a.indices.begin();
|
||||
it != a.indices.end(); ++it) {
|
||||
const char* s = varnames.query((*it).second);
|
||||
indices.insert(Tinvmap::value_type((*it).first, s));
|
||||
}
|
||||
}
|
||||
|
||||
/** Import all atoms from the dynamic atoms: constants first, then
 * one static atom per variable name (collapsing all its lags/leads
 * to a single new tree index). The old->new index mapping is
 * accumulated in tmap. */
void StaticAtoms::import_atoms(const DynamicAtoms& da, OperationTree& otree, Tintintmap& tmap)
{
	Constants::import_constants(da, otree, tmap);

	for (int i = 0; i < da.get_name_storage().num(); i++) {
		const char* name = da.get_name_storage().get_name(i);
		register_name(name);
		// one static atom represents every lag/lead of the name
		int tnew = otree.add_nulary();
		assign(name, tnew);
		try {
			const DynamicAtoms::Tlagmap& lmap = da.lagmap(name);
			for (DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
				 it != lmap.end(); ++it) {
				int told = (*it).second;
				tmap.insert(Tintintmap::value_type(told, tnew));
			}
		} catch (const ogu::Exception& e) {
			// deliberately ignored: lagmap() throws for names that
			// have no occurrences, which contribute nothing to tmap
		}
	}
}
|
||||
|
||||
|
||||
int StaticAtoms::check(const char* name) const
|
||||
{
|
||||
if (DynamicAtoms::is_string_constant(name)) {
|
||||
return Constants::check(name);
|
||||
} else {
|
||||
return check_variable(name);
|
||||
}
|
||||
}
|
||||
|
||||
int StaticAtoms::index(const char* name) const
|
||||
{
|
||||
Tvarmap::const_iterator it = vars.find(name);
|
||||
if (it == vars.end())
|
||||
return -1;
|
||||
else
|
||||
return (*it).second;
|
||||
}
|
||||
|
||||
const char* StaticAtoms::inv_index(int t) const
|
||||
{
|
||||
Tinvmap::const_iterator it = indices.find(t);
|
||||
if (it == indices.end())
|
||||
return NULL;
|
||||
else
|
||||
return (*it).second;
|
||||
}
|
||||
|
||||
void StaticAtoms::assign(const char* name, int t)
|
||||
{
|
||||
if (DynamicAtoms::is_string_constant(name)) {
|
||||
double val;
|
||||
sscanf(name, "%lf", &val);
|
||||
add_constant(t, val);
|
||||
} else {
|
||||
const char* ss = varnames.insert(name);
|
||||
vars.insert(Tvarmap::value_type(ss, t));
|
||||
indices.insert(Tinvmap::value_type(t, ss));
|
||||
}
|
||||
}
|
||||
|
||||
/** Collect the tree indices of all registered variables into a
 * vector (map iteration order). */
vector<int> StaticAtoms::variables() const
{
	vector<int> idxs;
	idxs.reserve(vars.size());
	Tvarmap::const_iterator pos = vars.begin();
	while (pos != vars.end()) {
		idxs.push_back((*pos).second);
		++pos;
	}
	return idxs;
}
|
||||
|
||||
void StaticAtoms::register_name(const char* name)
|
||||
{
|
||||
const char* ss = varnames.insert(name);
|
||||
varorder.push_back(ss);
|
||||
}
|
||||
|
||||
void StaticAtoms::print() const
|
||||
{
|
||||
printf("constants:\n");
|
||||
Constants::print();
|
||||
printf("variable names:\n");
|
||||
varnames.print();
|
||||
printf("map to tree indices:\n");
|
||||
for (Tvarmap::const_iterator it = vars.begin(); it != vars.end(); ++it)
|
||||
printf("%s\t->\t%d\n", (*it).first, (*it).second);
|
||||
}
|
|
@ -0,0 +1,89 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: static_atoms.h 1218 2007-03-19 21:52:49Z kamenik $
|
||||
|
||||
#ifndef OGP_STATIC_ATOMS
|
||||
#define OGP_STATIC_ATOMS
|
||||
|
||||
#include "dynamic_atoms.h"
|
||||
|
||||
namespace ogp {
|
||||
|
||||
class StaticAtoms : public Atoms, public Constants {
|
||||
protected:
|
||||
typedef map<const char*, int, ltstr> Tvarmap;
|
||||
typedef map<int, const char*> Tinvmap;
|
||||
/** Storage for names. */
|
||||
NameStorage varnames;
|
||||
/** Outer order of variables. */
|
||||
vector<const char*> varorder;
|
||||
/** This is the map mapping a variable name to the tree
|
||||
* index. */
|
||||
Tvarmap vars;
|
||||
/** This is the inverse mapping. It maps a tree index to the
|
||||
* variable name. */
|
||||
Tinvmap indices;
|
||||
public:
|
||||
StaticAtoms() : Atoms(), Constants(), varnames(), varorder(), vars()
|
||||
{}
|
||||
/* Copy constructor. */
|
||||
StaticAtoms(const StaticAtoms& a);
|
||||
/** Conversion from DynamicAtoms. This takes all atoms from
|
||||
* the DynamicAtoms and adds its static version. The new tree
|
||||
* indices are allocated in the passed OperationTree. Whole
|
||||
* the process is traced in the map mapping old tree indices
|
||||
* to new tree indices. */
|
||||
StaticAtoms(const DynamicAtoms& da, OperationTree& otree, Tintintmap& tmap)
|
||||
: Atoms(), Constants(), varnames(), varorder(), vars()
|
||||
{import_atoms(da, otree, tmap);}
|
||||
/* Destructor. */
|
||||
virtual ~StaticAtoms() {}
|
||||
/** This imports atoms from dynamic atoms inserting the new
|
||||
* tree indices to the given tree (including constants). The
|
||||
* mapping from old atoms to new atoms is traced in tmap. */
|
||||
void import_atoms(const DynamicAtoms& da, OperationTree& otree,
|
||||
Tintintmap& tmap);
|
||||
/** If the name is constant, it returns its tree index if the
|
||||
* constant is registered in Constants, it returns -1
|
||||
* otherwise. If the name is not constant, it returns result
|
||||
* from check_variable, which is implemented by a subclass. */
|
||||
int check(const char* name) const;
|
||||
/** This assigns a given tree index to the variable name. The
|
||||
* name should have been checked before the call. */
|
||||
void assign(const char* name, int t);
|
||||
int nvar() const
|
||||
{return varnames.num();}
|
||||
/** This returns a vector of all variables. */
|
||||
vector<int> variables() const;
|
||||
/** This returns a tree index of the given variable. */
|
||||
int index(const char* name) const;
|
||||
/** This returns a name from the given tree index. NULL is
|
||||
* returned if the tree index doesn't exist. */
|
||||
const char* inv_index(int t) const;
|
||||
/** This returns a name in a outer ordering. (There is no other ordering.) */
|
||||
const char* name(int i) const
|
||||
{return varorder[i];}
|
||||
/** Debug print. */
|
||||
void print() const;
|
||||
/** This registers a variable. A subclass can reimplement
|
||||
* this, for example, to ensure uniqueness of the
|
||||
* name. However, this method should be always called in
|
||||
* overriding methods to do the registering job. */
|
||||
virtual void register_name(const char* name);
|
||||
/** Return the name storage to allow querying to other
|
||||
* classes. */
|
||||
const NameStorage& get_name_storage() const
|
||||
{return varnames;}
|
||||
protected:
|
||||
/** This checks the variable. The implementing subclass might
|
||||
* want to throw an exception if the variable has not been
|
||||
* registered. */
|
||||
virtual int check_variable(const char* name) const = 0;
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,217 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: static_fine_atoms.cpp 82 2007-04-19 11:33:30Z ondra $
|
||||
|
||||
#include "utils/cc/exception.h"
|
||||
|
||||
#include "static_fine_atoms.h"
|
||||
#include "parser_exception.h"
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
StaticFineAtoms::StaticFineAtoms(const StaticFineAtoms& sfa)
|
||||
: StaticAtoms(sfa),
|
||||
params(), param_outer_map(),
|
||||
endovars(), endo_outer_map(),
|
||||
exovars(), exo_outer_map(),
|
||||
der_atoms(sfa.der_atoms),
|
||||
endo_atoms_map(sfa.endo_atoms_map),
|
||||
exo_atoms_map(sfa.exo_atoms_map)
|
||||
{
|
||||
for (unsigned int i = 0; i < sfa.params.size(); i++) {
|
||||
const char* name = varnames.query(sfa.params[i]);
|
||||
params.push_back(name);
|
||||
param_outer_map.insert(Tvarintmap::value_type(name, i));
|
||||
}
|
||||
|
||||
for (unsigned int i = 0; i < sfa.endovars.size(); i++) {
|
||||
const char* name = varnames.query(sfa.endovars[i]);
|
||||
endovars.push_back(name);
|
||||
endo_outer_map.insert(Tvarintmap::value_type(name, i));
|
||||
}
|
||||
|
||||
for (unsigned int i = 0; i < sfa.exovars.size(); i++) {
|
||||
const char* name = varnames.query(sfa.exovars[i]);
|
||||
exovars.push_back(name);
|
||||
exo_outer_map.insert(Tvarintmap::value_type(name, i));
|
||||
}
|
||||
}
|
||||
|
||||
void StaticFineAtoms::import_atoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap)
|
||||
{
|
||||
StaticAtoms::import_atoms(fa, otree, tmap);
|
||||
|
||||
// we just need to put parameters, endovars, and exovars to
|
||||
// respective vectors, the names are already in the storage
|
||||
|
||||
// parameters
|
||||
const vector<const char*>& fa_params = fa.get_params();
|
||||
for (unsigned int i = 0; i < fa_params.size(); i++)
|
||||
register_param(fa_params[i]);
|
||||
|
||||
// endogenous
|
||||
const vector<const char*>& fa_endovars = fa.get_endovars();
|
||||
for (unsigned int i = 0; i < fa_endovars.size(); i++)
|
||||
register_endo(fa_endovars[i]);
|
||||
|
||||
// exogenous
|
||||
const vector<const char*>& fa_exovars = fa.get_exovars();
|
||||
for (unsigned int i = 0; i < fa_exovars.size(); i++)
|
||||
register_exo(fa_exovars[i]);
|
||||
|
||||
parsing_finished();
|
||||
}
|
||||
|
||||
void StaticFineAtoms::import_atoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap,
|
||||
const char* dummy)
|
||||
{
|
||||
StaticAtoms::import_atoms(fa, otree, tmap);
|
||||
|
||||
// we just need to put parameters, endovars, and exovars to
|
||||
// respective vectors, the names are already in the storage
|
||||
|
||||
// parameters
|
||||
const vector<const char*>& fa_params = fa.get_params();
|
||||
for (unsigned int i = 0; i < fa_params.size(); i++)
|
||||
register_param(fa_params[i]);
|
||||
|
||||
// endogenous
|
||||
const vector<const char*>& fa_endovars = fa.get_endovars();
|
||||
for (unsigned int i = 0; i < fa_endovars.size(); i++)
|
||||
register_endo(fa_endovars[fa.y2outer_endo()[i]]);
|
||||
|
||||
// exogenous
|
||||
const vector<const char*>& fa_exovars = fa.get_exovars();
|
||||
for (unsigned int i = 0; i < fa_exovars.size(); i++)
|
||||
register_exo(fa_exovars[fa.y2outer_exo()[i]]);
|
||||
|
||||
parsing_finished();
|
||||
}
|
||||
|
||||
/** Check a variable name: throw if it was never declared (i.e. not
 * inserted to varnames by a register method), otherwise return its
 * tree index. */
int StaticFineAtoms::check_variable(const char* name) const
{
	const char* ss = varnames.query(name);
	if (ss == NULL)
		throw ParserException(string("Variable <")+name+"> not declared.",0);
	// note: may still return -1 when the name is declared but has
	// not been assigned a tree index yet
	return index(name);
}
|
||||
|
||||
void StaticFineAtoms::parsing_finished()
|
||||
{
|
||||
// build der_atoms, and endo_atoms_map and exo_atoms_map
|
||||
der_atoms.clear();
|
||||
endo_atoms_map.clear();
|
||||
exo_atoms_map.clear();
|
||||
|
||||
// go through all endo and exo insert tree indices, ignore names
|
||||
// whose tree index is -1 (those which are not referenced)
|
||||
for (unsigned int i = 0; i < endovars.size(); i++) {
|
||||
int t = index(endovars[i]);
|
||||
if (t != -1) {
|
||||
endo_atoms_map.push_back(der_atoms.size());
|
||||
der_atoms.push_back(t);
|
||||
}
|
||||
}
|
||||
for (unsigned int i = 0; i < exovars.size(); i++) {
|
||||
int t = index(exovars[i]);
|
||||
if (t != -1) {
|
||||
exo_atoms_map.push_back(der_atoms.size());
|
||||
der_atoms.push_back(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int StaticFineAtoms::name2outer_param(const char* name) const
|
||||
{
|
||||
Tvarintmap::const_iterator it = param_outer_map.find(name);
|
||||
if (it == param_outer_map.end())
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Name is not a parameter in StaticFineAtoms::name2outer_param");
|
||||
return (*it).second;
|
||||
}
|
||||
|
||||
int StaticFineAtoms::name2outer_endo(const char* name) const
|
||||
{
|
||||
Tvarintmap::const_iterator it = endo_outer_map.find(name);
|
||||
if (it == endo_outer_map.end())
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Name is not an endogenous variable in StaticFineAtoms::name2outer_endo");
|
||||
return (*it).second;
|
||||
}
|
||||
|
||||
int StaticFineAtoms::name2outer_exo(const char* name) const
|
||||
{
|
||||
Tvarintmap::const_iterator it = exo_outer_map.find(name);
|
||||
if (it == exo_outer_map.end())
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Name is not an exogenous variable in StaticFineAtoms::name2outer_exo");
|
||||
return (*it).second;
|
||||
}
|
||||
|
||||
void StaticFineAtoms::register_uniq_endo(const char* name)
|
||||
{
|
||||
if (varnames.query(name))
|
||||
throw ogp::ParserException(string("Endogenous variable <")+name+"> is not unique.",0);
|
||||
const char* ss = varnames.insert(name);
|
||||
register_endo(ss);
|
||||
}
|
||||
|
||||
void StaticFineAtoms::register_uniq_exo(const char* name)
|
||||
{
|
||||
if (varnames.query(name))
|
||||
throw ogp::ParserException(string("Exogenous variable <")+name+"> is not unique.",0);
|
||||
const char* ss = varnames.insert(name);
|
||||
register_exo(ss);
|
||||
}
|
||||
|
||||
void StaticFineAtoms::register_uniq_param(const char* name)
|
||||
{
|
||||
if (varnames.query(name))
|
||||
throw ogp::ParserException(string("Parameter <")+name+"> is not unique.",0);
|
||||
const char* ss = varnames.insert(name);
|
||||
register_param(ss);
|
||||
}
|
||||
|
||||
void StaticFineAtoms::print() const
|
||||
{
|
||||
StaticAtoms::print();
|
||||
printf("endo atoms map:\n");
|
||||
for (unsigned int i = 0; i < endo_atoms_map.size(); i++)
|
||||
printf("%d --> %d\n", i, endo_atoms_map[i]);
|
||||
printf("exo atoms map:\n");
|
||||
for (unsigned int i = 0; i < exo_atoms_map.size(); i++)
|
||||
printf("%d --> %d\n", i, exo_atoms_map[i]);
|
||||
printf("der atoms:\n");
|
||||
for (unsigned int i = 0; i < der_atoms.size(); i++)
|
||||
printf("%d\t%d\n",i, der_atoms[i]);
|
||||
}
|
||||
|
||||
void StaticFineAtoms::register_endo(const char* name)
|
||||
{
|
||||
const char* ss = varnames.query(name);
|
||||
if (ss == NULL)
|
||||
throw ogp::ParserException(string("Endogenous variable <")
|
||||
+name+"> not found in storage.",0);
|
||||
endovars.push_back(ss);
|
||||
endo_outer_map.insert(Tvarintmap::value_type(ss, endovars.size()-1));
|
||||
}
|
||||
|
||||
void StaticFineAtoms::register_exo(const char* name)
|
||||
{
|
||||
const char* ss = varnames.query(name);
|
||||
if (ss == NULL)
|
||||
throw ogp::ParserException(string("Exogenous variable <")
|
||||
+name+"> not found in storage.",0);
|
||||
exovars.push_back(ss);
|
||||
exo_outer_map.insert(Tvarintmap::value_type(ss, exovars.size()-1));
|
||||
}
|
||||
|
||||
void StaticFineAtoms::register_param(const char* name)
|
||||
{
|
||||
const char* ss = varnames.query(name);
|
||||
if (ss == NULL)
|
||||
throw ogp::ParserException(string("Parameter <")+name+"> not found in storage.",0);
|
||||
params.push_back(ss);
|
||||
param_outer_map.insert(Tvarintmap::value_type(ss, params.size()-1));
|
||||
}
|
||||
|
|
@ -0,0 +1,177 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: static_fine_atoms.h 42 2007-01-22 21:53:24Z ondra $
|
||||
|
||||
#ifndef OGP_STATIC_FINE_ATOMS_H
|
||||
#define OGP_STATIC_FINE_ATOMS_H
|
||||
|
||||
#include "static_atoms.h"
|
||||
#include "fine_atoms.h"
|
||||
|
||||
namespace ogp {
|
||||
|
||||
/** This class represents static atoms distinguishing between
|
||||
* parameters, endogenous and exogenous variables. The class
|
||||
* maintains also ordering of all three categories (referenced as
|
||||
* outer or inner, since there is only one ordering). It can be
|
||||
* constructed either from scratch, or from fine dynamic atoms. In
|
||||
* the latter case, one can decide if the ordering of this static
|
||||
* atoms should be internal or external ordering of the original
|
||||
* dynamic fine atoms. */
|
||||
class StaticFineAtoms : public StaticAtoms {
|
||||
public:
|
||||
typedef map<int,int> Tintintmap;
|
||||
protected:
|
||||
typedef map<const char*, int, ltstr> Tvarintmap;
|
||||
private:
|
||||
/** The vector of parameter names, gives the parameter
|
||||
* ordering. */
|
||||
vector<const char*> params;
|
||||
/** A map mappping a parameter name to an index in the ordering. */
|
||||
Tvarintmap param_outer_map;
|
||||
/** The vector of endogenous variables. This defines the order
|
||||
* like parameters. */
|
||||
vector<const char*> endovars;
|
||||
/** A map mapping a name of an endogenous variable to an index
|
||||
* in the ordering. */
|
||||
Tvarintmap endo_outer_map;
|
||||
/** The vector of exogenous variables. Also defines the order
|
||||
* like parameters and endovars. */
|
||||
vector<const char*> exovars;
|
||||
/** A map mapping a name of an exogenous variable to an index
|
||||
* in the outer ordering. */
|
||||
Tvarintmap exo_outer_map;
|
||||
/** This vector defines a set of atoms as tree indices used
|
||||
* for differentiation. The order of the atoms in is the
|
||||
* concatenation of the outer ordering of endogenous and
|
||||
* exogenous. This vector is setup by parsing_finished() and
|
||||
* is returned by variables(). */
|
||||
vector<int> der_atoms;
|
||||
/** This is a mapping from endogenous atoms to all atoms in
|
||||
* der_atoms member. The mapping maps index in endogenous atom
|
||||
* ordering to index (not value) in der_atoms. It is useful if
|
||||
* one wants to evaluate derivatives wrt only endogenous
|
||||
* variables. It is set by parsing_finished(). By definition,
|
||||
* it is monotone. */
|
||||
vector<int> endo_atoms_map;
|
||||
/** This is a mapping from exogenous atoms to all atoms in
|
||||
* der_atoms member. It is the same as endo_atoms_map for
|
||||
* atoms of exogenous variables. */
|
||||
vector<int> exo_atoms_map;
|
||||
public:
|
||||
StaticFineAtoms() {}
|
||||
/** Copy constructor making a new storage for atom names. */
|
||||
StaticFineAtoms(const StaticFineAtoms& sfa);
|
||||
/** Conversion from dynamic FineAtoms taking its outer
|
||||
* ordering as ordering of parameters, endogenous and
|
||||
* exogenous. A biproduct is an integer to integer map mapping
|
||||
* tree indices of the dynamic atoms to tree indices of the
|
||||
* static atoms. */
|
||||
StaticFineAtoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap)
|
||||
{StaticFineAtoms::import_atoms(fa, otree, tmap);}
|
||||
/** Conversion from dynamic FineAtoms taking its internal
|
||||
* ordering as ordering of parameters, endogenous and
|
||||
* exogenous. A biproduct is an integer to integer map mapping
|
||||
* tree indices of the dynamic atoms to tree indices of the
|
||||
* static atoms. */
|
||||
StaticFineAtoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap,
|
||||
const char* dummy)
|
||||
{StaticFineAtoms::import_atoms(fa, otree, tmap, dummy);}
|
||||
virtual ~StaticFineAtoms() {}
|
||||
/** This adds atoms from dynamic atoms inserting new tree
|
||||
* indices to the given tree and tracing the mapping from old
|
||||
* atoms to new atoms in tmap. The ordering of the static
|
||||
* atoms is the same as outer ordering of dynamic atoms. */
|
||||
void import_atoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap);
|
||||
/** This adds atoms from dynamic atoms inserting new tree
|
||||
* indices to the given tree and tracing the mapping from old
|
||||
* atoms to new atoms in tmap. The ordering of the static
|
||||
* atoms is the same as internal ordering of dynamic atoms. */
|
||||
void import_atoms(const FineAtoms& fa, OperationTree& otree, Tintintmap& tmap,
|
||||
const char* dummy);
|
||||
/** Overrides StaticAtoms::check_variable so that the error
|
||||
* would be raised if the variable name is not declared. A
|
||||
* variable is declared by inserting it to
|
||||
* StaticAtoms::varnames, which is done with registering
|
||||
* methods. This a responsibility of a subclass. */
|
||||
int check_variable(const char* name) const;
|
||||
/** Return an (external) ordering of parameters. */
|
||||
const vector<const char*>& get_params() const
|
||||
{return params;}
|
||||
/** Return an external ordering of endogenous variables. */
|
||||
const vector<const char*>& get_endovars() const
|
||||
{return endovars;}
|
||||
/** Return an external ordering of exogenous variables. */
|
||||
const vector<const char*>& get_exovars() const
|
||||
{return exovars;}
|
||||
/** This constructs der_atoms, and the endo_endoms_map and
|
||||
* exo_atoms_map, which can be created only after the parsing
|
||||
* is finished. */
|
||||
void parsing_finished();
|
||||
/** Return the atoms with respect to which we are going to
|
||||
* differentiate. */
|
||||
vector<int> variables() const
|
||||
{return der_atoms;}
|
||||
/** Return the endo_atoms_map. */
|
||||
const vector<int>& get_endo_atoms_map() const
|
||||
{return endo_atoms_map;}
|
||||
/** Return the exo_atoms_map. */
|
||||
const vector<int>& get_exo_atoms_map() const
|
||||
{return endo_atoms_map;}
|
||||
/** Return an index in the outer ordering of a given
|
||||
* parameter. An exception is thrown if the name is not a
|
||||
* parameter. */
|
||||
int name2outer_param(const char* name) const;
|
||||
/** Return an index in the outer ordering of a given
|
||||
* endogenous variable. An exception is thrown if the name is not a
|
||||
* and endogenous variable. */
|
||||
int name2outer_endo(const char* name) const;
|
||||
/** Return an index in the outer ordering of a given
|
||||
* exogenous variable. An exception is thrown if the name is not a
|
||||
* and exogenous variable. */
|
||||
int name2outer_exo(const char* name) const;
|
||||
/** Return the number of endogenous variables. */
|
||||
int ny() const
|
||||
{return endovars.size();}
|
||||
/** Return the number of exogenous variables. */
|
||||
int nexo() const
|
||||
{return (int)exovars.size();}
|
||||
/** Return the number of parameters. */
|
||||
int np() const
|
||||
{return (int)(params.size());}
|
||||
/** Register unique endogenous variable name. The order of
|
||||
* calls defines the endo outer ordering. The method is
|
||||
* virtual, since a superclass may want to do some additional
|
||||
* action. */
|
||||
virtual void register_uniq_endo(const char* name);
|
||||
/** Register unique exogenous variable name. The order of
|
||||
* calls defines the exo outer ordering. The method is
|
||||
* virtual, since a superclass may want to do somem additional
|
||||
* action. */
|
||||
virtual void register_uniq_exo(const char* name);
|
||||
/** Register unique parameter name. The order of calls defines
|
||||
* the param outer ordering. The method is
|
||||
* virtual, since a superclass may want to do somem additional
|
||||
* action. */
|
||||
virtual void register_uniq_param(const char* name);
|
||||
/** Debug print. */
|
||||
void print() const;
|
||||
private:
|
||||
/** Add endogenous variable name, which is already in the name
|
||||
* storage. */
|
||||
void register_endo(const char* name);
|
||||
/** Add exogenous variable name, which is already in the name
|
||||
* storage. */
|
||||
void register_exo(const char* name);
|
||||
/** Add parameter name, which is already in the name
|
||||
* storage. */
|
||||
void register_param(const char* name);
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,912 @@
|
|||
// Copyright (C) 2005, Ondra Kamenik
|
||||
|
||||
// $Id: tree.cpp 1762 2008-03-31 14:28:54Z kamenik $
|
||||
|
||||
#include "utils/cc/exception.h"
|
||||
|
||||
#include "tree.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <cmath>
|
||||
#include <limits>
|
||||
|
||||
using namespace ogp;
|
||||
|
||||
|
||||
/** Complementary error function. Implemented only inside this unit
 * (no external declaration on purpose). The rational approximation is
 * taken from "Numerical Recipes in C", 2nd ed., 1992, p. 221; its
 * fractional error is everywhere below 1.2e-7. */
double erffc(double x)
{
	const double az = (x < 0) ? -x : x;
	const double u = 1.0/(1.0 + 0.5*az);
	// Horner evaluation of the fitted polynomial in u
	double poly = 0.17087277;
	poly = -0.82215223 + u*poly;
	poly = 1.48851587 + u*poly;
	poly = -1.13520398 + u*poly;
	poly = 0.27886807 + u*poly;
	poly = -0.18628806 + u*poly;
	poly = 0.09678418 + u*poly;
	poly = 0.37409196 + u*poly;
	poly = 1.00002368 + u*poly;
	poly = -1.26551223 + u*poly;
	const double approx = u*exp(-az*az + poly);
	// the approximation is for x >= 0; use erfc(-x) = 2 - erfc(x)
	return (x >= 0) ? approx : 2.0 - approx;
}
|
||||
|
||||
/** Here we initialize OperationTree to contain only zero, one, nan
|
||||
* and two_over_pi terms. */
|
||||
OperationTree::OperationTree()
|
||||
{
|
||||
last_nulary = -1;
|
||||
// allocate space for the constants
|
||||
for (int i = 0; i < num_constants; i++)
|
||||
add_nulary();
|
||||
}
|
||||
|
||||
int OperationTree::add_nulary()
|
||||
{
|
||||
int op = terms.size();
|
||||
Operation nulary;
|
||||
terms.push_back(nulary);
|
||||
_Tintset s;
|
||||
s.insert(op);
|
||||
nul_incidence.push_back(s);
|
||||
_Tderivmap empty;
|
||||
derivatives.push_back(empty);
|
||||
last_nulary = op;
|
||||
return op;
|
||||
}
|
||||
|
||||
/** Add a unary operation applied to the term op, simplifying
 * applications to the special constants, and return the tree index of
 * the resulting (possibly pre-existing) term.
 * Fix: the NaN-propagation condition "op == zero && code == LOG ||
 * op == nan" relied on operator precedence and triggered
 * -Wparentheses; it is now explicitly parenthesized. Behavior is
 * unchanged. */
int OperationTree::add_unary(code_t code, int op)
{
	// f(0) == 0 for these zero-preserving functions
	if (op == zero &&
	    (code == UMINUS ||
	     code == SIN ||
	     code == TAN ||
	     code == SQRT ||
	     code == ERF))
		return zero;
	// log(0) is treated as NaN; NaN propagates through any unary op
	if ((op == zero && code == LOG) || op == nan)
		return nan;
	// f(0) == 1 for these
	if (op == zero && (code == EXP ||
	                   code == COS ||
	                   code == ERFC))
		return one;

	Operation unary(code, op);
	_Topmap::const_iterator i = ((const _Topmap&)opmap).find(unary);
	if (i == opmap.end()) {
		// a new operation: append it as a new term
		int newop = terms.size();
		terms.push_back(unary);
		// a unary term incides on the same nulary terms as its operand
		nul_incidence.push_back(nul_incidence[op]);
		// register in the uniqueness map
		opmap.insert(_Topval(unary, newop));
		// no derivatives known yet
		_Tderivmap empty;
		derivatives.push_back(empty);
		return newop;
	}
	// the identical operation already exists; reuse its index
	return (*i).second;
}
|
||||
|
||||
/** Add a binary operation on terms op1 and op2, simplifying against
 * the special constants, and return the tree index of the resulting
 * (possibly pre-existing) term.
 * Fix: the nested if/else-if simplification chains under each
 * "if (code == ...)" were unbraced and depended on dangling-else
 * binding; braces were added for robustness. Behavior is unchanged. */
int OperationTree::add_binary(code_t code, int op1, int op2)
{
	// NaN propagates through any binary operation
	if (op1 == nan || op2 == nan)
		return nan;
	// addition: 0+0 => 0, 0+x => x, x+0 => x
	if (code == PLUS) {
		if (op1 == zero && op2 == zero)
			return zero;
		else if (op1 == zero)
			return op2;
		else if (op2 == zero)
			return op1;
	}
	// subtraction: 0-0 => 0, 0-x => -x, x-0 => x
	if (code == MINUS) {
		if (op1 == zero && op2 == zero)
			return zero;
		else if (op1 == zero)
			return add_unary(UMINUS, op2);
		else if (op2 == zero)
			return op1;
	}
	// multiplication: 0*x => 0, 1*x => x, x*1 => x
	if (code == TIMES) {
		if (op1 == zero || op2 == zero)
			return zero;
		else if (op1 == one)
			return op2;
		else if (op2 == one)
			return op1;
	}
	// division: x/x => 1, 0/x => 0, x/0 => NaN
	if (code == DIVIDE) {
		if (op1 == op2)
			return one;
		else if (op1 == zero)
			return zero;
		else if (op2 == zero)
			return nan;
	}
	// power: 0^0 => NaN, 0^x => 0, x^0 => 1, 1^x => 1, x^1 => x
	if (code == POWER) {
		if (op1 == zero && op2 == zero)
			return nan;
		else if (op1 == zero)
			return zero;
		else if (op2 == zero)
			return one;
		else if (op1 == one)
			return one;
		else if (op2 == one)
			return op1;
	}

	// canonically order operands of commutative operations, so that
	// a+b and b+a map to the same term
	if (code == TIMES || code == PLUS) {
		if (op1 > op2) {
			int tmp = op1;
			op1 = op2;
			op2 = tmp;
		}
	}

	// construct operation and check/add it
	Operation binary(code, op1, op2);
	_Topmap::const_iterator i = ((const _Topmap&)opmap).find(binary);
	if (i == opmap.end()) {
		int newop = terms.size();
		terms.push_back(binary);
		// the result incides on the union of both operands' nulary sets
		nul_incidence.push_back(nul_incidence[op1]);
		nul_incidence.back().insert(nul_incidence[op2].begin(), nul_incidence[op2].end());
		// register in the uniqueness map
		opmap.insert(_Topval(binary, newop));
		// no derivatives known yet
		_Tderivmap empty;
		derivatives.push_back(empty);
		return newop;
	}
	return (*i).second;
}
|
||||
|
||||
/** Return the tree index of the derivative of term t with respect to
 * the nulary term (variable) v, building the derivative term if it is
 * not yet in the tree. Results are memoized in derivatives[t].
 * Throws for an out-of-range t or an unknown operation code. */
int OperationTree::add_derivative(int t, int v)
{
	if (t < 0 || t >= (int) terms.size())
		throw ogu::Exception(__FILE__,__LINE__,
		                     "Wrong value for tree index in OperationTree::add_derivative");

	// quick returns for nulary terms or empty incidence:
	// d(other nulary)/dv = 0
	if (terms[t].nary() == 0 && t != v) {
		return zero;
	}
	// dv/dv = 1
	if (terms[t].nary() == 0 && t == v) {
		return one;
	}
	// t does not contain v at all, hence the derivative is zero
	if (nul_incidence[t].end() == nul_incidence[t].find(v)) {
		return zero;
	}

	// quick return if the derivative has been registered
	_Tderivmap::const_iterator i = derivatives[t].find(v);
	if (i != derivatives[t].end())
		return (*i).second;

	int res = -1;
	switch (terms[t].getCode()) {

	case UMINUS:
		{
			// d(-u) = -u'
			int tmp = add_derivative(terms[t].getOp1(), v);
			res = add_unary(UMINUS, tmp);
			break;
		}
	case LOG:
		{
			// d(log u) = u'/u
			int tmp = add_derivative(terms[t].getOp1(), v);
			res = add_binary(DIVIDE, tmp, terms[t].getOp1());
			break;
		}
	case EXP:
		{
			// d(exp u) = exp(u)*u'; t itself is exp(u)
			int tmp = add_derivative(terms[t].getOp1(), v);
			res = add_binary(TIMES, t, tmp);
			break;
		}
	case SIN:
		{
			// d(sin u) = cos(u)*u'
			int tmp = add_derivative(terms[t].getOp1(), v);
			res = add_binary(TIMES, add_unary(COS, terms[t].getOp1()), tmp);
			break;
		}
	case COS:
		{
			// d(cos u) = -sin(u)*u'
			int tmp = add_derivative(terms[t].getOp1(), v);
			res = add_unary(UMINUS, add_binary(TIMES, add_unary(SIN, terms[t].getOp1()), tmp));
			break;
		}
	case TAN:
		{
			// d(tan u) = u'/cos(u)^2
			int tmp = add_derivative(terms[t].getOp1(), v);
			int tmp2 = add_unary(COS, terms[t].getOp1());
			res = add_binary(DIVIDE, tmp, add_binary(TIMES, tmp2, tmp2));
			break;
		}
	case SQRT:
		{
			// d(sqrt u) = u'/(2*sqrt(u)); t itself is sqrt(u), so 2*sqrt(u) = t+t
			int tmp = add_derivative(terms[t].getOp1(), v);
			res = add_binary(DIVIDE, tmp,
			                 add_binary(PLUS, t, t));
			break;
		}
	case ERF:
		{
			// d(erf u) = (2/sqrt(pi))*exp(-u^2)*u'
			// (the constant named two_over_pi actually holds 2/sqrt(pi))
			int tmp = add_binary(TIMES, terms[t].getOp1(), terms[t].getOp1());
			tmp = add_unary(UMINUS, tmp);
			tmp = add_unary(EXP, tmp);
			int der = add_derivative(terms[t].getOp1(), v);
			tmp = add_binary(TIMES, tmp, der);
			res = add_binary(TIMES, two_over_pi, tmp);
			break;
		}
	case ERFC:
		{
			// d(erfc u) = -(2/sqrt(pi))*exp(-u^2)*u'
			int tmp = add_binary(TIMES, terms[t].getOp1(), terms[t].getOp1());
			tmp = add_unary(UMINUS, tmp);
			tmp = add_unary(EXP, tmp);
			int der = add_derivative(terms[t].getOp1(), v);
			tmp = add_binary(TIMES, tmp, der);
			tmp = add_binary(TIMES, two_over_pi, tmp);
			res = add_unary(UMINUS, tmp);
			break;
		}
	case PLUS:
		{
			// d(a+b) = a' + b'
			int tmp1 = add_derivative(terms[t].getOp1(), v);
			int tmp2 = add_derivative(terms[t].getOp2(), v);
			res = add_binary(PLUS, tmp1, tmp2);
			break;
		}
	case MINUS:
		{
			// d(a-b) = a' - b'
			int tmp1 = add_derivative(terms[t].getOp1(), v);
			int tmp2 = add_derivative(terms[t].getOp2(), v);
			res = add_binary(MINUS, tmp1, tmp2);
			break;
		}
	case TIMES:
		{
			// product rule: d(a*b) = a*b' + a'*b
			int tmp1 = add_derivative(terms[t].getOp1(), v);
			int tmp2 = add_derivative(terms[t].getOp2(), v);
			int res1 = add_binary(TIMES, terms[t].getOp1(), tmp2);
			int res2 = add_binary(TIMES, tmp1, terms[t].getOp2());
			res = add_binary(PLUS, res1, res2);
			break;
		}
	case DIVIDE:
		{
			// quotient rule: d(a/b) = (a'*b - b'*a)/b^2,
			// simplified to a'/b when b' == 0
			int tmp1 = add_derivative(terms[t].getOp1(), v);
			int tmp2 = add_derivative(terms[t].getOp2(), v);
			if (tmp2 == zero)
				res = add_binary(DIVIDE, tmp1, terms[t].getOp2());
			else {
				int nom = add_binary(MINUS,
				                     add_binary(TIMES, tmp1, terms[t].getOp2()),
				                     add_binary(TIMES, tmp2, terms[t].getOp1()));
				int den = add_binary(TIMES, terms[t].getOp2(), terms[t].getOp2());
				res = add_binary(DIVIDE, nom, den);
			}
			break;
		}
	case POWER:
		{
			// d(a^b) = b'*a^b*log(a) + a'*b*a^(b-1); t itself is a^b
			int tmp1 = add_derivative(terms[t].getOp1(), v);
			int tmp2 = add_derivative(terms[t].getOp2(), v);
			int s1 = add_binary(TIMES, tmp2,
			                    add_binary(TIMES, t,
			                               add_unary(LOG, terms[t].getOp1())));
			int s2 = add_binary(TIMES, tmp1,
			                    add_binary(TIMES, terms[t].getOp2(),
			                               add_binary(POWER, terms[t].getOp1(),
			                                          add_binary(MINUS, terms[t].getOp2(), one))));
			res = add_binary(PLUS, s1, s2);
			break;
		}
	case NONE:
		// nulary terms were handled by the quick returns above
		break;
	}

	if (res == -1)
		throw ogu::Exception(__FILE__,__LINE__,
		                     "Unknown operation code.");

	// memoize so the derivative is built only once
	register_derivative(t, v, res);

	return res;
}
|
||||
|
||||
/** Substitute terms within this tree itself; convenience overload of
 * the three-argument add_substitution with otree == *this. */
int OperationTree::add_substitution(int t, const map<int,int>& subst)
{
	return add_substitution(t, subst, *this);
}
|
||||
|
||||
/** Recursively copy term t of otree into this tree, replacing terms
 * according to subst (a map from otree indices to indices in this
 * tree). Returns the index of the resulting term in this tree.
 * Throws if a nulary term of a foreign tree is reached that is neither
 * a special constant nor covered by subst. */
int OperationTree::add_substitution(int t, const map<int,int>& subst,
                                    const OperationTree& otree)
{
	// return substitution of t if it is in the map
	map<int,int>::const_iterator it = subst.find(t);
	if (subst.end() != it)
		return (*it).second;

	int nary = otree.terms[t].nary();
	if (nary == 2) {
		// return the binary operation of the substituted terms
		int t1 = add_substitution(otree.terms[t].getOp1(), subst, otree);
		int t2 = add_substitution(otree.terms[t].getOp2(), subst, otree);
		return add_binary(otree.terms[t].getCode(), t1, t2);
	} else if (nary == 1) {
		// return the unary operation of the substituted term
		int t1 = add_substitution(otree.terms[t].getOp1(), subst, otree);
		return add_unary(otree.terms[t].getCode(), t1);
	} else {
		// if t is not among the first num_constants, and otree is not
		// this tree, then raise an exception. Otherwise return t, since
		// it is either a special term (having the same semantics in
		// both trees), or the trees are the same, hence t has the
		// same semantics
		if (t < num_constants || this == &otree)
			return t;
		else {
			throw ogu::Exception(__FILE__,__LINE__,
			                     "Incomplete substitution map in OperationTree::add_substitution");
			return -1;
		}
	}
}
|
||||
|
||||
|
||||
/** Turn an existing (unary or binary) term into a nulary one, i.e.
 * make it an externally assignable variable. The old operation is
 * removed from the uniqueness map and the nulary incidence data of the
 * whole tree is recomputed. */
void OperationTree::nularify(int t)
{
	// remove the original operation from opmap, so a future identical
	// operation gets a fresh term instead of this index
	_Topmap::iterator it = opmap.find(terms[t]);
	if (it != opmap.end())
		opmap.erase(it);
	// turn the operation to nulary
	Operation nulary_op;
	terms[t] = nulary_op;
	// update last nulary
	if (last_nulary < t)
		last_nulary = t;
	// update nul_incidence information for all terms including t
	update_nul_incidence_after_nularify(t);
}
|
||||
|
||||
/** Record that tder is the derivative of term t with respect to the
 * nulary term v; used for memoization by add_derivative. */
void OperationTree::register_derivative(int t, int v, int tder)
{
	// todo: might check that the insert inserts a new pair
	derivatives[t].insert(_Tderivmap::value_type(v, tder));
}
|
||||
|
||||
/** Return the set of maximal subterms of t satisfying the selector:
 * the search stops descending as soon as a term is selected. */
hash_set<int> OperationTree::select_terms(int t, const opselector& sel) const
{
	hash_set<int> found;
	select_terms(t, sel, found);
	return found;
}
|
||||
|
||||
void OperationTree::select_terms(int t, const opselector& sel, hash_set<int>& subterms) const
|
||||
{
|
||||
const Operation& op = terms[t];
|
||||
|
||||
if (sel(t))
|
||||
subterms.insert(t);
|
||||
else
|
||||
if (op.nary() == 2) {
|
||||
select_terms(op.getOp1(), sel, subterms);
|
||||
select_terms(op.getOp2(), sel, subterms);
|
||||
} else if (op.nary() == 1) {
|
||||
select_terms(op.getOp1(), sel, subterms);
|
||||
}
|
||||
}
|
||||
|
||||
/** Return the set of subterms of t selected bottom-up: a term is taken
 * only if all of its operands were taken and it satisfies the
 * selector itself. */
hash_set<int> OperationTree::select_terms_inv(int t, const opselector& sel) const
{
	hash_set<int> found;
	select_terms_inv(t, sel, found);
	return found;
}
|
||||
|
||||
bool OperationTree::select_terms_inv(int t, const opselector& sel, hash_set<int>& subterms) const
|
||||
{
|
||||
const Operation& op = terms[t];
|
||||
|
||||
if (op.nary() == 2) {
|
||||
bool a1 = select_terms_inv(op.getOp1(), sel, subterms);
|
||||
bool a2 = select_terms_inv(op.getOp2(), sel, subterms);
|
||||
if (a1 && a2 && sel(t)) {
|
||||
subterms.insert(t);
|
||||
return true;
|
||||
}
|
||||
} else if (op.nary() == 1) {
|
||||
bool a1 = select_terms_inv(op.getOp1(), sel, subterms);
|
||||
if (a1 && sel(t)) {
|
||||
subterms.insert(t);
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
if (sel(t)) {
|
||||
subterms.insert(t);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void OperationTree::forget_derivative_maps()
|
||||
{
|
||||
for (unsigned int i = 0; i < derivatives.size(); i++)
|
||||
derivatives[i].clear();
|
||||
}
|
||||
|
||||
|
||||
/** Print the term t (with all its subterms) to fd using the given
 * formatter. */
void OperationTree::print_operation_tree(int t, FILE* fd, OperationFormatter& f) const
{
	f.format(terms[t], t, fd);
}
|
||||
|
||||
/** Debug print of term t to stdout using the default formatter. */
void OperationTree::print_operation(int t) const
{
	DefaultOperationFormatter dof(*this);
	print_operation_tree(t, stdout, dof);
}
|
||||
|
||||
/** Recompute the nulary incidence sets after term t has been turned
 * nulary. A single forward pass suffices because the tree is ordered:
 * every operand index is strictly smaller than the index of the term
 * using it ("tree disorder" is asserted otherwise). A term's set is
 * rebuilt only when one of its operands' sets has been rebuilt in
 * this pass. */
void OperationTree::update_nul_incidence_after_nularify(int t)
{
	// indices whose incidence set has changed in this pass
	hash_set<int> updated;
	for (int tnode = num_constants; tnode < (int)terms.size(); tnode++) {
		const Operation& op = terms[tnode];
		if (op.nary() == 2) {
			int op1 = op.getOp1();
			int op2 = op.getOp2();
			if (op1 >= tnode || op2 >= tnode)
				throw ogu::Exception(__FILE__,__LINE__,
				                     "Tree disorder asserted");
			bool updated1 = (updated.end() != updated.find(op1));
			bool updated2 = (updated.end() != updated.find(op2));
			if (updated1 || updated2) {
				// union of both operands' (current, possibly rebuilt) sets
				nul_incidence[tnode] = nul_incidence[op1];
				nul_incidence[tnode].insert(nul_incidence[op2].begin(), nul_incidence[op2].end());
				updated.insert(tnode);
			}
		} else if (op.nary() == 1) {
			int op1 = op.getOp1();
			if (op1 >= tnode)
				throw ogu::Exception(__FILE__,__LINE__,
				                     "Tree disorder asserted");
			bool updated1 = (updated.end() != updated.find(op1));
			if (updated1) {
				nul_incidence[tnode] = nul_incidence[op1];
				updated.insert(tnode);
			}
		} else if (op.nary() == 0) {
			// the freshly nularified term now incides only on itself
			if (tnode == t) {
				nul_incidence[tnode].clear();
				nul_incidence[tnode].insert(tnode);
				updated.insert(tnode);
			}
		}
	}
}
|
||||
|
||||
|
||||
/** Construct an evaluation workspace over the first last+1 terms of
 * the tree (all terms when last == -1). Slots for the four special
 * constants are filled immediately: 0, 1, quiet NaN and 2/sqrt(pi)
 * (despite the name, two_over_pi holds 2/sqrt(pi) — the erf
 * derivative constant). All other slots are marked unset. */
EvalTree::EvalTree(const OperationTree& ot, int last)
	: otree(ot),
	  values(new double[(last==-1)? ot.terms.size() : last+1]),
	  flags(new bool[(last==-1)? ot.terms.size() : last+1]),
	  last_operation((last==-1)? ot.terms.size()-1 : last)
{
	// last must leave room for the constants and stay within the tree
	if (last_operation < OperationTree::num_constants-1 ||
	    last_operation > (int)ot.terms.size()-1)
		throw ogu::Exception(__FILE__,__LINE__,
		                     "Wrong last in EvalTree constructor.");

	values[0] = 0.0;
	flags[0] = true;
	values[1] = 1.0;
	flags[1] = true;
	values[2] = std::numeric_limits<double>::quiet_NaN();
	flags[2] = true;
	values[3] = 2.0/sqrt(M_PI);
	flags[3] = true;
	// this sets from num_constants on
	reset_all();
}
|
||||
|
||||
void EvalTree::reset_all()
|
||||
{
|
||||
for (int i = OperationTree::num_constants; i <= last_operation; i++)
|
||||
flags[i] = false;
|
||||
}
|
||||
|
||||
/** Assign a value to the nulary (variable) term t. Throws if t is out
 * of range, is one of the special constants, or is not nulary. */
void EvalTree::set_nulary(int t, double val)
{
	if (t < 0 || t > last_operation)
		throw ogu::Exception(__FILE__,__LINE__,
		                     "The tree index out of bounds in EvalTree::set_nulary");
	if (t < OperationTree::num_constants || otree.terms[t].nary() != 0)
		throw ogu::Exception(__FILE__,__LINE__,
		                     "The term is not nulary assignable in EvalTree::set_nulary");

	values[t] = val;
	flags[t] = true;
}
|
||||
|
||||
/** Recursively evaluate term t, caching results in values[]/flags[].
 * All nulary terms reachable from t must have been set beforehand.
 * Deliberate short-circuits: in a product the operand inciding on
 * fewer nulary terms is evaluated first and a zero result skips the
 * other operand; 0/x yields 0 without evaluating x (so 0/0 evaluates
 * to 0 here); x^0 yields 1 without evaluating x. */
double EvalTree::eval(int t)
{
	if (t < 0 || t > last_operation)
		throw ogu::Exception(__FILE__,__LINE__,
		                     "The tree index out of bounds in EvalTree::eval");
	if (otree.terms[t].nary() == 0 && flags[t] == false)
		throw ogu::Exception(__FILE__,__LINE__,
		                     "Nulary term has not been assigned a value in EvalTree::eval");

	if (! flags[t]) {
		const Operation& op = otree.terms[t];
		if (op.nary() == 1) {
			double r1 = eval(op.getOp1());
			double res;
			if (op.getCode() == UMINUS)
				res = -r1;
			else if (op.getCode() == LOG)
				res = log(r1);
			else if (op.getCode() == EXP)
				res = exp(r1);
			else if (op.getCode() == SIN)
				res = sin(r1);
			else if (op.getCode() == COS)
				res = cos(r1);
			else if (op.getCode() == TAN)
				res = tan(r1);
			else if (op.getCode() == SQRT)
				res = sqrt(r1);
			else if (op.getCode() == ERF)
				// erf via the local complementary error function
				res = 1-erffc(r1);
			else if (op.getCode() == ERFC)
				res = erffc(r1);
			else {
				throw ogu::Exception(__FILE__,__LINE__,
				                     "Unknown unary operation code in EvalTree::eval");
				res = 0.0;
			}
			values[t] = res;
			flags[t] = true;
		} else if (op.nary() == 2) {
			double res;
			if (op.getCode() == PLUS) {
				double r1 = eval(op.getOp1());
				double r2 = eval(op.getOp2());
				res = r1 + r2;
			} else if (op.getCode() == MINUS) {
				double r1 = eval(op.getOp1());
				double r2 = eval(op.getOp2());
				res = r1 - r2;
			} else if (op.getCode() == TIMES) {
				// pickup less complex formula first; a zero factor then
				// spares evaluating the more complex one
				unsigned int nul1 = otree.nulary_of_term(op.getOp1()).size();
				unsigned int nul2 = otree.nulary_of_term(op.getOp2()).size();
				if (nul1 < nul2) {
					double r1 = eval(op.getOp1());
					if (r1 == 0.0)
						res = 0.0;
					else {
						double r2 = eval(op.getOp2());
						res = r1 * r2;
					}
				} else {
					double r2 = eval(op.getOp2());
					if (r2 == 0)
						res = 0.0;
					else {
						double r1 = eval(op.getOp1());
						res = r1*r2;
					}
				}
			} else if (op.getCode() == DIVIDE) {
				// 0/x is taken as 0 without touching the denominator
				double r1 = eval(op.getOp1());
				if (r1 == 0)
					res = 0.0;
				else {
					double r2 = eval(op.getOp2());
					res = r1 / r2;
				}
			} else if (op.getCode() == POWER) {
				// suppose that more complex is the first op in average;
				// x^0 is taken as 1 without evaluating the base
				double r2 = eval(op.getOp2());
				if (r2 == 0.0)
					res = 1.0;
				else {
					double r1 = eval(op.getOp1());
					res = pow(r1, r2);
				}
			} else {
				throw ogu::Exception(__FILE__,__LINE__,
				                     "Unknown binary operation code in EvalTree::eval");
				res = 0.0;
			}
			values[t] = res;
			flags[t] = true;
		}
		// NOTE(review): this early return is redundant with the final
		// return below; kept for byte-compatibility
		return values[t];
	}

//	if (! std::isfinite(values[t]))
//		printf("Tree value t=%d is not finite = %f\n", t, values[t]);

	return values[t];
}
|
||||
|
||||
/** Debug print of the evaluation state: a table of ten values per
 * row (row label is the tens digit of the tree index); unset slots
 * are printed as dashes. */
void EvalTree::print() const
{
	printf("last_op=%d\n", last_operation);
	printf(" 0 1 2 3 4 5 6 7 8 9\n");
	printf("----------------------------------------------------------------\n");
	for (int i = 0; i <= (last_operation+1)/10; i++) {
		printf("%-3d|", i);
		int j = 0;
		while (j < 10 && 10*i+j < last_operation+1) {
			int k = 10*i+j;
			if (flags[k])
				printf(" %5.1g", values[k]);
			else
				printf(" -----");
			j++;
		}
		printf("\n");
	}
}
|
||||
|
||||
/** Print the term t as a sequence of assignments "$k = ...", one per
 * subterm, children before parents. stop_set remembers already
 * printed terms so shared subterms are printed only once. */
void DefaultOperationFormatter::format(const Operation& op, int t, FILE* fd)
{
	// add to the stop_set; bail out if the term was printed already
	if (stop_set.end() == stop_set.find(t))
		stop_set.insert(t);
	else
		return;

	// call recursively non-nulary terms of the operation
	if (op.nary() == 2) {
		int t1 = op.getOp1();
		const Operation& op1 = otree.terms[t1];
		int t2 = op.getOp2();
		const Operation& op2 = otree.terms[t2];
		if (op1.nary() > 0)
			format(op1, t1, fd);
		if (op2.nary() > 0)
			format(op2, t2, fd);
	}
	if (op.nary() == 1) {
		int t1 = op.getOp1();
		const Operation& op1 = otree.terms[t1];
		if (op1.nary() > 0)
			format(op1, t1, fd);
	}

	// print 'term ='
	format_term(t, fd);
	fprintf(fd, " = ");
	if (op.nary() == 0) {
		format_nulary(t, fd);
	} else if (op.nary() == 1) {
		int t1 = op.getOp1();
		const Operation& op1 = otree.terms[t1];
		// map the unary code to its printed name
		const char* opname = "unknown";
		switch (op.getCode()) {
		case UMINUS:
			opname = "-";
			break;
		case LOG:
			opname = "log";
			break;
		case EXP:
			opname = "exp";
			break;
		case SIN:
			opname = "sin";
			break;
		case COS:
			opname = "cos";
			break;
		case TAN:
			opname = "tan";
			break;
		case SQRT:
			opname = "sqrt";
			break;
		case ERF:
			opname = "erf";
			break;
		case ERFC:
			opname = "erfc";
			break;
		default:
			break;
		}
		fprintf(fd, "%s(", opname);
		// nulary operands are printed by value/name, others by "$k"
		if (op1.nary() == 0)
			format_nulary(t1, fd);
		else
			format_term(t1, fd);
		fprintf(fd, ")");
	} else {
		int t1 = op.getOp1();
		const Operation& op1 = otree.terms[t1];
		int t2 = op.getOp2();
		const Operation& op2 = otree.terms[t2];
		// map the binary code to its printed symbol
		const char* opname = "unknown";
		switch (op.getCode()) {
		case PLUS:
			opname = "+";
			break;
		case MINUS:
			opname = "-";
			break;
		case TIMES:
			opname = "*";
			break;
		case DIVIDE:
			opname = "/";
			break;
		case POWER:
			opname = "^";
			break;
		default:
			break;
		}
		if (op1.nary() == 0)
			format_nulary(t1, fd);
		else
			format_term(t1, fd);
		fprintf(fd, " %s ", opname);
		if (op2.nary() == 0)
			format_nulary(t2, fd);
		else
			format_term(t2, fd);
	}

	// statement terminator (";\n" in the default formatter)
	print_delim(fd);

}
|
||||
|
||||
/** Print a non-nulary term reference as "$<tree index>". */
void DefaultOperationFormatter::format_term(int t, FILE* fd) const
{
	fprintf(fd, "$%d", t);
}
|
||||
|
||||
/** Print a nulary term: the special constants by value, any other
 * nulary term as "$<tree index>". */
void DefaultOperationFormatter::format_nulary(int t, FILE* fd) const
{
	if (t == OperationTree::zero)
		fprintf(fd, "0");
	else if (t == OperationTree::one)
		fprintf(fd, "1");
	else if (t == OperationTree::nan)
		fprintf(fd, "NaN");
	else
		fprintf(fd, "$%d", t);
}
|
||||
|
||||
/** Print the delimiter emitted after each formatted assignment. */
void DefaultOperationFormatter::print_delim(FILE* fd) const
{
	fprintf(fd, ";\n");
}
|
||||
|
||||
/** Recursively convert the term t (whose operation is op) to an infix
 * expression string. Special constants print as literals — note that
 * two_over_pi prints the numeric value 2/sqrt(pi) — other nulary
 * terms are delegated to nulsc. Parentheses around binary operands
 * are added conservatively based on the operand's arity and code.
 * NOTE(review): the unbraced "if (t < num_constants) ... else" chain
 * relies on dangling-else binding; the final "else return
 * nulsc.convert(t)" belongs to the outer if, as intended. */
std::string OperationStringConvertor::convert(const Operation& op, int t) const
{
	if (op.nary() == 0) {
		if (t < OperationTree::num_constants)
			if (t == OperationTree::zero)
				return std::string("0");
			else if (t == OperationTree::one)
				return std::string("1");
			else if (t == OperationTree::nan)
				return std::string("NaN");
			else if (t == OperationTree::two_over_pi) {
				char buf[100];
				sprintf(buf, "%20.16g", 2.0/std::sqrt(M_PI));
				return std::string(buf);
			} else {
				// unreachable while num_constants == 4
				return std::string("error!error");
			}
		else
			return nulsc.convert(t);
	} else if (op.nary() == 1) {
		int t1 = op.getOp1();
		const Operation& op1 = otree.operation(t1);
		// map the unary code to its printed name
		const char* opname = "unknown";
		switch (op.getCode()) {
		case UMINUS:
			opname = "-";
			break;
		case LOG:
			opname = "log";
			break;
		case EXP:
			opname = "exp";
			break;
		case SIN:
			opname = "sin";
			break;
		case COS:
			opname = "cos";
			break;
		case TAN:
			opname = "tan";
			break;
		case SQRT:
			opname = "sqrt";
			break;
		case ERF:
			opname = "erf";
			break;
		case ERFC:
			opname = "erfc";
			break;
		default:
			break;
		}
		std::string s1 = convert(op1, t1);
		return std::string(opname) + "(" + s1 + ")";
	} else {
		int t1 = op.getOp1();
		const Operation& op1 = otree.operation(t1);
		int t2 = op.getOp2();
		const Operation& op2 = otree.operation(t2);
		// map the binary code to its printed symbol
		const char* opname = "unknown";
		switch (op.getCode()) {
		case PLUS:
			opname = "+";
			break;
		case MINUS:
			opname = "-";
			break;
		case TIMES:
			opname = "*";
			break;
		case DIVIDE:
			opname = "/";
			break;
		case POWER:
			opname = "^";
			break;
		default:
			break;
		}
		// decide about parenthesis: never needed around '+' operands,
		// needed after '-' only for a '+'/'-' right operand, and for
		// the remaining codes only around binary operands
		bool op1_par = true;
		bool op2_par = true;
		if (op.getCode() == PLUS) {
			op1_par = false;
			op2_par = false;
		} else if (op.getCode() == MINUS) {
			op1_par = false;
			if (op2.getCode() != MINUS && op2.getCode() != PLUS)
				op2_par = false;
		} else {
			if (op1.nary() < 2)
				op1_par = false;
			if (op2.nary() < 2)
				op2_par = false;
		}

		std::string res;
		if (op1_par)
			res += "(";
		res += convert(op1, t1);
		if (op1_par)
			res += ")";
		res += " ";
		res += opname;
		res += " ";
		if (op2_par)
			res += "(";
		res += convert(op2, t2);
		if (op2_par)
			res += ")";

		return res;
	}
}
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,457 @@
|
|||
// Copyright (C) 2005, Ondra Kamenik
|
||||
|
||||
// $Id: tree.h 1762 2008-03-31 14:28:54Z kamenik $
|
||||
|
||||
#ifndef OGP_TREE_H
|
||||
#define OGP_TREE_H
|
||||
|
||||
#include <vector>
|
||||
#include <set>
|
||||
#include <map>
|
||||
#include <ext/hash_map>
|
||||
#include <ext/hash_set>
|
||||
|
||||
namespace ogp {
|
||||
|
||||
using __gnu_cxx::hash_set;
|
||||
using __gnu_cxx::hash_map;
|
||||
using __gnu_cxx::hash;
|
||||
using std::vector;
|
||||
using std::set;
|
||||
using std::map;
|
||||
|
||||
/** Enumerator representing nulary, unary and binary operation
 * codes. For nulary, 'none' is used. When adding a new code, also
 * update #OperationTree::add_unary, #OperationTree::add_binary, and
 * #OperationTree::add_derivative. */
enum code_t {NONE, UMINUS, LOG, EXP, SIN, COS, TAN, SQRT, ERF,
             ERFC, PLUS, MINUS, TIMES, DIVIDE, POWER};

/** Class representing a nulary, unary, or binary operation. */
class Operation {
protected:
	/** Code of the operation. */
	code_t code;
	/** First operand. If none, then it is -1. */
	int op1;
	/** Second operand. If none, then it is -1. */
	int op2;

public:
	/** Constructs a binary operation. */
	Operation(code_t cd, int oper1, int oper2)
		: code(cd), op1(oper1), op2(oper2) {}
	/** Constructs a unary operation. */
	Operation(code_t cd, int oper1)
		: code(cd), op1(oper1), op2(-1) {}
	/** Constructs a nulary operation. */
	Operation()
		: code(NONE), op1(-1), op2(-1) {}
	/** A copy constructor. */
	Operation(const Operation& op)
		: code(op.code), op1(op.op1), op2(op.op2) {}

	/** Operator =. */
	const Operation& operator=(const Operation& op)
	{
		code = op.code;
		op1 = op.op1;
		op2 = op.op2;
		return *this;
	}
	/** Operator ==: all three members must agree. */
	bool operator==(const Operation& op) const
	{
		return code == op.code && op1 == op.op1 && op2 == op.op2;
	}
	/** Operator < implementing lexicographic ordering on
	 * (code, op1, op2); fully parenthesized so the meaning does not
	 * rest on &&/|| precedence (silences -Wparentheses). */
	bool operator<(const Operation& op) const
	{
		return (code < op.code ||
		        (code == op.code &&
		         (op1 < op.op1 || (op1 == op.op1 && op2 < op.op2))));
	}
	/** Returns the number of operands (0, 1 or 2). */
	int nary() const
	{
		return (op2 == -1)? ((op1 == -1) ? 0 : 1) : 2;
	}
	/** Returns a hash value of the operation.
	 * Fix: the previous formula "op2+1 + (op1+1)^15 + code^30" used
	 * '^' — bitwise XOR, not exponentiation — and, because '+' binds
	 * tighter than '^', degenerated to (op1+op2+2)^17: every pair of
	 * operations with equal op1+op2 collided regardless of code. Now
	 * the members occupy separate bit ranges and are scrambled by a
	 * Knuth multiplicative constant. Only hash quality changes; any
	 * deterministic function of the members is a valid hash. */
	size_t hashval() const
	{
		size_t h = (size_t)(op2+1);
		h += ((size_t)(op1+1)) << 10;
		h += ((size_t)code) << 20;
		return h*2654435761u;
	}

	code_t getCode() const
		{ return code; }
	int getOp1() const
		{ return op1; }
	int getOp2() const
		{ return op2; }

};
|
||||
|
||||
/** Strict-weak-ordering predicate over Operation, forwarding to
 * Operation::operator<. Now obsolete: OperationTree keys its map by
 * hashing (see ophash) rather than by ordering. */
struct ltoper {
	bool operator()(const Operation& oper1, const Operation& oper2) const
		{return oper1 < oper2;}
};
|
||||
|
||||
/** Hash function object for Operation, used as the hasher of the
 * tree's operation-to-index hash_map; forwards to
 * Operation::hashval. */
struct ophash {
	size_t operator()(const Operation& op) const
		{ return op.hashval(); }
};
|
||||
|
||||
/** Abstract function object selecting some operations; the operation
 * is given by its tree index. Implementations drive
 * OperationTree::select_terms / select_terms_inv. */
struct opselector {
	/** Return true iff the term with tree index t is selected. */
	virtual bool operator()(int t) const = 0;
	virtual ~opselector() {}
};
|
||||
|
||||
/** Forward declaration of OperationFormatter. */
|
||||
class OperationFormatter;
|
||||
class DefaultOperationFormatter;
|
||||
|
||||
/** Forward declaration of EvalTree to make it friend of OperationTree. */
|
||||
class EvalTree;
|
||||
|
||||
/** Class representing a set of trees for terms. Each term is
|
||||
* given a unique non-negative integer. The terms are basically
|
||||
* operations whose (integer) operands point to another terms in
|
||||
* the tree. The terms are stored in the vector. Equivalent unary
|
||||
* and binary terms are stored only once. This class guarantees
|
||||
* the uniqueness. The uniqueness of nulary terms is guaranteed by
|
||||
* the caller, since at this level of Operation abstraction, one
|
||||
* cannot discriminate between different nulary operations
|
||||
* (constants, variables). The uniqueness is enforced by the
|
||||
* hash_map whose keys are operations and values are integers
|
||||
* (indices of the terms).
|
||||
|
||||
* This class can also make derivatives of a given term with
|
||||
* respect to a given nulary term. In order to be able to quickly
|
||||
* recognize zero derivatives, we maintain a list of nulary
|
||||
* terms contained in the term. A possible zero derivative is then quickly
|
||||
* recognized by looking at the list. The list is implemented as a
|
||||
* hash_set of integers.
|
||||
*
|
||||
* In addition, many terms can be differentiated multiple times wrt
|
||||
* one variable since they can be referenced multiple times. To
|
||||
* avoid this, for each term we maintain a map mapping variables
|
||||
* to the derivatives of the term. As the caller will
|
||||
* differentiate wrt more and more variables, these maps will
|
||||
* become richer and richer.
|
||||
*/
|
||||
class OperationTree {
|
||||
friend class EvalTree;
|
||||
friend class DefaultOperationFormatter;
|
||||
protected:
|
||||
/** This is the vector of the terms. An index to this vector
|
||||
* uniquelly determines the term. */
|
||||
vector<Operation> terms;
|
||||
|
||||
/** This defines a type for a map mapping the unary and binary
|
||||
* operations to their indices. */
|
||||
typedef hash_map<Operation, int, ophash> _Topmap;
|
||||
typedef _Topmap::value_type _Topval;
|
||||
|
||||
/** This is the map mapping the unary and binary operations to
|
||||
* the indices of the terms.*/
|
||||
_Topmap opmap;
|
||||
|
||||
/** This is a type for a set of integers. */
|
||||
typedef hash_set<int> _Tintset;
|
||||
/** This is a vector of integer sets corresponding to the
|
||||
* nulary terms contained in the term. */
|
||||
vector<_Tintset> nul_incidence;
|
||||
|
||||
/** This is a type of the map from variables (nulary terms) to
|
||||
* the terms. */
|
||||
typedef hash_map<int, int> _Tderivmap;
|
||||
/** This is a vector of derivative mappings. For each term, it
|
||||
* maps variables to the derivatives of the term with respect
|
||||
* to the variables. */
|
||||
vector<_Tderivmap> derivatives;
|
||||
|
||||
/** The tree index of the last nulary term. */
|
||||
int last_nulary;
|
||||
public:
|
||||
/** This is a number of constants set in the following
|
||||
* enum. This number reserves space in a vector of terms for
|
||||
* the constants. */
|
||||
static const int num_constants = 4;
|
||||
/** Enumeration for special terms. We need zero, one, nan and
|
||||
* 2/pi. These will be always first four terms having indices
|
||||
* zero, one and two, three. If adding anything to this
|
||||
* enumeration, make sure you have updated num_constants above.*/
|
||||
enum {zero=0, one=1, nan=2, two_over_pi=3};
|
||||
|
||||
/** The unique constructor which initializes the object to
|
||||
* contain only zero, one and nan and two_over_pi.*/
|
||||
OperationTree();
|
||||
|
||||
/** Copy constructor. */
|
||||
OperationTree(const OperationTree& ot)
|
||||
: terms(ot.terms), opmap(ot.opmap), nul_incidence(ot.nul_incidence),
|
||||
derivatives(ot.derivatives),
|
||||
last_nulary(ot.last_nulary)
|
||||
{}
|
||||
|
||||
/** Add a nulary operation. The caller is responsible for not
|
||||
* inserting two semantically equivalent nulary operations.
|
||||
* @return newly allocated index
|
||||
*/
|
||||
int add_nulary();
|
||||
|
||||
/** Add a unary operation. The uniqness is checked, if it
|
||||
* already exists, then it is not added.
|
||||
* @param code the code of the unary operation
|
||||
* @param op the index of the operand
|
||||
* @return the index of the operation
|
||||
*/
|
||||
int add_unary(code_t code, int op);
|
||||
|
||||
/** Add a binary operation. The uniqueness is checked, if it
|
||||
* already exists, then it is not added.
|
||||
* @param code the code of the binary operation
|
||||
* @param op1 the index of the first operand
|
||||
* @param op2 the index of the second operand
|
||||
* @return the index of the operation
|
||||
*/
|
||||
int add_binary(code_t code, int op1, int op2);
|
||||
|
||||
/** Add the derivative of the given term with respect to the
|
||||
* given nulary operation.
|
||||
* @param t the index of the operation being differentiated
|
||||
* @param v the index of the nulary operation
|
||||
* @return the index of the derivative
|
||||
*/
|
||||
int add_derivative(int t, int v);
|
||||
|
||||
/** Add the substitution given by the map. This adds a new
|
||||
* term which is equal to the given term with applied
|
||||
* substitutions given by the map replacing each term on the
|
||||
* left by a term on the right. We do not check that the terms
|
||||
* on the left are not subterms of the terms on the right. If
|
||||
* so, the substituted terms are not subject of further
|
||||
* substitution. */
|
||||
int add_substitution(int t, const map<int,int>& subst);
|
||||
|
||||
/** Add the substitution given by the map where left sides of
|
||||
* substitutions come from another tree. The right sides are
|
||||
* from this tree. The given t is from the given otree. */
|
||||
int add_substitution(int t, const map<int,int>& subst,
|
||||
const OperationTree& otree);
|
||||
|
||||
/** This method turns the given term to a nulary
|
||||
* operation. This is an only method, which changes already
|
||||
* existing term (all other methods add something new). User
|
||||
* should use this with caution and must make sure that
|
||||
* something similar has happened for atoms. In addition, it
|
||||
* does not do anything with derivatives, so it should not be
|
||||
* used after some derivatives were created, and derivatives
|
||||
* already created and saved in derivatives mappings should be
|
||||
* forgotten with forget_derivative_maps. */
|
||||
void nularify(int t);
|
||||
|
||||
/** Return the set of nulary terms of the given term. */
|
||||
const hash_set<int>& nulary_of_term(int t) const
|
||||
{return nul_incidence[t];}
|
||||
|
||||
/** Select subterms of the given term according a given
|
||||
* operation selector and return the set of terms that
|
||||
* correspond to the compounded operations. The given term is
|
||||
* a compound function of the returned subterms and the
|
||||
* function consists only from operations which yield false in
|
||||
* the selector. */
|
||||
hash_set<int> select_terms(int t, const opselector& sel) const;
|
||||
|
||||
/** Select subterms of the given term according a given
|
||||
* operation selector and return the set of terms that
|
||||
* correspond to the compounded operations. The given term is
|
||||
* a compound function of the returned subterms and the
|
||||
* subterms are maximal subterms consisting from operations
|
||||
* yielding true in the selector. */
|
||||
hash_set<int> select_terms_inv(int t, const opselector& sel) const;
|
||||
|
||||
/** This forgets all the derivative mappings. It is used after
|
||||
* a term has been nularified, and then the derivative
|
||||
* mappings carry wrong information. Note that the derivatives
|
||||
* mappings serve only as a tool for quick returns in
|
||||
* add_derivative. Resseting the mappings is harmless, all the
|
||||
* information is rebuilt in add_derivative without any
|
||||
* additional nodes (trees). */
|
||||
void forget_derivative_maps();
|
||||
|
||||
/** This returns an operation of a given term. */
|
||||
const Operation& operation(int t) const
|
||||
{return terms[t];}
|
||||
|
||||
/** This outputs the operation to the given file descriptor
|
||||
* using the given OperationFormatter. */
|
||||
void print_operation_tree(int t, FILE* fd, OperationFormatter& f) const;
|
||||
|
||||
/** Debug print of a given operation: */
|
||||
void print_operation(int t) const;
|
||||
|
||||
/** Return the last tree index of a nulary term. */
|
||||
int get_last_nulary() const
|
||||
{return last_nulary;}
|
||||
|
||||
/** Get the number of all operations. */
|
||||
int get_num_op() const
|
||||
{return (int)(terms.size());}
|
||||
private:
|
||||
/** This registers a calculated derivative of the term in the
|
||||
* #derivatives vector.
|
||||
* @param t the index of the term for which we register the derivative
|
||||
* @param v the index of the nulary term (variable) to which
|
||||
* respect the derivative was taken
|
||||
* @param tder the index of the resulting derivative
|
||||
*/
|
||||
void register_derivative(int t, int v, int tder);
|
||||
/** This does the same job as select_terms with the only
|
||||
* difference, that it adds the terms to the given set and
|
||||
* hence can be used recursivelly. */
|
||||
void select_terms(int t, const opselector& sel, hash_set<int>& subterms) const;
|
||||
/** This does the same job as select_terms_inv with the only
|
||||
* difference, that it adds the terms to the given set and
|
||||
* hence can be used recursivelly and returns true if the term
|
||||
* was selected. */
|
||||
bool select_terms_inv(int t, const opselector& sel, hash_set<int>& subterms) const;
|
||||
/** This updates nul_incidence information after the term t
|
||||
* was turned to a nulary term in all terms. It goes through
|
||||
* the tree from simplest terms to teh more complex ones and
|
||||
* changes the nul_incidence information where necesary. It
|
||||
* maintains a set where the changes have been made.*/
|
||||
void update_nul_incidence_after_nularify(int t);
|
||||
};
|
||||
|
||||
/** EvalTree class allows for an evaluation of the given tree for
|
||||
* a given values of nulary terms. For each term in the
|
||||
* OperationTree the class maintains a resulting value and a flag
|
||||
* if the value has been calculated or set. The life cycle of the
|
||||
* class is the following: After it is initialized, the user must
|
||||
* set values for necessary nulary terms. Then the object can be
|
||||
* requested to evaluate particular terms. During this process,
|
||||
* the number of evaluated terms is increasing. Then the user can
|
||||
* request overall reset of evaluation flags, set the nulary terms
|
||||
* to new values and evaluate a number of terms.
|
||||
*
|
||||
* Note that currently the user cannot request a reset of
|
||||
* evaluation flags only for those terms depending on a given
|
||||
* nulary term. This might be added in future and handeled by a
|
||||
* subclasses of OperationTree and EvalTree, since we need a
|
||||
* support for this in OperationTree.
|
||||
*/
|
||||
class EvalTree {
|
||||
protected:
|
||||
/** Reference to the OperationTree over which all evaluations
|
||||
* are done. */
|
||||
const OperationTree& otree;
|
||||
/** The array of values. */
|
||||
double* const values;
|
||||
/** The array of evaluation flags. */
|
||||
bool* const flags;
|
||||
/** The index of last operation in the EvalTree. Length of
|
||||
* values and flags will be then last_operation+1. */
|
||||
int last_operation;
|
||||
public:
|
||||
/** Initializes the evaluation tree for the given operation
|
||||
* tree. If last is greater than -1, that the evaluation tree
|
||||
* will contain only formulas up to the given last index
|
||||
* (included). */
|
||||
EvalTree(const OperationTree& otree, int last = -1);
|
||||
virtual ~EvalTree()
|
||||
{ delete [] values; delete [] flags; }
|
||||
/** Set evaluation flag to all terms (besides the first
|
||||
* special terms) to false. */
|
||||
void reset_all();
|
||||
/** Set value for a given nulary term. */
|
||||
void set_nulary(int t, double val);
|
||||
/** Evaluate the given term with nulary terms set so far. */
|
||||
double eval(int t);
|
||||
/** Debug print. */
|
||||
void print() const;
|
||||
/* Return the operation tree. */
|
||||
const OperationTree& getOperationTree() const
|
||||
{return otree;}
|
||||
private:
|
||||
EvalTree(const EvalTree&);
|
||||
};
|
||||
|
||||
/** This is an interface describing how a given operation is
|
||||
* formatted for output. */
|
||||
class OperationFormatter {
|
||||
public:
|
||||
/** Empty virtual destructor. */
|
||||
virtual ~OperationFormatter() {}
|
||||
/** Print the formatted operation op with a given tree index t
|
||||
* to a given descriptor. (See class OperationTree to know
|
||||
* what is a tree index.) This prints all the tree. This
|
||||
* always writes equation, left hand side is a string
|
||||
* represenation (a variable, temporary, whatever) of the
|
||||
* term, the right hand side is a string representation of the
|
||||
* operation (which will refer to other string representation
|
||||
* of subterms). */
|
||||
virtual void format(const Operation& op, int t, FILE* fd)=0;
|
||||
};
|
||||
|
||||
/** The default formatter formats the formulas with a usual syntax
|
||||
* (for example Matlab). A formatting of atoms and terms might be
|
||||
* reimplemented by a subclass. In addition, during its life, the
|
||||
* object maintains a set of tree indices which have been output
|
||||
* and they are not output any more. */
|
||||
class DefaultOperationFormatter : public OperationFormatter {
|
||||
protected:
|
||||
const OperationTree& otree;
|
||||
set<int> stop_set;
|
||||
public:
|
||||
DefaultOperationFormatter(const OperationTree& ot)
|
||||
: otree(ot) {}
|
||||
/** Format the operation with the default syntax. */
|
||||
void format(const Operation& op, int t, FILE* fd);
|
||||
/** This prints a string represenation of the given term, for
|
||||
* example 'tmp10' for term 10. In this implementation it
|
||||
* prints $10. */
|
||||
virtual void format_term(int t, FILE* fd) const;
|
||||
/** Print a string representation of the nulary term. */
|
||||
virtual void format_nulary(int t, FILE* fd) const;
|
||||
/** Print a delimiter between two statements. By default it is
|
||||
* "\n". */
|
||||
virtual void print_delim(FILE* fd) const;
|
||||
};
|
||||
|
||||
/** Interface for converting a nulary term (atom) to a string. */
class NularyStringConvertor {
public:
	virtual ~NularyStringConvertor() {}
	/** Return the string representation of the atom with the tree
	 * index t. */
	virtual std::string convert(int t) const = 0;
};
|
||||
|
||||
/** This class converts the given term to its mathematical string representation. */
|
||||
class OperationStringConvertor {
|
||||
protected:
|
||||
const NularyStringConvertor& nulsc;
|
||||
const OperationTree& otree;
|
||||
public:
|
||||
OperationStringConvertor(const NularyStringConvertor& nsc, const OperationTree& ot)
|
||||
: nulsc(nsc), otree(ot) {}
|
||||
/** Empty virtual destructor. */
|
||||
virtual ~OperationStringConvertor() {}
|
||||
/** Convert the operation to the string mathematical
|
||||
* representation. This does not write any equation, just
|
||||
* returns a string representation of the formula. */
|
||||
std::string convert(const Operation& op, int t) const;
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,220 @@
|
|||
# Select a BLAS/LAPACK implementation: ATLAS, Intel MKL, or the
# reference netlib libraries (default).
ifeq ($(USE_ATLAS),yes)
  LD_LIBS := -llapack -lcblas -lf77blas -latlas -lg2c -lstdc++
else ifeq ($(USE_MKL),yes)
  LD_LIBS := -L /opt//intel/Compiler/11.0/074/mkl/lib/em64t -lmkl_intel_thread -lmkl_lapack -lmkl -lmkl_em64t -L /opt//intel/Compiler/11.0/074/lib/intel64 -lguide -lstdc++
else
  LD_LIBS := -llapack -lblas -lg2c -lstdc++
endif

CC_FLAGS = -Wall -I../sylv/cc -I../tl/cc -I../kord -I../integ/cc -I..

DYNVERSION = 1.3.7
# Scratch directory used by the srcball/binball packaging targets.
tmpdir = /tmp/aaabbb123321
# Runtime DLLs shipped with the Windows binary package.
pthreadGC2 = `which pthreadGC2.dll`
mingwm10 = `which mingwm10.dll`

# Fall back to gcc if no compiler is given on the command line.
ifeq ($(CC),)
  CC := gcc
endif

ifneq ($(LD_LIBRARY_PATH),) # use LD_LIBRARY_PATH from environment
  LDFLAGS := -Wl,--library-path $(LD_LIBRARY_PATH) $(LDFLAGS)
endif

ifeq ($(DEBUG),yes)
  CC_FLAGS := $(CC_FLAGS) -g -DTL_DEBUG=2
else
  CC_FLAGS := $(CC_FLAGS) -O3 -DPOSIX_THREADS
endif

# Platform-specific flags, threading library, and packaging metadata.
ifeq ($(OS),Windows_NT)
  CC_FLAGS := -mno-cygwin -mthreads $(CC_FLAGS)
  LDFLAGS := -mno-cygwin -mthreads $(LDFLAGS) $(LD_LIBS) -lpthreadGC2
  ARCH := w32
  MEX_SUFFIX = dll
else
  LDFLAGS := $(LDFLAGS) $(LD_LIBS) -lpthread
  ARCH := linux
  MEX_SUFFIX = mexglx
endif
|
||||
|
||||
# Source/object lists for each sub-library. The tl, integ and kord
# libraries are written in CWEB (*.cweb/*.hweb) which is tangled to
# *.cpp/*.h by rules further below.
sylvcppsource := $(wildcard ../sylv/cc/*.cpp)
sylvhsource := $(wildcard ../sylv/cc/*.h)
sylvobjects := $(patsubst %.cpp, %.o, $(sylvcppsource))

tlcwebsource := $(wildcard ../tl/cc/*.cweb)
tlcppsource := $(patsubst %.cweb,%.cpp,$(tlcwebsource))
tlhwebsource := $(wildcard ../tl/cc/*.hweb)
tlhsource := $(patsubst %.hweb,%.h,$(tlhwebsource))
tlobjects := $(patsubst %.cweb,%.o,$(tlcwebsource))

kordcwebsource := $(wildcard ../kord/*.cweb)
kordcppsource := $(patsubst %.cweb,%.cpp,$(kordcwebsource))
kordhwebsource := $(wildcard ../kord/*.hweb)
kordhsource := $(patsubst %.hweb,%.h,$(kordhwebsource))
kordobjects := $(patsubst %.cweb,%.o,$(kordcwebsource))

integcwebsource := $(wildcard ../integ/cc/*.cweb)
integcppsource := $(patsubst %.cweb,%.cpp,$(integcwebsource))
integhwebsource := $(wildcard ../integ/cc/*.hweb)
integhsource := $(patsubst %.hweb,%.h,$(integhwebsource))
integobjects := $(patsubst %.cweb,%.o,$(integcwebsource))

parserhsource := $(wildcard ../parser/cc/*.h)
parsercppsource := $(wildcard ../parser/cc/*.cpp)

utilshsource := $(wildcard ../utils/cc/*.h)
utilscppsource := $(wildcard ../utils/cc/*.cpp)

# Local sources: hand-written C++ plus flex/bison generated files.
cppsource := $(wildcard *.cpp) $(patsubst %.y,%_ll.cc,$(wildcard *.y)) $(patsubst %.lex,%_tab.cc,$(wildcard *.lex))
hsource := $(wildcard *.h) $(patsubst %.lex,%_tab.hh,$(wildcard *.lex))
objects := $(patsubst %.cpp,%.o,$(wildcard *.cpp)) $(patsubst %.y,%_ll.o,$(wildcard *.y)) $(patsubst %.lex,%_tab.o,$(wildcard *.lex))
|
||||
|
||||
all: dynare++

# The CWEB sources in ../tl, ../integ and ../kord are tangled and
# compiled by their own Makefiles; delegate via recursive make.
../tl/cc/dummy.ch:
	make -C ../tl/cc dummy.ch

../tl/cc/%.cpp: ../tl/cc/%.cweb ../tl/cc/dummy.ch
	make -C ../tl/cc $*.cpp

../tl/cc/%.h: ../tl/cc/%.hweb ../tl/cc/dummy.ch
	make -C ../tl/cc $*.h

../tl/cc/%.o: ../tl/cc/%.cpp $(tlhsource)
	make -C ../tl/cc $*.o

../integ/cc/dummy.ch:
	make -C ../integ/cc dummy.ch

../integ/cc/%.cpp: ../integ/cc/%.cweb ../integ/cc/dummy.ch
	make -C ../integ/cc $*.cpp

../integ/cc/%.h: ../integ/cc/%.hweb ../integ/cc/dummy.ch
	make -C ../integ/cc $*.h

../integ/cc/%.o: ../integ/cc/%.cpp $(integhsource) $(tlhsource)
	make -C ../integ/cc $*.o

../sylv/cc/%.o: ../sylv/cc/%.cpp $(sylvhsource)
	make -C ../sylv/cc $*.o

../kord/dummy.ch:
	make -C ../kord dummy.ch

../kord/%.cpp: ../kord/%.cweb ../kord/dummy.ch
	make -C ../kord $*.cpp

../kord/%.h: ../kord/%.hweb ../kord/dummy.ch
	make -C ../kord $*.h

../kord/%.o: ../kord/%.cpp $(tlhsource) $(kordhsource) $(integhsource)
	make -C ../kord $*.o

# for some reason, the pattern rules didn't work here, so the rules
# are expanded (bison -d produces both dynglob_tab.cc and dynglob_tab.hh):
dynglob_tab.cc: dynglob.y dynare_atoms.h dynare_model.h
	bison -d -t --verbose -odynglob_tab.cc dynglob.y

dynglob_tab.hh: dynglob.y dynare_atoms.h dynare_model.h
	bison -d -t --verbose -odynglob_tab.cc dynglob.y

dynglob_ll.cc: dynglob.lex dynglob_tab.hh
	flex -i -odynglob_ll.cc dynglob.lex

dynglob_ll.o: dynglob_ll.cc $(hsource)
	$(CC) -I.. -I../sylv/cc -I../tl/cc -O3 -c dynglob_ll.cc

dynglob_tab.o: dynglob_tab.cc $(hsource)
	$(CC) -I.. -I../sylv/cc -I../tl/cc -O3 -c dynglob_tab.cc

%.o: %.cpp $(hsource) $(kordhsource) $(tlhsource) $(sylvhsource) $(parserhsource)
	$(CC) $(CC_FLAGS) -DDYNVERSION=\"$(DYNVERSION)\" -o $*.o -c $*.cpp

../parser/cc/parser.a: $(parserhsource) $(parsercppsource)
	make -C ../parser/cc parser.a

../utils/cc/utils.a: $(utilshsource) $(utilscppsource)
	make -C ../utils/cc utils.a

# BUGFIX: the prerequisite list previously referenced the misspelled
# variables $(tlcwebsoure), $(integcwebsoure) and $(kordcwebsoure),
# which are undefined and expand to nothing; the correctly spelled
# *cwebsource variables are used now.
dynare++: $(tlhwebsource) $(tlcwebsource) $(tlhsource) $(tlcppsource) \
	$(integhwebsource) $(integcwebsource) $(integhsource) $(integcppsource) \
	$(kordhwebsource) $(kordcwebsource) $(kordhsource) $(kordcppsource) \
	$(sylvhsource) $(sylvcppsource) \
	$(kordobjects) $(tlobjects) $(integobjects) $(sylvobjects) $(objects) \
	../parser/cc/parser.a ../utils/cc/utils.a
	$(CC) -g -o dynare++ $(objects) $(kordobjects) $(integobjects) $(tlobjects) ../parser/cc/parser.a ../utils/cc/utils.a $(sylvobjects) $(LDFLAGS)
|
||||
|
||||
../extern/matlab/dynare_simul_.$(MEX_SUFFIX):
	make -C ../extern/matlab all

# Build a source tarball dynare++-$(DYNVERSION).src.tgz in the parent
# directory, staging the tree under $(tmpdir) first.
srcball:
	rm -rf $(tmpdir)
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/tests
	cp ../tests/*.mod ../tests/*.dyn $(tmpdir)/dynare++-$(DYNVERSION)/tests
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/src
	cp *.h *.cpp *.y *.lex Makefile $(tmpdir)/dynare++-$(DYNVERSION)/src
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/tl/cc
	cp ../tl/cc/*web ../tl/cc/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/tl/cc
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/tl/testing
	cp ../tl/testing/*.cpp ../tl/testing/*.h ../tl/testing/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/tl/testing
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/integ/cc
	cp ../integ/cc/*web ../integ/cc/Makefile ../integ/cc/*.dat $(tmpdir)/dynare++-$(DYNVERSION)/integ/cc
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/integ/testing
	cp ../integ/testing/*.cpp ../integ/testing/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/integ/testing
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/sylv/cc
	cp ../sylv/cc/*.cpp ../sylv/cc/*.h ../sylv/cc/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/sylv/cc
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/sylv/testing
	cp ../sylv/testing/*.cpp ../sylv/testing/*.h ../sylv/testing/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/sylv/testing
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/kord
	cp ../kord/*web ../kord/Makefile ../kord/tests.cpp $(tmpdir)/dynare++-$(DYNVERSION)/kord
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/parser/cc
	cp ../parser/cc/*.cpp ../parser/cc/*.h ../parser/cc/*.lex ../parser/cc/*.y ../parser/cc/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/parser/cc
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/utils/cc
	cp ../utils/cc/*.cpp ../utils/cc/*.h ../utils/cc/Makefile $(tmpdir)/dynare++-$(DYNVERSION)/utils/cc
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/extern/matlab
	cp ../extern/matlab/*.cpp ../extern/matlab/*.m ../extern/matlab/Makefile ../extern/matlab/*.bat $(tmpdir)/dynare++-$(DYNVERSION)/extern/matlab
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/doc
	cp ../doc/*.tex $(tmpdir)/dynare++-$(DYNVERSION)/doc
	cp ../change_log.html $(tmpdir)/dynare++-$(DYNVERSION)/
	cd $(tmpdir); tar czf dynare++-$(DYNVERSION).src.tgz dynare++-$(DYNVERSION)/*
	mv $(tmpdir)/dynare++-$(DYNVERSION).src.tgz ..
	rm -rf $(tmpdir)
|
||||
|
||||
# Build a binary package (zip on Windows, tgz elsewhere) containing
# the dynare++ executable, the MATLAB mex file, docs and tests.
binball: dynare++ ../extern/matlab/dynare_simul_.$(MEX_SUFFIX)
	rm -rf $(tmpdir)
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/tests
	cp ../tests/*.mod ../tests/*.dyn $(tmpdir)/dynare++-$(DYNVERSION)/tests
	mkdir -p $(tmpdir)/dynare++-$(DYNVERSION)/doc
	cp ../doc/*.pdf $(tmpdir)/dynare++-$(DYNVERSION)/doc
	cp dynare++* $(tmpdir)/dynare++-$(DYNVERSION)
	cp ../extern/matlab/*.m $(tmpdir)/dynare++-$(DYNVERSION)
	cp ../extern/matlab/dynare_simul_.$(MEX_SUFFIX) $(tmpdir)/dynare++-$(DYNVERSION)
	cp ../change_log.html $(tmpdir)/dynare++-$(DYNVERSION)
ifeq ($(OS),Windows_NT)
	cp $(pthreadGC2) $(tmpdir)/dynare++-$(DYNVERSION)
	cp $(mingwm10) $(tmpdir)/dynare++-$(DYNVERSION)
	cd $(tmpdir); zip -r dynare++-$(DYNVERSION)-$(ARCH).ix86.zip dynare++-$(DYNVERSION)/*
	mv $(tmpdir)/dynare++-$(DYNVERSION)-$(ARCH).ix86.zip ..
else
	cd $(tmpdir); tar czf dynare++-$(DYNVERSION)-$(ARCH).ix86.tgz dynare++-$(DYNVERSION)/*
	mv $(tmpdir)/dynare++-$(DYNVERSION)-$(ARCH).ix86.tgz ..
endif
	rm -rf $(tmpdir)

# Remove all generated files here and in the sub-libraries.
clear:
	rm -f *.o
	rm -f dynare++*
	rm -f *_ll.cc *_tab.hh *_tab.cc *.output
	make -C ../tl/testing clear
	make -C ../tl/cc clear
	make -C ../integ/testing clear
	make -C ../integ/cc clear
	make -C ../sylv/testing clear
	make -C ../sylv/cc clear
	make -C ../kord clear
	make -C ../parser/cc clear
	make -C ../utils/cc clear
|
|
@ -0,0 +1,364 @@
|
|||
|
||||
#include "dynare3.h"
|
||||
#include "dynare_exception.h"
|
||||
#include "planner_builder.h"
|
||||
#include "forw_subst_builder.h"
|
||||
|
||||
#include "utils/cc/memory_file.h"
|
||||
#include "utils/cc/exception.h"
|
||||
#include "parser/cc/parser_exception.h"
|
||||
#include "parser/cc/atom_substitutions.h"
|
||||
#include "../tl/cc/tl_exception.h"
|
||||
#include "../kord/kord_exception.h"
|
||||
|
||||
#ifndef DYNVERSION
|
||||
#define DYNVERSION "unknown"
|
||||
#endif
|
||||
|
||||
|
||||
/**************************************************************************************/
|
||||
/* DynareNameList class */
|
||||
/**************************************************************************************/
|
||||
/** Translate a list of names to their indices within this name list.
 * @param ns names to look up (order is preserved in the result)
 * @return indices such that getName(res[i]) equals ns[i]
 * Throws DynareException if any name is not found. */
vector<int> DynareNameList::selectIndices(const vector<const char*>& ns) const
{
	vector<int> res;
	for (unsigned int i = 0; i < ns.size(); i++) {
		// linear search over the list; getNum() is small in practice
		int j = 0;
		while (j < getNum() && strcmp(getName(j), ns[i]) != 0)
			j++;
		if (j == getNum())
			throw DynareException(__FILE__, __LINE__,
								  string("Couldn't find name for ") + ns[i] +
								  " in DynareNameList::selectIndices");
		res.push_back(j);
	}
	return res;
}
|
||||
|
||||
/**************************************************************************************/
|
||||
/* Dynare class */
|
||||
/**************************************************************************************/
|
||||
|
||||
/** Construct a Dynare object by parsing a model file.
 * @param modname path of the model file
 * @param ord order of approximation
 * @param sstol tolerance of the non-linear steady state solver
 * @param jr journal used for logging
 * Throws DynareException if the file cannot be opened or parsed. */
Dynare::Dynare(const char* modname, int ord, double sstol, Journal& jr)
	: journal(jr), model(NULL), ysteady(NULL), md(1), dnl(NULL), denl(NULL), dsnl(NULL),
	  fe(NULL), fde(NULL), ss_tol(sstol)
{
	// map the model file into memory
	ogu::MemoryFile mf(modname);
	if (mf.exists()) {
		try {
			model = new ogdyn::DynareParser(mf.base(), mf.length(), ord);
		} catch (const ogp::ParserException& pe) {
			// translate the parser's byte offset into a user-friendly
			// line/column position within the model file
			int line;
			int col;
			mf.line_and_col(pe.offset(), line, col);
			throw DynareException(pe.message(), modname, line, col);
		}
		ysteady = new Vector(model->getAtoms().ny());
		dnl = new DynareNameList(*this);
		denl = new DynareExogNameList(*this);
		dsnl = new DynareStateNameList(*this, *dnl, *denl);
		fe = new ogp::FormulaEvaluator(model->getParser());
		fde = new ogp::FormulaDerEvaluator(model->getParser());
		writeModelInfo(journal);
	} else {
		throw DynareException(__FILE__, __LINE__, string("Could not open model file ")+modname);
	}
}
|
||||
|
||||
/** Construct a Dynare object directly from in-memory model data
 * (variable/parameter name arrays and the equations string) instead
 * of a model file. Throws DynareException on a parse error. */
Dynare::Dynare(const char** endo, int num_endo,
			   const char** exo, int num_exo,
			   const char** par, int num_par,
			   const char* equations, int len, int ord,
			   double sstol, Journal& jr)
	: journal(jr), model(NULL), ysteady(NULL), md(1), dnl(NULL), denl(NULL), dsnl(NULL),
	  fe(NULL), fde(NULL), ss_tol(sstol)
{
	try {
		model = new ogdyn::DynareSPModel(endo, num_endo, exo, num_exo, par, num_par,
										 equations, len, ord);
	} catch (const ogp::ParserException& pe) {
		// no file here, so only the offset within the equations
		// string can be reported
		throw DynareException(pe.message(), pe.offset());
	}
	ysteady = new Vector(model->getAtoms().ny());
	dnl = new DynareNameList(*this);
	denl = new DynareExogNameList(*this);
	dsnl = new DynareStateNameList(*this, *dnl, *denl);
	fe = new ogp::FormulaEvaluator(model->getParser());
	fde = new ogp::FormulaDerEvaluator(model->getParser());
	writeModelInfo(journal);
}
|
||||
|
||||
/** Copy constructor. Performs a deep copy: the model and the steady
 * state vector are cloned/copied, and the name lists and evaluators
 * are rebuilt against the new model. */
Dynare::Dynare(const Dynare& dynare)
	: journal(dynare.journal), model(NULL),
	  ysteady(NULL), md(dynare.md),
	  dnl(NULL), denl(NULL), dsnl(NULL), fe(NULL), fde(NULL),
	  ss_tol(dynare.ss_tol)
{
	model = dynare.model->clone();
	ysteady = new Vector(*(dynare.ysteady));
	dnl = new DynareNameList(*this);
	denl = new DynareExogNameList(*this);
	dsnl = new DynareStateNameList(*this, *dnl, *denl);
	fe = new ogp::FormulaEvaluator(model->getParser());
	fde = new ogp::FormulaDerEvaluator(model->getParser());
}
|
||||
|
||||
/** Destructor releasing all owned resources. The null checks of the
 * original were redundant: C++ guarantees that delete on a null
 * pointer is a no-op. */
Dynare::~Dynare()
{
	delete model;
	delete ysteady;
	delete dnl;
	delete dsnl;
	delete denl;
	delete fe;
	delete fde;
}
|
||||
|
||||
void Dynare::writeMat4(FILE* fd, const char* prefix) const
|
||||
{
|
||||
char tmp[100];
|
||||
sprintf(tmp, "%s_vars", prefix);
|
||||
getAllEndoNames().writeMat4(fd, tmp);
|
||||
getAllEndoNames().writeMat4Indices(fd, prefix);
|
||||
sprintf(tmp, "%s_state_vars", prefix);
|
||||
getStateNames().writeMat4(fd, tmp);
|
||||
sprintf(tmp, "%s_shocks", prefix);
|
||||
getExogNames().writeMat4(fd, tmp);
|
||||
getExogNames().writeMat4Indices(fd, prefix);
|
||||
sprintf(tmp, "%s_vcov_exo", prefix);
|
||||
model->getVcov().writeMat4(fd, tmp);
|
||||
TwoDMatrix aux(1,1);
|
||||
sprintf(tmp, "%s_nstat", prefix);
|
||||
aux.get(0,0) = nstat();
|
||||
aux.writeMat4(fd, tmp);
|
||||
sprintf(tmp, "%s_npred", prefix);
|
||||
aux.get(0,0) = npred();
|
||||
aux.writeMat4(fd, tmp);
|
||||
sprintf(tmp, "%s_nboth", prefix);
|
||||
aux.get(0,0) = nboth();
|
||||
aux.writeMat4(fd, tmp);
|
||||
sprintf(tmp, "%s_nforw", prefix);
|
||||
aux.get(0,0) = nforw();
|
||||
aux.writeMat4(fd, tmp);
|
||||
}
|
||||
|
||||
void Dynare::writeDump(const std::string& basename) const
|
||||
{
|
||||
std::string fname(basename);
|
||||
fname += ".dump";
|
||||
std::ofstream out(fname.c_str());
|
||||
model->dump_model(out);
|
||||
out.close();
|
||||
}
|
||||
|
||||
/** Solve for the deterministic steady state, starting the non-linear
 * solver from the initial values given in the model. On success,
 * @a steady holds the solution; throws DynareException if the solver
 * fails to converge within 500 iterations at tolerance ss_tol. */
void Dynare::solveDeterministicSteady(Vector& steady)
{
	JournalRecordPair pa(journal);
	pa << "Non-linear solver for deterministic steady state" << endrec;
	// start from the init values declared in the model file
	steady = (const Vector&) model->getInit();
	DynareVectorFunction dvf(*this);
	DynareJacobian dj(*this);
	ogu::NLSolver nls(dvf, dj, 500, ss_tol, journal);
	int iter;
	if (! nls.solve(steady, iter))
		throw DynareException(__FILE__, __LINE__,
							  "Could not obtain convergence in non-linear solver");
}
|
||||
|
||||
// evaluate system at given y_t=y_{t+1}=y_{t-1}, and given shocks x_t
|
||||
void Dynare::evaluateSystem(Vector& out, const Vector& yy, const Vector& xx)
|
||||
{
|
||||
ConstVector yym(yy, nstat(), nys());
|
||||
ConstVector yyp(yy, nstat()+npred(), nyss());
|
||||
evaluateSystem(out, yym, yy, yyp, xx);
|
||||
}
|
||||
|
||||
// evaluate system at given y^*_{t-1}, y_t, y^{**}_{t+1} and at
|
||||
// exogenous x_t, all three vectors yym, yy, and yyp have the
|
||||
// respective lengths of y^*_{t-1}, y_t, y^{**}_{t+1}
|
||||
void Dynare::evaluateSystem(Vector& out, const Vector& yym, const Vector& yy,
|
||||
const Vector& yyp, const Vector& xx)
|
||||
{
|
||||
ogdyn::DynareAtomValues dav(model->getAtoms(), model->getParams(), yym, yy, yyp, xx);
|
||||
DynareEvalLoader del(model->getAtoms(), out);
|
||||
fe->eval(dav, del);
|
||||
}
|
||||
|
||||
void Dynare::calcDerivatives(const Vector& yy, const Vector& xx)
|
||||
{
|
||||
ConstVector yym(yy, nstat(), nys());
|
||||
ConstVector yyp(yy, nstat()+npred(), nyss());
|
||||
ogdyn::DynareAtomValues dav(model->getAtoms(), model->getParams(), yym, yy, yyp, xx);
|
||||
DynareDerEvalLoader ddel(model->getAtoms(), md, model->getOrder());
|
||||
for (int iord = 1; iord <= model->getOrder(); iord++)
|
||||
fde->eval(dav, ddel, iord);
|
||||
}
|
||||
|
||||
void Dynare::calcDerivativesAtSteady()
|
||||
{
|
||||
Vector xx(nexog());
|
||||
xx.zeros();
|
||||
calcDerivatives(*ysteady, xx);
|
||||
}
|
||||
|
||||
/** Write a summary of the model (variable counts, planner variables,
 * forward substitutions and general substitutions) to the journal.
 * NOTE(review): the parameter jr is unused; the method writes to the
 * member journal — kept as-is to preserve the declared interface. */
void Dynare::writeModelInfo(Journal& jr) const
{
	// write info on variables
	{
		JournalRecordPair rp(journal);
		rp << "Information on variables" << endrec;
		JournalRecord rec1(journal);
		rec1 << "Number of endogenous:            " << ny() << endrec;
		JournalRecord rec2(journal);
		rec2 << "Number of exogenous:             " << nexog() << endrec;
		JournalRecord rec3(journal);
		rec3 << "Number of static:                " << nstat() << endrec;
		JournalRecord rec4(journal);
		rec4 << "Number of predetermined:         " << npred()+nboth() << endrec;
		JournalRecord rec5(journal);
		rec5 << "Number of forward looking:       " << nforw()+nboth() << endrec;
		JournalRecord rec6(journal);
		rec6 << "Number of both:                  " << nboth() << endrec;
	}

	// write info on planner variables (only present if the model was
	// built from a planner problem)
	const ogdyn::PlannerInfo* pinfo = model->get_planner_info();
	if (pinfo) {
		JournalRecordPair rp(journal);
		rp << "Information on planner variables" << endrec;
		JournalRecord rec1(journal);
		rec1 << "Number of Lagrange multipliers:  " << pinfo->num_lagrange_mults << endrec;
		JournalRecord rec2(journal);
		rec2 << "Number of auxiliary variables:   " << pinfo->num_aux_variables << endrec;
		JournalRecord rec3(journal);
		rec3 << "Number of new terms in the tree: " << pinfo->num_new_terms << endrec;
	}

	// write info on forward substitutions (terms with leads > 1)
	const ogdyn::ForwSubstInfo* finfo = model->get_forw_subst_info();
	if (finfo) {
		JournalRecordPair rp(journal);
		rp << "Information on forward substitutions" << endrec;
		JournalRecord rec1(journal);
		rec1 << "Number of affected equations:    " << finfo->num_affected_equations << endrec;
		JournalRecord rec2(journal);
		rec2 << "Number of substituted terms:     " << finfo->num_subst_terms << endrec;
		JournalRecord rec3(journal);
		rec3 << "Number of auxiliary variables:   " << finfo->num_aux_variables << endrec;
		JournalRecord rec4(journal);
		rec4 << "Number of new terms in the tree: " << finfo->num_new_terms << endrec;
	}

	// write info on substitutions
	const ogp::SubstInfo* sinfo = model->get_subst_info();
	if (sinfo) {
		JournalRecordPair rp(journal);
		rp << "Information on substitutions" << endrec;
		JournalRecord rec1(journal);
		rec1 << "Number of substitutions:         " << sinfo->num_substs << endrec;
	}
}
|
||||
|
||||
DynareNameList::DynareNameList(const Dynare& dynare)
|
||||
{
|
||||
for (int i = 0; i < dynare.ny(); i++) {
|
||||
int j = dynare.model->getAtoms().y2outer_endo()[i];
|
||||
const char* name = dynare.model->getAtoms().get_endovars()[j];
|
||||
names.push_back(name);
|
||||
}
|
||||
}
|
||||
|
||||
DynareStateNameList::DynareStateNameList(const Dynare& dynare, const DynareNameList& dnl,
|
||||
const DynareExogNameList& denl)
|
||||
{
|
||||
for (int i = 0; i < dynare.nys(); i++)
|
||||
names.push_back(dnl.getName(i+dynare.nstat()));
|
||||
for (int i = 0; i < dynare.nexog(); i++)
|
||||
names.push_back(denl.getName(i));
|
||||
}
|
||||
|
||||
DynareExogNameList::DynareExogNameList(const Dynare& dynare)
|
||||
{
|
||||
for (int i = 0; i < dynare.nexog(); i++) {
|
||||
int j = dynare.model->getAtoms().y2outer_exo()[i];
|
||||
const char* name = dynare.model->getAtoms().get_exovars()[j];
|
||||
names.push_back(name);
|
||||
}
|
||||
}
|
||||
|
||||
DynareEvalLoader::DynareEvalLoader(const ogp::FineAtoms& a, Vector& out)
|
||||
: Vector(out)
|
||||
{
|
||||
if (a.ny() != out.length())
|
||||
throw DynareException(__FILE__, __LINE__, "Wrong length of out vector in DynareEvalLoader constructor");
|
||||
}
|
||||
|
||||
/** This clears the container of model derivatives and initializes it
|
||||
* inserting empty sparse tensors up to the given order. */
|
||||
DynareDerEvalLoader::DynareDerEvalLoader(const ogp::FineAtoms& a,
|
||||
TensorContainer<FSSparseTensor>& mod_ders,
|
||||
int order)
|
||||
: atoms(a), md(mod_ders)
|
||||
{
|
||||
md.clear();
|
||||
for (int iord = 1; iord <= order; iord++) {
|
||||
FSSparseTensor* t = new FSSparseTensor(iord, atoms.ny()+atoms.nys()+atoms.nyss()+atoms.nexo(), atoms.ny());
|
||||
md.insert(t);
|
||||
}
|
||||
}
|
||||
|
||||
void DynareDerEvalLoader::load(int i, int iord, const int* vars, double res)
|
||||
{
|
||||
FSSparseTensor* t = md.get(Symmetry(iord));
|
||||
IntSequence s(iord, 0);
|
||||
for (int j = 0; j < iord; j++)
|
||||
s[j] = atoms.get_pos_of_all(vars[j]);
|
||||
t->insert(s, i, res);
|
||||
}
|
||||
|
||||
DynareJacobian::DynareJacobian(Dynare& dyn)
|
||||
: Jacobian(dyn.ny()), d(dyn)
|
||||
{
|
||||
zeros();
|
||||
}
|
||||
|
||||
void DynareJacobian::eval(const Vector& yy)
|
||||
{
|
||||
ogdyn::DynareSteadyAtomValues
|
||||
dav(d.getModel().getAtoms(), d.getModel().getParams(), yy);
|
||||
zeros();
|
||||
d.fde->eval(dav, *this, 1);
|
||||
}
|
||||
|
||||
void DynareJacobian::load(int i, int iord, const int* vars, double res)
|
||||
{
|
||||
if (iord != 1)
|
||||
throw DynareException(__FILE__, __LINE__,
|
||||
"Derivative order different from order=1 in DynareJacobian::load");
|
||||
|
||||
int t = vars[0];
|
||||
int j = d.getModel().getAtoms().get_pos_of_all(t);
|
||||
if (j < d.nyss())
|
||||
get(i, j+d.nstat()+d.npred()) += res;
|
||||
else if (j < d.nyss()+d.ny())
|
||||
get(i, j-d.nyss()) += res;
|
||||
else if (j < d.nyss()+d.ny()+d.nys())
|
||||
get(i, j-d.nyss()-d.ny()+d.nstat()) += res;
|
||||
}
|
||||
|
||||
void DynareVectorFunction::eval(const ConstVector& in, Vector& out)
|
||||
{
|
||||
check_for_eval(in, out);
|
||||
Vector xx(d.nexog());
|
||||
xx.zeros();
|
||||
d.evaluateSystem(out, in, xx);
|
||||
}
|
||||
|
|
@ -0,0 +1,194 @@
|
|||
// $Id: dynare3.h 1764 2008-03-31 14:30:55Z kamenik $
|
||||
// Copyright 2005, Ondra Kamenik
|
||||
|
||||
#ifndef DYNARE3_H
|
||||
#define DYNARE3_H
|
||||
|
||||
#include "../tl/cc/t_container.h"
|
||||
#include "../tl/cc/sparse_tensor.h"
|
||||
#include "../kord/decision_rule.h"
|
||||
#include "../kord/dynamic_model.h"
|
||||
|
||||
#include "dynare_model.h"
|
||||
#include "nlsolve.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
class Dynare;
|
||||
|
||||
class DynareNameList : public NameList {
|
||||
vector<const char*> names;
|
||||
public:
|
||||
DynareNameList(const Dynare& dynare);
|
||||
int getNum() const
|
||||
{return (int)names.size();}
|
||||
const char* getName(int i) const
|
||||
{return names[i];}
|
||||
/** This for each string of the input vector calculates its index
|
||||
* in the names. And returns the resulting vector of indices. If
|
||||
* the name cannot be found, then an exception is raised. */
|
||||
vector<int> selectIndices(const vector<const char*>& ns) const;
|
||||
};
|
||||
|
||||
class DynareExogNameList : public NameList {
|
||||
vector<const char*> names;
|
||||
public:
|
||||
DynareExogNameList(const Dynare& dynare);
|
||||
int getNum() const
|
||||
{return (int)names.size();}
|
||||
const char* getName(int i) const
|
||||
{return names[i];}
|
||||
};
|
||||
|
||||
class DynareStateNameList : public NameList {
|
||||
vector<const char*> names;
|
||||
public:
|
||||
DynareStateNameList(const Dynare& dynare, const DynareNameList& dnl,
|
||||
const DynareExogNameList& denl);
|
||||
int getNum() const
|
||||
{return (int)names.size();}
|
||||
const char* getName(int i) const
|
||||
{return names[i];}
|
||||
};
|
||||
|
||||
// The following only implements DynamicModel with help of ogdyn::DynareModel
|
||||
|
||||
class DynareJacobian;
|
||||
class Dynare : public DynamicModel {
|
||||
friend class DynareNameList;
|
||||
friend class DynareExogNameList;
|
||||
friend class DynareStateNameList;
|
||||
friend class DynareJacobian;
|
||||
Journal& journal;
|
||||
ogdyn::DynareModel* model;
|
||||
Vector* ysteady;
|
||||
TensorContainer<FSSparseTensor> md;
|
||||
DynareNameList* dnl;
|
||||
DynareExogNameList* denl;
|
||||
DynareStateNameList* dsnl;
|
||||
ogp::FormulaEvaluator* fe;
|
||||
ogp::FormulaDerEvaluator* fde;
|
||||
const double ss_tol;
|
||||
public:
|
||||
/** Parses the given model file and uses the given order to
|
||||
* override order from the model file (if it is != -1). */
|
||||
Dynare(const char* modname, int ord, double sstol, Journal& jr);
|
||||
/** Parses the given equations with explicitly given names. */
|
||||
Dynare(const char** endo, int num_endo,
|
||||
const char** exo, int num_exo,
|
||||
const char** par, int num_par,
|
||||
const char* equations, int len, int ord,
|
||||
double sstol, Journal& jr);
|
||||
/** Makes a deep copy of the object. */
|
||||
Dynare(const Dynare& dyn);
|
||||
DynamicModel* clone() const
|
||||
{return new Dynare(*this);}
|
||||
virtual ~Dynare();
|
||||
int nstat() const
|
||||
{return model->getAtoms().nstat();}
|
||||
int nboth() const
|
||||
{return model->getAtoms().nboth();}
|
||||
int npred() const
|
||||
{return model->getAtoms().npred();}
|
||||
int nforw() const
|
||||
{return model->getAtoms().nforw();}
|
||||
int nexog() const
|
||||
{return model->getAtoms().nexo();}
|
||||
int nys() const
|
||||
{return model->getAtoms().nys();}
|
||||
int nyss() const
|
||||
{return model->getAtoms().nyss();}
|
||||
int ny() const
|
||||
{return model->getAtoms().ny();}
|
||||
int order() const
|
||||
{return model->getOrder();}
|
||||
|
||||
const NameList& getAllEndoNames() const
|
||||
{return *dnl;}
|
||||
const NameList& getStateNames() const
|
||||
{return *dsnl;}
|
||||
const NameList& getExogNames() const
|
||||
{return *denl;}
|
||||
|
||||
TwoDMatrix& getVcov()
|
||||
{return model->getVcov();}
|
||||
const TwoDMatrix& getVcov() const
|
||||
{return model->getVcov();}
|
||||
Vector& getParams()
|
||||
{return model->getParams();}
|
||||
const Vector& getParams() const
|
||||
{return model->getParams();}
|
||||
void setInitOuter(const Vector& x)
|
||||
{model->setInitOuter(x);}
|
||||
|
||||
const TensorContainer<FSSparseTensor>& getModelDerivatives() const
|
||||
{return md;}
|
||||
const Vector& getSteady() const
|
||||
{return *ysteady;}
|
||||
Vector& getSteady()
|
||||
{return *ysteady;}
|
||||
const ogdyn::DynareModel& getModel() const
|
||||
{return *model;}
|
||||
|
||||
// here is true public interface
|
||||
void solveDeterministicSteady(Vector& steady);
|
||||
void solveDeterministicSteady()
|
||||
{solveDeterministicSteady(*ysteady);}
|
||||
void evaluateSystem(Vector& out, const Vector& yy, const Vector& xx);
|
||||
void evaluateSystem(Vector& out, const Vector& yym, const Vector& yy,
|
||||
const Vector& yyp, const Vector& xx);
|
||||
void calcDerivatives(const Vector& yy, const Vector& xx);
|
||||
void calcDerivativesAtSteady();
|
||||
|
||||
void writeMat4(FILE* fd, const char* prefix) const;
|
||||
void writeDump(const std::string& basename) const;
|
||||
private:
|
||||
void writeModelInfo(Journal& jr) const;
|
||||
};
|
||||
|
||||
class DynareEvalLoader : public ogp::FormulaEvalLoader, public Vector {
|
||||
public:
|
||||
DynareEvalLoader(const ogp::FineAtoms& a, Vector& out);
|
||||
void load(int i, double res)
|
||||
{operator[](i) = res;}
|
||||
};
|
||||
|
||||
class DynareDerEvalLoader : public ogp::FormulaDerEvalLoader {
|
||||
protected:
|
||||
const ogp::FineAtoms& atoms;
|
||||
TensorContainer<FSSparseTensor>& md;
|
||||
public:
|
||||
DynareDerEvalLoader(const ogp::FineAtoms& a, TensorContainer<FSSparseTensor>& mod_ders,
|
||||
int order);
|
||||
void load(int i, int iord, const int* vars, double res);
|
||||
};
|
||||
|
||||
class DynareJacobian : public ogu::Jacobian, public ogp::FormulaDerEvalLoader {
|
||||
protected:
|
||||
Dynare& d;
|
||||
public:
|
||||
DynareJacobian(Dynare& dyn);
|
||||
virtual ~DynareJacobian() {}
|
||||
void load(int i, int iord, const int* vars, double res);
|
||||
void eval(const Vector& in);
|
||||
};
|
||||
|
||||
class DynareVectorFunction : public ogu::VectorFunction {
|
||||
protected:
|
||||
Dynare& d;
|
||||
public:
|
||||
DynareVectorFunction(Dynare& dyn)
|
||||
: d(dyn) {}
|
||||
virtual ~DynareVectorFunction() {}
|
||||
int inDim() const
|
||||
{return d.ny();}
|
||||
int outDim() const
|
||||
{return d.ny();}
|
||||
void eval(const ConstVector& in, Vector& out);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Local Variables:
|
||||
// mode:C++
|
||||
// End:
|
|
@ -0,0 +1,282 @@
|
|||
// Copyright (C) 2006, Ondra Kamenik
|
||||
|
||||
// $Id: dynare_atoms.cpp 1765 2008-03-31 14:32:08Z kamenik $
|
||||
|
||||
#include "parser/cc/parser_exception.h"
|
||||
#include "utils/cc/exception.h"
|
||||
|
||||
#include "dynare_atoms.h"
|
||||
|
||||
#include <string>
|
||||
#include <cmath>
|
||||
|
||||
using namespace ogdyn;
|
||||
using std::string;
|
||||
|
||||
void DynareStaticAtoms::register_name(const char* name)
|
||||
{
|
||||
if (varnames.query(name))
|
||||
throw ogp::ParserException(string("The name ")+name+" is not unique.", 0);
|
||||
StaticAtoms::register_name(name);
|
||||
}
|
||||
|
||||
int DynareStaticAtoms::check_variable(const char* name) const
|
||||
{
|
||||
if (0 == varnames.query(name))
|
||||
throw ogp::ParserException(std::string("Unknown name <")+name+">", 0);
|
||||
Tvarmap::const_iterator it = vars.find(name);
|
||||
if (it == vars.end())
|
||||
return -1;
|
||||
else
|
||||
return (*it).second;
|
||||
}
|
||||
|
||||
DynareDynamicAtoms::DynareDynamicAtoms(const DynareDynamicAtoms& dda)
|
||||
: SAtoms(dda)
|
||||
{
|
||||
// fill atom_type
|
||||
for (Tatypemap::const_iterator it = dda.atom_type.begin();
|
||||
it != dda.atom_type.end(); ++it)
|
||||
atom_type.insert(Tatypemap::value_type(varnames.query((*it).first), (*it).second));
|
||||
}
|
||||
|
||||
void DynareDynamicAtoms::parse_variable(const char* in, std::string& out, int& ll) const
|
||||
{
|
||||
ll = 0;
|
||||
std::string str = in;
|
||||
int left = str.find_first_of("({");
|
||||
if (left != -1) {
|
||||
out = str.substr(0, left);
|
||||
left++;
|
||||
int right = str.find_first_of(")}", left);
|
||||
if ((int)string::npos == right)
|
||||
throw ogp::ParserException(
|
||||
string("Syntax error when parsing Dynare atom <")+in+">.", 0);
|
||||
std::string tmp(str, left, right-left);
|
||||
sscanf(tmp.c_str(), "%d", &ll);
|
||||
} else {
|
||||
out = in;
|
||||
}
|
||||
}
|
||||
|
||||
void DynareDynamicAtoms::register_uniq_endo(const char* name)
|
||||
{
|
||||
FineAtoms::register_uniq_endo(name);
|
||||
atom_type.insert(Tatypemap::value_type(varnames.query(name), endovar));
|
||||
}
|
||||
|
||||
void DynareDynamicAtoms::register_uniq_exo(const char* name)
|
||||
{
|
||||
FineAtoms::register_uniq_exo(name);
|
||||
atom_type.insert(Tatypemap::value_type(varnames.query(name), exovar));
|
||||
}
|
||||
|
||||
void DynareDynamicAtoms::register_uniq_param(const char* name)
|
||||
{
|
||||
FineAtoms::register_uniq_param(name);
|
||||
atom_type.insert(Tatypemap::value_type(varnames.query(name), param));
|
||||
}
|
||||
|
||||
bool DynareDynamicAtoms::is_type(const char* name, atype tp) const
|
||||
{
|
||||
Tatypemap::const_iterator it = atom_type.find(name);
|
||||
if (it != atom_type.end() && (*it).second == tp)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
void DynareDynamicAtoms::print() const
|
||||
{
|
||||
SAtoms::print();
|
||||
printf("Name types:\n");
|
||||
for (Tatypemap::const_iterator it = atom_type.begin();
|
||||
it != atom_type.end(); ++it)
|
||||
printf("name=%s type=%s\n", (*it).first,
|
||||
((*it).second == endovar) ? "endovar" : (((*it).second == exovar)? "exovar" : "param"));
|
||||
}
|
||||
|
||||
std::string DynareDynamicAtoms::convert(int t) const
|
||||
{
|
||||
if (t < ogp::OperationTree::num_constants) {
|
||||
throw ogu::Exception(__FILE__,__LINE__,
|
||||
"Tree index is a built-in constant in DynareDynamicAtoms::convert");
|
||||
return std::string();
|
||||
}
|
||||
if (is_constant(t)) {
|
||||
double v = get_constant_value(t);
|
||||
char buf[100];
|
||||
sprintf(buf, "%20.16g", v);
|
||||
const char* s = buf;
|
||||
while (*s == ' ')
|
||||
++s;
|
||||
return std::string(s);
|
||||
}
|
||||
|
||||
const char* s = name(t);
|
||||
if (is_type(s, endovar)) {
|
||||
int ll = lead(t);
|
||||
char buf[100];
|
||||
if (ll)
|
||||
sprintf(buf, "%s(%d)", s, ll);
|
||||
else
|
||||
sprintf(buf, "%s", s);
|
||||
return std::string(buf);
|
||||
}
|
||||
|
||||
return std::string(s);
|
||||
}
|
||||
|
||||
|
||||
void DynareAtomValues::setValues(ogp::EvalTree& et) const
|
||||
{
|
||||
// set constants
|
||||
atoms.setValues(et);
|
||||
|
||||
// set parameteres
|
||||
for (unsigned int i = 0; i < atoms.get_params().size(); i++) {
|
||||
try {
|
||||
const ogp::DynamicAtoms::Tlagmap& lmap = atoms.lagmap(atoms.get_params()[i]);
|
||||
for (ogp::DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
|
||||
it != lmap.end(); ++it) {
|
||||
int t = (*it).second;
|
||||
et.set_nulary(t, paramvals[i]);
|
||||
}
|
||||
} catch (const ogu::Exception& e) {
|
||||
// ignore non-referenced parameters; there is no
|
||||
// lagmap for them
|
||||
}
|
||||
}
|
||||
|
||||
// set endogenous
|
||||
for (unsigned int outer_i = 0; outer_i < atoms.get_endovars().size(); outer_i++) {
|
||||
try {
|
||||
const ogp::DynamicAtoms::Tlagmap& lmap = atoms.lagmap(atoms.get_endovars()[outer_i]);
|
||||
for (ogp::DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
|
||||
it != lmap.end(); ++it) {
|
||||
int ll = (*it).first;
|
||||
int t = (*it).second;
|
||||
int i = atoms.outer2y_endo()[outer_i];
|
||||
if (ll == -1) {
|
||||
et.set_nulary(t, yym[i-atoms.nstat()]);
|
||||
}
|
||||
else if (ll == 0)
|
||||
et.set_nulary(t, yy[i]);
|
||||
else
|
||||
et.set_nulary(t, yyp[i-atoms.nstat()-atoms.npred()]);
|
||||
}
|
||||
} catch (const ogu::Exception& e) {
|
||||
// ignore non-referenced endogenous variables; there is no
|
||||
// lagmap for them
|
||||
}
|
||||
}
|
||||
|
||||
// set exogenous
|
||||
for (unsigned int outer_i = 0; outer_i < atoms.get_exovars().size(); outer_i++) {
|
||||
try {
|
||||
const ogp::DynamicAtoms::Tlagmap& lmap = atoms.lagmap(atoms.get_exovars()[outer_i]);
|
||||
for (ogp::DynamicAtoms::Tlagmap::const_iterator it = lmap.begin();
|
||||
it != lmap.end(); ++it) {
|
||||
int ll = (*it).first;
|
||||
if (ll == 0) { // this is always true because of checks
|
||||
int t = (*it).second;
|
||||
int i = atoms.outer2y_exo()[outer_i];
|
||||
et.set_nulary(t, xx[i]);
|
||||
}
|
||||
}
|
||||
} catch (const ogu::Exception& e) {
|
||||
// ignore non-referenced variables
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void DynareStaticSteadyAtomValues::setValues(ogp::EvalTree& et) const
|
||||
{
|
||||
// set constants
|
||||
atoms_static.setValues(et);
|
||||
|
||||
// set parameters
|
||||
for (unsigned int i = 0; i < atoms_static.get_params().size(); i++) {
|
||||
const char* name = atoms_static.get_params()[i];
|
||||
int t = atoms_static.index(name);
|
||||
if (t != -1) {
|
||||
int idyn = atoms.name2outer_param(name);
|
||||
et.set_nulary(t, paramvals[idyn]);
|
||||
}
|
||||
}
|
||||
|
||||
// set endogenous
|
||||
for (unsigned int i = 0; i < atoms_static.get_endovars().size(); i++) {
|
||||
const char* name = atoms_static.get_endovars()[i];
|
||||
int t = atoms_static.index(name);
|
||||
if (t != -1) {
|
||||
int idyn = atoms.outer2y_endo()[atoms.name2outer_endo(name)];
|
||||
et.set_nulary(t, yy[idyn]);
|
||||
}
|
||||
}
|
||||
|
||||
// set exogenous
|
||||
for (unsigned int i = 0; i < atoms_static.get_exovars().size(); i++) {
|
||||
const char* name = atoms_static.get_exovars()[i];
|
||||
int t = atoms_static.index(name);
|
||||
if (t != -1)
|
||||
et.set_nulary(t, 0.0);
|
||||
}
|
||||
}
|
||||
|
||||
DynareSteadySubstitutions::DynareSteadySubstitutions(const ogp::FineAtoms& a,
|
||||
const ogp::OperationTree& tree,
|
||||
const Tsubstmap& subst,
|
||||
const Vector& pvals, Vector& yy)
|
||||
: atoms(a), y(yy)
|
||||
{
|
||||
// fill the vector of left and right hand sides
|
||||
for (Tsubstmap::const_iterator it = subst.begin();
|
||||
it != subst.end(); ++it) {
|
||||
left_hand_sides.push_back((*it).first);
|
||||
right_hand_sides.push_back((*it).second);
|
||||
}
|
||||
|
||||
// evaluate right hand sides
|
||||
DynareSteadyAtomValues dsav(atoms, pvals, y);
|
||||
ogp::FormulaCustomEvaluator fe(tree, right_hand_sides);
|
||||
fe.eval(dsav, *this);
|
||||
}
|
||||
|
||||
void DynareSteadySubstitutions::load(int i, double res)
|
||||
{
|
||||
const char* name = left_hand_sides[i];
|
||||
int iouter = atoms.name2outer_endo(name);
|
||||
int iy = atoms.outer2y_endo()[iouter];
|
||||
if (! std::isfinite(y[iy]))
|
||||
y[iy] = res;
|
||||
}
|
||||
|
||||
DynareStaticSteadySubstitutions::
|
||||
DynareStaticSteadySubstitutions(const ogp::FineAtoms& a, const ogp::StaticFineAtoms& sa,
|
||||
const ogp::OperationTree& tree,
|
||||
const Tsubstmap& subst,
|
||||
const Vector& pvals, Vector& yy)
|
||||
: atoms(a), atoms_static(sa), y(yy)
|
||||
{
|
||||
// fill the vector of left and right hand sides
|
||||
for (Tsubstmap::const_iterator it = subst.begin();
|
||||
it != subst.end(); ++it) {
|
||||
left_hand_sides.push_back((*it).first);
|
||||
right_hand_sides.push_back((*it).second);
|
||||
}
|
||||
|
||||
// evaluate right hand sides
|
||||
DynareStaticSteadyAtomValues dsav(atoms, atoms_static, pvals, y);
|
||||
ogp::FormulaCustomEvaluator fe(tree, right_hand_sides);
|
||||
fe.eval(dsav, *this);
|
||||
}
|
||||
|
||||
void DynareStaticSteadySubstitutions::load(int i, double res)
|
||||
{
|
||||
const char* name = left_hand_sides[i];
|
||||
int iouter = atoms.name2outer_endo(name);
|
||||
int iy = atoms.outer2y_endo()[iouter];
|
||||
if (! std::isfinite(y[iy]))
|
||||
y[iy] = res;
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue