Merge branch 'master' into bitbucket-use-dynSeries
commit
f054428318
|
@ -181,6 +181,7 @@ AC_CONFIG_FILES([Makefile
|
|||
doc/parallel/Makefile
|
||||
doc/internals/Makefile
|
||||
doc/gsa/Makefile
|
||||
doc/dseries-and-reporting/Makefile
|
||||
tests/Makefile
|
||||
matlab/dynare_version.m
|
||||
windows/dynare-version.nsi
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
SUBDIRS = preprocessor macroprocessor userguide parallel internals gsa
|
||||
SUBDIRS = preprocessor macroprocessor userguide parallel internals gsa dseries-and-reporting
|
||||
|
||||
info_TEXINFOS = dynare.texi
|
||||
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
if HAVE_PDFLATEX
|
||||
if HAVE_BEAMER
|
||||
pdf-local: dseriesReporting.pdf
|
||||
endif
|
||||
endif
|
||||
|
||||
SRC = dseriesReporting.tex
|
||||
|
||||
EXTRA_DIST = $(SRC)
|
||||
|
||||
dseriesReporting.pdf: $(SRC)
|
||||
$(PDFLATEX) dseriesReporting
|
||||
$(PDFLATEX) dseriesReporting
|
||||
|
||||
clean-local:
|
||||
rm -f dseriesReporting.pdf *.toc *.aux *.log *.nav *.snm *.vrb *.out *~
|
|
@ -0,0 +1,684 @@
|
|||
\documentclass[10pt]{beamer}
|
||||
|
||||
\usepackage{tikz}
|
||||
\usetikzlibrary{positioning,shapes,shadows,arrows}
|
||||
\tikzstyle{abstract}=[rectangle, rounded corners, draw=black, anchor=north, fill=blue!10, text centered, minimum height={height("Gp")+2pt}, minimum width=3cm, font=\footnotesize]
|
||||
|
||||
\definecolor{links}{HTML}{0000CC}
|
||||
\hypersetup{colorlinks,linkcolor=,urlcolor=links}
|
||||
|
||||
\mode<handout>
|
||||
{
|
||||
\usepackage{pgfpages}
|
||||
\pgfpagesuselayout{4 on 1}[a4paper,border shrink=3mm,landscape]
|
||||
\usetheme{CambridgeUS}
|
||||
\usecolortheme{lily}
|
||||
}
|
||||
|
||||
\mode<beamer>
|
||||
{
|
||||
\usetheme{CambridgeUS}
|
||||
}
|
||||
|
||||
\AtBeginSection[]
|
||||
{
|
||||
\begin{frame}
|
||||
\frametitle{Outline}
|
||||
\tableofcontents[currentsection]
|
||||
\end{frame}
|
||||
}
|
||||
|
||||
\setbeamerfont{frametitle}{family=\rmfamily,series=\bfseries,size={\fontsize{10}{10}}}
|
||||
\setbeamertemplate{frametitle continuation}[from second]
|
||||
|
||||
\title{Dynare Time Series \& Reporting}
|
||||
\author{Houtan Bastani}
|
||||
\institute{CEPREMAP}
|
||||
\date{13 June 2014}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\begin{frame}
|
||||
\titlepage
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}[t]
|
||||
\frametitle{Outline}
|
||||
\tableofcontents
|
||||
\end{frame}
|
||||
|
||||
%
|
||||
% DATES
|
||||
%
|
||||
\section{Time Series}
|
||||
|
||||
\subsection{Overview}
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Overview}
|
||||
\begin{itemize}
|
||||
\item Provide support for time series in Dynare
|
||||
\item Introduced in Dynare 4.4
|
||||
\item Currently only used for reporting
|
||||
\item Use will increase with time (\textit{e.g.,} to be included in new estimation code)
|
||||
\item NB: More complete information is included in the Dynare manual
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\subsection{Syntax}
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Syntax}
|
||||
\begin{itemize}
|
||||
\item Two components of a time series:
|
||||
\begin{itemize}
|
||||
\item A time component. In Dynare: \texttt{dates}
|
||||
\item A data component mapped to time. In Dynare: \texttt{dseries}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\subsubsection{\texttt{dates} Syntax}
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{\texttt{dates} Syntax}
|
||||
\begin{itemize}
|
||||
\item The \texttt{dates} command creates an object that represents at least one date at a given frequency
|
||||
\begin{itemize}
|
||||
\item Yearly: \texttt{`Y', `y', 1}
|
||||
\item Quarterly: \texttt{`Q', `q', 4}
|
||||
\item Monthly: \texttt{`M', `m', 12}
|
||||
\item Weekly: \texttt{`W', `w', 52}
|
||||
\end{itemize}
|
||||
\item It has two slightly different syntaxes
|
||||
\begin{itemize}
|
||||
\item One for inclusion in \texttt{.m} files
|
||||
\item One for inclusion in \texttt{.mod} files (simplified using the preprocessor)
|
||||
\begin{itemize}
|
||||
\item To prevent date translation, escape the date with `\texttt{\$}' (\textit{e.g.,} \texttt{\$2020y})
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\item Minimal restrictions on dates. Can be
|
||||
\begin{itemize}
|
||||
\item Negative
|
||||
\item Empty
|
||||
\item Noncontiguous
|
||||
\end{itemize}
|
||||
\item Can index a \texttt{dates} object. If \texttt{t} is a \texttt{dates} object, then
|
||||
\begin{itemize}
|
||||
\item \texttt{t(1) \% refers to the first date}
|
||||
\item \texttt{t(1:3) \% refers to the first three dates}
|
||||
\item \texttt{t([1,4,5]) \% refers to the first, fourth, and fifth dates}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Creating a new \texttt{dates} object}
|
||||
\begin{itemize}
|
||||
\item A single date:
|
||||
\begin{itemize}
|
||||
\item In a \texttt{.m} file: \texttt{t = dates(`1999y');}
|
||||
\item In a \texttt{.mod} file: \texttt{t = 1999y;}
|
||||
\end{itemize}
|
||||
\item A date range:
|
||||
\begin{itemize}
|
||||
\item In a \texttt{.m} file: \texttt{dr = dates(`1999y'):dates(`2020y');}
|
||||
\item In a \texttt{.mod} file: \texttt{dr = 1999y:2020y;}
|
||||
\end{itemize}
|
||||
\item In what follows, \texttt{t} and \texttt{dr} are as above. Operations that assign to \texttt{t} and \texttt{dr} keep the value thereafter.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Modifying \texttt{dates}}
|
||||
\begin{itemize}
|
||||
\item \texttt{append}: appends a date to the date
|
||||
\begin{itemize}
|
||||
\item \texttt{t=t.append(dates(`1900y')); \% <dates: 1999Y, 1900Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{horzcat}: horizontal concatenation
|
||||
\begin{itemize}
|
||||
\item \texttt{[t t]; \% <dates: 1999Y, 1900Y, 1999Y, 1900Y>};
|
||||
\end{itemize}
|
||||
\item \texttt{minus}: either the distance between two \texttt{dates} or lag one \texttt{dates}
|
||||
\begin{itemize}
|
||||
\item \texttt{t-t \% [0 0]'}
|
||||
\item \texttt{t-[3 3]' \% <dates: 1996Y, 1897Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{plus}: either combine two \texttt{dates} or forward one \texttt{dates}
|
||||
\begin{itemize}
|
||||
\item \texttt{t+t \% <dates: 1999Y, 1900Y, 1999Y, 1900Y>}
|
||||
\item \texttt{t+[3 3]' \% <dates: 2002Y, 1903Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{pop}: remove last element
|
||||
\begin{itemize}
|
||||
\item \texttt{t.pop(); \% <dates: 1999Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{sort}: sort dates in ascending order
|
||||
\begin{itemize}
|
||||
\item \texttt{t.sort(); \% <dates: 1900Y, 1999Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{uminus}: shifts dates back one period
|
||||
\begin{itemize}
|
||||
\item \texttt{-t; \% <dates: 1998Y, 1899Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{unique}: removes repetitions (keeping last unique value)
|
||||
\begin{itemize}
|
||||
\item \texttt{t.append(dates(`1999y')).unique() \% <dates: 1900Y, 1999Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{uplus}: shifts dates forward one period
|
||||
\begin{itemize}
|
||||
\item \texttt{++t; \% <dates: 2001Y, 1902Y>}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Getting info about \texttt{dates}}
|
||||
\begin{itemize}
|
||||
\item \texttt{char}: returns a single date as a string
|
||||
\begin{itemize}
|
||||
\item \texttt{t(1).char() \% 1999Y}
|
||||
\end{itemize}
|
||||
\item \texttt{double}: returns a floating point representation of the date
|
||||
\begin{itemize}
|
||||
\item \texttt{t.double() \% [1999 1900]'}
|
||||
\end{itemize}
|
||||
\item \texttt{freq}: returns the frequency
|
||||
\begin{itemize}
|
||||
\item \texttt{t.freq; \% 1}
|
||||
\end{itemize}
|
||||
\item \texttt{isequal}: returns true if the two arguments are equal
|
||||
\begin{itemize}
|
||||
\item \texttt{isequal(t,t) \% 1}
|
||||
\end{itemize}
|
||||
\item \texttt{length}: returns the number of dates
|
||||
\begin{itemize}
|
||||
\item \texttt{t.length() \% 2}
|
||||
\end{itemize}
|
||||
\item \texttt{max}: returns the maximum \texttt{dates} in the arguments
|
||||
\begin{itemize}
|
||||
\item \texttt{max(t,dr) \% <dates: 2020Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{min}: returns the minimum \texttt{dates} in the arguments
|
||||
\begin{itemize}
|
||||
\item \texttt{min(t,dr) \% <dates: 1900Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{eq, ge, gt, le, lt, ne}: returns boolean value of comparison
|
||||
\begin{itemize}
|
||||
\item \texttt{t==t \% [1 1]'}
|
||||
\item \texttt{t>=dates(`1950y') \% [1 0]'}
|
||||
\item \texttt{t$\thicksim$=dates(`1999y') \% [0 1]'}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Set Operations on \texttt{dates}}
|
||||
\begin{itemize}
|
||||
\item \texttt{intersect}: returns the intersection of the arguments
|
||||
\begin{itemize}
|
||||
\item \texttt{intersect(t,dr) \% <dates: 1999Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{isempty}: returns true if the argument is empty
|
||||
\begin{itemize}
|
||||
\item \texttt{isempty(t) \% 0}
|
||||
\item \texttt{isempty(dates()) \% 1}
|
||||
\end{itemize}
|
||||
\item \texttt{setdiff}: returns dates present in first arg but not in second
|
||||
\begin{itemize}
|
||||
\item \texttt{setdiff(t,dr) \% <dates: 1900Y>}
|
||||
\end{itemize}
|
||||
\item \texttt{union}:
|
||||
\begin{itemize}
|
||||
\item \texttt{union(dr,t) \% <dates: 1900Y, 1999Y, ..., 2019Y, 2020Y>}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
%
|
||||
% DSERIES
|
||||
%
|
||||
\subsubsection{\texttt{dseries} Syntax}
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{\texttt{dseries} Syntax}
|
||||
\begin{itemize}
|
||||
\item A \texttt{dseries} is composed of one or more individual series
|
||||
\item All time series in a dseries must have the same frequency
|
||||
\item A \texttt{dseries} runs from the earliest date to the latest date, with \texttt{NaN}'s inserted to pad the shorter series
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Creating a new \texttt{dseries} object}
|
||||
\begin{itemize}
|
||||
\item Load series from data file (\texttt{.csv, .xls}). Dates in first column, optional variable names in first row
|
||||
\begin{itemize}
|
||||
\item \texttt{ts=dseries(`data.csv');}
|
||||
\end{itemize}
|
||||
\item Load series from a Matlab/Octave file (\texttt{.m, .mat}). Must define the variables \texttt{INIT\_\_}, \texttt{NAMES\_\_}, and, optionally, \texttt{TEX\_\_}.
|
||||
\begin{itemize}
|
||||
\item \texttt{ts=dseries(`data.m');}
|
||||
\end{itemize}
|
||||
\item Load data directly. Here: one variable, `MyVar1', with three annual observations starting in 1999. Only first argument is required.
|
||||
\begin{itemize}
|
||||
\item \texttt{ts=dseries([1;2;3], `1999y', \{`MyVar1'\}, \{`MyVar\_1'\});}
|
||||
\end{itemize}
|
||||
\item Create an empty time series. Useful for programmatically creating a series.
|
||||
\begin{itemize}
|
||||
\item \texttt{ts=dseries();}
|
||||
\item \texttt{ts=dseries(dates(`1999y'));}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Referencing data from a \texttt{dseries}}
|
||||
\begin{itemize}
|
||||
\item For the following, let
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1=dseries([1;2;3], `1999y', \{`MyVar1'\}, \{`MyVar\_1'\})}
|
||||
\item \texttt{ts2=dseries([4;5;6], `2000y', \{`MyVar2'\}, \{`MyVar\_2'\})}
|
||||
\item \texttt{ts3=[ts1 ts2]}
|
||||
\end{itemize}
|
||||
\item To get \texttt{MyVar1} from \texttt{ts3}, you have three options
|
||||
\begin{itemize}
|
||||
\item \texttt{ts3\{1\}}
|
||||
\item \texttt{ts3.MyVar1}
|
||||
\item \texttt{ts3\{`MyVar1'\}}
|
||||
\end{itemize}
|
||||
\item To select a subsample of MyVar2 from 2001 to 2002:
|
||||
\begin{itemize}
|
||||
\item \texttt{ts3\{2\}(dates(`2001y'):dates(`2002y'))}
|
||||
\item \texttt{ts3.MyVar2(dates(`2001y'):dates(`2002y'))}
|
||||
\item \texttt{ts3\{`MyVar2'\}(dates(`2001y'):dates(`2002y'))}
|
||||
\end{itemize}
|
||||
\item To see the data for both variables in 2000:
|
||||
\begin{itemize}
|
||||
\item \texttt{ts3(`2000y')}
|
||||
\item \texttt{ts3\{[1 2]\}(`2000y')}
|
||||
\item \texttt{ts3\{`MyVar1',`MyVar2'\}(`2000y')}
|
||||
\item \texttt{ts3\{`MyVar@1,2@'\}(`2000y')}
|
||||
\item \texttt{ts3\{`MyVar[1-2]'\}(`2000y')}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Getting info about \texttt{dseries}}
|
||||
\begin{itemize}
|
||||
\item \texttt{eq, ne}: returns boolean value of element-wise comparison; series must have same start dates and number of observations
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1==ts3.MyVar1(dates(`1999y'):dates(`2001y')) \% [1 1 1]'}
|
||||
\item \texttt{ts1$\thicksim$=ts1 \% [0 0 0]'}
|
||||
\end{itemize}
|
||||
\item \texttt{isempty}: returns true if series is empty
|
||||
\begin{itemize}
|
||||
\item \texttt{isempty(dseries()) \% 1}
|
||||
\end{itemize}
|
||||
\item \texttt{isequal}: returns true if the series are equal
|
||||
\begin{itemize}
|
||||
\item \texttt{isequal(ts1,ts1) \% 1}
|
||||
\end{itemize}
|
||||
\item \texttt{size}: returns number of observations by number of variables
|
||||
\begin{itemize}
|
||||
\item \texttt{ts3.size() \% [4 2]}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Working with \texttt{dseries}}
|
||||
\begin{itemize}
|
||||
\item \texttt{baxter\_king\_filter}: the Baxter and King (1999) band pass filter
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.baxter\_king\_filter(high freq, low freq, K)}
|
||||
\end{itemize}
|
||||
\item \texttt{hpcycle}: HP Filters the \texttt{dseries}, returning the business cycle component
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.hpcycle(lambda) \% lambda = 1600 by default}
|
||||
\end{itemize}
|
||||
\item \texttt{hptrend}: HP Filters the \texttt{dseries}, returning the trend component
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.hptrend(lambda) \% lambda = 1600 by default}
|
||||
\end{itemize}
|
||||
\item \texttt{qdiff}: Quarterly difference; works on quarterly, monthly, and weekly series
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.qdiff()}
|
||||
\end{itemize}
|
||||
\item \texttt{qgrowth}: Quarterly growth rate: $\frac{ts_t-ts_{t-1}}{ts_{t-1}}$. Works on quarterly, monthly, and weekly series
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.qgrowth()}
|
||||
\end{itemize}
|
||||
\item \texttt{ydiff}: Yearly difference; works on yearly, quarterly, monthly, and weekly series
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.ydiff()}
|
||||
\end{itemize}
|
||||
\item \texttt{ygrowth}: Annual growth rate: $\frac{ts_t-ts_{t-1}}{ts_{t-1}}$. Works on yearly ($t-1$), quarterly ($t-4$), monthly ($t-12$), and weekly ($t-52$) series
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.ygrowth()}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Operations on \texttt{dseries}}
|
||||
\begin{itemize}
|
||||
\item \texttt{abs}: The absolute value of each data point
|
||||
\item \texttt{cumprod}/\texttt{cumsum}: Cumulative product/sum
|
||||
\item \texttt{exp}/\texttt{log}: The exponential/log for each data point
|
||||
\item \texttt{mrdivide}/\texttt{mtimes}: Division/Multiplication
|
||||
\item \texttt{minus}/\texttt{plus}: Subtraction/Addition
|
||||
\item \texttt{mpower}: Power
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1-ts2}
|
||||
\item \texttt{ts1-3}
|
||||
\item \texttt{ts1-[1:3]'}
|
||||
\end{itemize}
|
||||
\item \texttt{lag}/\texttt{lead}: lag/lead the series
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1(-1)}/\texttt{ts1(2)}
|
||||
\item \texttt{ts1.lag()}/\texttt{ts1.lead(2)}
|
||||
\end{itemize}
|
||||
\item \texttt{uminus}: Equivalent to multiplying by $-1$.
|
||||
\begin{itemize}
|
||||
\item \texttt{-ts1}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile,t]
|
||||
\frametitle{Modifying \texttt{dates}}
|
||||
\begin{itemize}
|
||||
\item \texttt{align}: Makes both series cover the same time range by padding with \texttt{NaN}'s
|
||||
\begin{itemize}
|
||||
\item \texttt{[ts1,ts2]=align(ts1,ts2)}
|
||||
\end{itemize}
|
||||
\item \texttt{chain}:
|
||||
\item \texttt{horzcat}: Join two or more \texttt{dseries}
|
||||
\begin{itemize}
|
||||
\item \texttt{ts3=[ts1 ts2]}
|
||||
\end{itemize}
|
||||
\item \texttt{insert}: Inserts variables from one \texttt{dseries} into another
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.insert(ts2, 2)}
|
||||
\end{itemize}
|
||||
\item \texttt{merge}: Merge two series
|
||||
\item \texttt{pop}: remove the last variable from \texttt{dseries}
|
||||
\item \texttt{plot}: plot a \texttt{dseries}
|
||||
\item \texttt{rename}: Rename a variable in a dseries
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.rename(`MyVar1',`MyFirstVar')}
|
||||
\end{itemize}
|
||||
\item \texttt{save}: Save a \texttt{dseries} in either \texttt{.csv} (default), \texttt{.m}, or \texttt{.mat}
|
||||
\item \texttt{set\_names}: Rename all variables in a \texttt{dseries}
|
||||
\begin{itemize}
|
||||
\item \texttt{ts3.set\_names(`NewName1',`NewName2')}
|
||||
\end{itemize}
|
||||
\item \texttt{tex\_rename}: Rename the \LaTeX\ name for a given variable
|
||||
\begin{itemize}
|
||||
\item \texttt{ts1.tex\_rename(`MyVar1',`MyVar\textbackslash\_1')}
|
||||
\end{itemize}
|
||||
\item \texttt{vertcat}: Add more observations to existing \texttt{dseries}
|
||||
\begin{itemize}
|
||||
\item \texttt{[ts1; dseries(4,`2002y',`MyVar1')]}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
|
||||
\subsection{Examples}
|
||||
|
||||
|
||||
|
||||
|
||||
%
|
||||
% REPORTING
|
||||
%
|
||||
\section{Reporting}
|
||||
\subsection{Overview}
|
||||
\begin{frame}
|
||||
\frametitle{Overview}
|
||||
\begin{itemize}
|
||||
\item Introduced in Dynare 4.4
|
||||
\item Introduce reporting functionality to Dynare
|
||||
\begin{itemize}
|
||||
\item Input: \texttt{dseries}
|
||||
\item Output: \LaTeX\ report \& compiled \texttt{.pdf}
|
||||
\end{itemize}
|
||||
\item Graphs and Tables are modular
|
||||
\begin{itemize}
|
||||
\item Can easily be included in another document
|
||||
\end{itemize}
|
||||
\item Graphs are produced in Ti$k$Z/PGFPlots (standard in a TeX distribution)
|
||||
\begin{itemize}
|
||||
\item Scales well
|
||||
\item Formatting follows that of the enclosing document
|
||||
\end{itemize}
|
||||
\item Dynare provides a subset of the many Ti$k$Z options
|
||||
\begin{itemize}
|
||||
\item You can easily modify the Ti$k$Z graph if the option you want is not in Dynare
|
||||
\end{itemize}
|
||||
\item Works with Matlab \& Octave
|
||||
\item Works approximately 5 times faster than Iris reporting
|
||||
\item NB: Must install a \LaTeX\ distribution to compile reports
|
||||
\begin{itemize}
|
||||
\item Windows: MiKTeX \url{http://miktex.org}
|
||||
\item Mac OS X: MacTeX \url{http://tug.org/mactex}
|
||||
\item Linux: TeX Live (from your package manager)
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{How Reporting Works}
|
||||
\begin{itemize}
|
||||
\item Reports are created command by command
|
||||
\begin{itemize}
|
||||
\item Hence the order of commands matters
|
||||
\end{itemize}
|
||||
\item All reporting commands act on the previously added object until an object of greater or equal hierarchy is added (see next slide)
|
||||
\begin{itemize}
|
||||
\item \textit{e.g.,} Once you add a \texttt{Page} to your report with the \texttt{addPage()} command, every \texttt{Section} you add via the \texttt{addSection()} command will be placed on this page. Only when you add another \texttt{Page} will items go on a new page.
|
||||
\item This will become clearer with an example
|
||||
\end{itemize}
|
||||
\item Options to reporting commands are passed in option name/value pairs
|
||||
\begin{itemize}
|
||||
\item \textit{e.g.,} \texttt{addPage(`title', \{`Page Title', `Page Subtitle'\})}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Reporting Class Hierarchy}
|
||||
\begin{itemize}
|
||||
\item Class names on the top half of the box, constructor names on the bottom
|
||||
\item Arrows represent what the new object can be added to
|
||||
\end{itemize}
|
||||
\begin{center}
|
||||
\begin{tikzpicture}[
|
||||
node distance = .45cm,
|
||||
auto,
|
||||
line/.style={->, >=stealth'},
|
||||
]
|
||||
\node (Report) [abstract, rectangle split, rectangle split parts=2]
|
||||
{
|
||||
\textbf{Report}
|
||||
\nodepart{second}\texttt{report(...);}
|
||||
};
|
||||
\node (Page) [abstract, rectangle split, rectangle split parts=2, below=of Report]
|
||||
{
|
||||
\textbf{Page}
|
||||
\nodepart{second}\texttt{addPage(...);}
|
||||
};
|
||||
\node (Section) [abstract, rectangle split, rectangle split parts=2, below=of Page]
|
||||
{
|
||||
\textbf{Section}
|
||||
\nodepart{second}\texttt{addSection(...);}
|
||||
};
|
||||
\node (Vspace) [abstract, rectangle split, rectangle split parts=2, below=of Section]
|
||||
{
|
||||
\textbf{Vspace}
|
||||
\nodepart{second}\texttt{addVspace(...);}
|
||||
};
|
||||
\node (Graph) [abstract, rectangle split, rectangle split parts=2, left=of Vspace]
|
||||
{
|
||||
\textbf{Graph}
|
||||
\nodepart{second}\texttt{addGraph(...);}
|
||||
};
|
||||
\node (Table) [abstract, rectangle split, rectangle split parts=2, right=of Vspace, text height=]
|
||||
{
|
||||
\textbf{Table}
|
||||
\nodepart{second}\texttt{addTable(...);}
|
||||
};
|
||||
\node (Series) [abstract, rectangle split, rectangle split parts=2, below=of Vspace]
|
||||
{
|
||||
\textbf{Series}
|
||||
\nodepart{second}\texttt{addSeries(...);}
|
||||
};
|
||||
\draw [line] (Series) to node { } (Table);
|
||||
\draw [line] (Series) to node { } (Graph);
|
||||
\draw [line] (Table) to node { } (Section);
|
||||
\draw [line] (Graph) to node { } (Section);
|
||||
\draw [line] (Vspace) to node { } (Section);
|
||||
\draw [line] (Section) to node { } (Page);
|
||||
\draw [line] (Page) to node { } (Report);
|
||||
\end{tikzpicture}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\subsection{Syntax}
|
||||
\begin{frame}
|
||||
\frametitle{Reporting Syntax}
|
||||
\begin{itemize}
|
||||
\item \texttt{report(\ldots)}: Create a report
|
||||
\begin{itemize}
|
||||
\item Options: \texttt{compiler}, \texttt{showDate}, \texttt{fileName}, \texttt{margin}, \texttt{marginUnit}, \texttt{orientation}, \texttt{paper}, \texttt{title}
|
||||
\end{itemize}
|
||||
\item \texttt{addPage(\ldots)}: Add a page to the \texttt{Report}
|
||||
\begin{itemize}
|
||||
\item Options: \texttt{footnote}, \texttt{orientation}, \texttt{paper}, \texttt{title}, \texttt{titleFormat}
|
||||
\end{itemize}
|
||||
\item \texttt{addSection(\ldots)}: Add a section to the current \texttt{Page}
|
||||
\begin{itemize}
|
||||
\item You can think of a section as a matrix. As graphs and/or tables are added to the section, it fills up from left to right. Once you have added \texttt{cols} objects, a new row is started.
|
||||
\item Options: \texttt{cols}, \texttt{height}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Reporting Syntax (continued)}
|
||||
\begin{itemize}
|
||||
\item \texttt{addGraph(\ldots)}: Add a graph to the current \texttt{Section}
|
||||
\begin{itemize}
|
||||
\item Options: \texttt{data}, \texttt{graphDirName}, \texttt{graphName}, \texttt{graphSize}, \texttt{height}, \texttt{showGrid}, \texttt{showLegend}, \texttt{showLegendBox}, \texttt{legendLocation}, \texttt{legendOrientation}, \texttt{legendFontSize}, \texttt{seriesToUse}, \texttt{shade}, \texttt{shadeColor}, \texttt{shadeOpacity}, \texttt{title}, \texttt{titleFormat}, \texttt{width}, \texttt{xlabel}, \texttt{ylabel}, \texttt{xAxisTight}, \texttt{xrange}, \texttt{xTicks}, \texttt{xTickLabels}, \texttt{xTickLabelAnchor}, \texttt{xTickLabelRotation}, \texttt{yAxisTight}, \texttt{yrange}, \texttt{showZeroLine}
|
||||
\end{itemize}
|
||||
\item \texttt{addTable(\ldots)}: Add a table to the current \texttt{Section}
|
||||
\begin{itemize}
|
||||
\item Options: \texttt{data}, \texttt{showHlines}, \texttt{precision}, \texttt{range}, \texttt{seriesToUse}, \texttt{tableDirName}, \texttt{tableName}, \texttt{title}, \texttt{titleFormat}, \texttt{vlineAfter}, \texttt{vlineAfterEndOfPeriod}, \texttt{showVlines}
|
||||
\end{itemize}
|
||||
\item \texttt{addSeries(\ldots)}: Add a series to the current \texttt{Graph} or \texttt{Table}
|
||||
\begin{itemize}
|
||||
\item \texttt{data}, \texttt{graphLineStyle}, \texttt{graphLineWidth}, \texttt{graphMarker}, \texttt{graphMarkerEdgeColor}, \texttt{graphMarkerFaceColor}, \texttt{graphMarkerSize}, \texttt{tableDataRhs}, \texttt{tableRowColor}, \texttt{tableRowIndent}, \texttt{tableShowMarkers}, \texttt{tableAlignRight}, \texttt{tableMarkerLimit}, \texttt{tableNegColor}, \texttt{tablePosColor}, \texttt{tableSubSectionHeader}, \texttt{zeroTol}
|
||||
\end{itemize}
|
||||
\item \texttt{addVspace(\ldots)}: Add a vertical space to the current \texttt{Section}
|
||||
\begin{itemize}
|
||||
\item \texttt{hline}, \texttt{number}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Output}
|
||||
To create a report:
|
||||
\begin{itemize}
|
||||
\item \texttt{write()}: Writes the report to a \LaTeX\ file
|
||||
\item \texttt{compile(\ldots)}: Compiles the report
|
||||
\begin{itemize}
|
||||
\item Options: \texttt{compiler}
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
Report Output
|
||||
\begin{itemize}
|
||||
\item Unless you pass the \texttt{fileName} option to \texttt{report(\ldots)}, the report will be located in your working directory with the name \texttt{report.tex}. The compiled version will be called \texttt{report.pdf}.
|
||||
\item Unless you pass the \texttt{graphDirName} or \texttt{graphName} options to \texttt{addGraph(\ldots)}, your graphs will be in a subdirectory of your working directory called \texttt{tmpRepDir}. The default name will take the form \texttt{graph\_pg9\_sec1\_row1\_col5.tex}
|
||||
\item The same holds for the tables (substituting `table' for `graph' above).
|
||||
\item Thus you can easily modify these files and include them in another report.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\subsection{Examples}
|
||||
|
||||
\section{Putting it All Together}
|
||||
\begin{frame}[fragile=singleslide]
|
||||
\frametitle{Create Report of IRFs from \texttt{example1.mod}}
|
||||
\begin{itemize}
|
||||
\item \texttt{example1.mod} is located in the Dynare \texttt{examples} directory
|
||||
\end{itemize}
|
||||
\begin{block}{Create \texttt{dseries} from IRFs}
|
||||
\begin{verbatim}
|
||||
@#define endovars=["y", "c", "k", "a", "h", "b"]
|
||||
@#for var in endovars
|
||||
shocke.@{var} = dseries(@{var}_e, 2014q3, `@{var}');
|
||||
shocku.@{var} = dseries(@{var}_u, 2014q3, `@{var}');
|
||||
@#endfor
|
||||
\end{verbatim}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}[fragile=singleslide]
|
||||
\frametitle{Create Report of IRFs from \texttt{example1.mod}}
|
||||
\begin{block}{Populate Report}
|
||||
\small{
|
||||
\begin{verbatim}
|
||||
@#for shock in ["e", "u"]
|
||||
report = report.addPage(`title', {`Dseries \& Report Example', ...
|
||||
`Shock to @{shock}'}, ...
|
||||
`titleFormat', {`\Large\bfseries', ...
|
||||
`\large\bfseries'});
|
||||
report = report.addSection(`cols', 2);
|
||||
@# for var in endovars
|
||||
report = report.addGraph(`data', shock@{shock}.@{var}, ...
|
||||
`title', `@{var}', ...
|
||||
`showGrid', false, ...
|
||||
`showZeroLine', true);
|
||||
@# endfor
|
||||
report = report.addSection(`cols', 1);
|
||||
report = report.addTable();
|
||||
@# for var in endovars
|
||||
report = report.addSeries(`data', shock@{shock}.@{var});
|
||||
@# endfor
|
||||
@#endfor
|
||||
\end{verbatim}
|
||||
}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}[fragile=singleslide]
|
||||
\frametitle{Create Report of IRFs from \texttt{example1.mod}}
|
||||
\begin{block}{Compile Report}
|
||||
\begin{verbatim}
|
||||
report.write();
|
||||
report.compile();
|
||||
\end{verbatim}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
\end{document}
|
|
@ -2758,7 +2758,8 @@ using the same solver as value @code{1}
|
|||
Use Chris Sims' solver
|
||||
|
||||
@item 4
|
||||
Trust-region solver with autoscaling.
|
||||
Splits the model into recursive blocks and solves each block in turn
|
||||
using a trust-region solver with autoscaling.
|
||||
|
||||
@item 5
|
||||
Newton algorithm with a sparse Gaussian elimination (SPE) (requires
|
||||
|
@ -2777,6 +2778,9 @@ each iteration (requires @code{bytecode} and/or @code{block} option,
|
|||
Newton algorithm with a Stabilized Bi-Conjugate Gradient (BICGSTAB)
|
||||
solver at each iteration (requires @code{bytecode} and/or @code{block}
|
||||
option, @pxref{Model declaration})
|
||||
|
||||
@item 9
|
||||
Trust-region algorithm on the entire model.
|
||||
@end table
|
||||
|
||||
@noindent
|
||||
|
@ -5820,7 +5824,7 @@ confidence interval.
|
|||
@table @code
|
||||
|
||||
@item periods = @var{INTEGER}
|
||||
Number of periods of the forecast. Default: @code{40}
|
||||
Number of periods of the forecast. Default: @code{5}.
|
||||
|
||||
@item conf_sig = @var{DOUBLE}
|
||||
@anchor{conf_sig} Level of significance for confidence
|
||||
|
@ -6097,37 +6101,37 @@ can be computed using an extended path method. The forecast scenario describing
|
|||
The first step is the forecast scenario initialization using the function @code{init_plan}:
|
||||
|
||||
@anchor{init_plan}
|
||||
@deffn {MATLAB/Octave command} HANDLE = init_plan(DATES) ;
|
||||
@deftypefn {MATLAB/Octave command} {HANDLE =} init_plan (DATES) ;
|
||||
|
||||
Creates a new forecast scenario for a forecast period (indicated as a dates class, see @ref{dates class members}). This function returns a handle to the new forecast scenario.
|
||||
|
||||
@end deffn
|
||||
@end deftypefn
|
||||
|
||||
The forecast scenario can contain some simple shocks on the exogenous variables. These shocks are described using the function @code{basic_plan}:
|
||||
|
||||
@anchor{basic_plan}
|
||||
@deffn {MATLAB/Octave command} HANDLE = basic_plan(HANDLE, 'VARIABLE_NAME', 'SHOCK_TYPE', DATES, MATLAB VECTOR OF DOUBLE | [DOUBLE | EXPRESSION [DOUBLE | | EXPRESSION] ] ) ;
|
||||
@deftypefn {MATLAB/Octave command} {HANDLE =} basic_plan (HANDLE, 'VARIABLE_NAME', 'SHOCK_TYPE', DATES, MATLAB VECTOR OF DOUBLE | [DOUBLE | EXPRESSION [DOUBLE | | EXPRESSION] ] ) ;
|
||||
|
||||
Adds to the forecast scenario a shock on the exogenous variable indicated between quotes in the second argument. The shock type has to be specified in the third argument between quotes: 'surprise' in case of an unexpected shock or 'perfect_foresight' for a perfectly anticipated shock. The fourth argument indicates the period of the shock using a dates class (see @ref{dates class members}). The last argument is the shock path indicated as a Matlab vector of double. This function returns the handle of the updated forecast scenario.
|
||||
|
||||
@end deffn
|
||||
@end deftypefn
|
||||
|
||||
The forecast scenario can also contain a constrained path on an endogenous variable. The values of the related exogenous variable compatible with the constrained path are in this case computed. In other words, a conditional forecast is performed. This kind of shock is described with the function @code{flip_plan}:
|
||||
|
||||
@anchor{flip_plan}
|
||||
@deffn {MATLAB/Octave command} HANDLE = flip_plan(HANDLE, 'VARIABLE_NAME', 'VARIABLE_NAME', 'SHOCK_TYPE', DATES, MATLAB VECTOR OF DOUBLE | [DOUBLE | EXPRESSION [DOUBLE | | EXPRESSION] ] ) ;
|
||||
@deftypefn {MATLAB/Octave command} {HANDLE =} flip_plan (HANDLE, 'VARIABLE_NAME', 'VARIABLE_NAME', 'SHOCK_TYPE', DATES, MATLAB VECTOR OF DOUBLE | [DOUBLE | EXPRESSION [DOUBLE | | EXPRESSION] ] ) ;
|
||||
|
||||
Adds to the forecast scenario a constrained path on the endogenous variable specified between quotes in the second argument. The associated exogenous variable provided in the third argument between quotes, is considered as an endogenous variable and its values compatible with the constrained path on the endogenous variable will be computed. The nature of the expectation on the constrained path has to be specified in the fourth argument between quotes: 'surprise' in case of an unexpected path or 'perfect_foresight' for a perfectly anticipated path. The fifth argument indicates the period where the path of the endogenous variable is constrained using a dates class (see @ref{dates class members}). The last argument contains the constrained path as a Matlab vector of double. This function returns the handle of the updated forecast scenario.
|
||||
|
||||
@end deffn
|
||||
@end deftypefn
|
||||
|
||||
Once the forecast scenario is fully described, the forecast is computed with the command @code{det_cond_forecast}:
|
||||
@anchor{det_cond_forecast}
|
||||
@deffn {MATLAB/Octave command} DSERIES = det_cond_forecast(HANDLE[, DSERIES [, DATES]]) ;
|
||||
@deftypefn {MATLAB/Octave command} {DSERIES =} det_cond_forecast (HANDLE[, DSERIES [, DATES]]) ;
|
||||
|
||||
Computes the forecast or the conditional forecast using an extended path method for the given forecast scenario (first argument). The past values of the endogenous and exogenous variables provided with a dseries class (see @ref{dseries class members}) can be indicated in the second argument. By default, the past values of the variables are equal to their steady-state values. The initial date of the forecast can be provided in the third argument. By default, the forecast will start at the first date indicated in the @code{init_plan} command. This function returns a dseries object containing the historical and forecast values for the endogenous and exogenous variables.
|
||||
|
||||
@end deffn
|
||||
@end deftypefn
|
||||
|
||||
|
||||
|
||||
|
@ -9867,7 +9871,7 @@ ts1 is a dseries object:
|
|||
|
||||
@deftypefn {dseries} {@var{B} = } baxter_king_filter (@var{A}, @var{hf}, @var{lf}, @var{K})
|
||||
|
||||
Implementation of the Baxter and King (1999) band pass filter for @dseries objects. This filter isolates business cycle fluctuations with a period of length ranging between @var{hf} (high frequency) to @var{lf} (low frequency) using a symmetric moving average smoother with @math{2K+1} points, so that K observations at the beginning and at the end of the sample are lost in the computation of the filter.
|
||||
Implementation of the Baxter and King (1999) band pass filter for @dseries objects. This filter isolates business cycle fluctuations with a period of length ranging between @var{hf} (high frequency) to @var{lf} (low frequency) using a symmetric moving average smoother with @math{2K+1} points, so that K observations at the beginning and at the end of the sample are lost in the computation of the filter. The default value for @var{hf} is @math{6}, for @var{lf} is @math{32}, and for @var{K} is 12.
|
||||
|
||||
@examplehead
|
||||
@example
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
function s = char(dd)

% Given a one element dates object, returns a string with the formatted date.
%
% INPUTS
% o dd     dates object with one element
%
% OUTPUTS
% o s      a string
%
% SPECIAL REQUIREMENTS
% none

% Copyright (C) 2014 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.

% Reject anything that is not a singleton: the original test (length(dd)>1)
% let an empty dates object through to date2string; ~=1 enforces the
% documented "one element" contract for empty objects too.
if length(dd)~=1
    error('The input argument must be a singleton dates object!')
end

% Delegate the actual formatting to the shared helper, using the
% object's internal time representation and frequency.
s = date2string(dd.time, dd.freq);
|
|
@ -37,7 +37,7 @@ switch S(1).type
|
|||
error(['dates::subsref: ' S(1).subs ' is not a method but a member!'])
|
||||
end
|
||||
B = builtin('subsref', A, S(1));
|
||||
case {'sort','unique','double','isempty','length'}% Public methods (without arguments)
|
||||
case {'sort','unique','double','isempty','length','char'}% Public methods (without input arguments)
|
||||
B = feval(S(1).subs,A);
|
||||
if length(S)>1 && isequal(S(2).type,'()') && isempty(S(2).subs)
|
||||
S = shiftS(S,1);
|
||||
|
@ -49,6 +49,9 @@ switch S(1).type
|
|||
else
|
||||
error('dates::subsref: Something is wrong in your syntax!')
|
||||
end
|
||||
case {'disp'}
|
||||
feval(S(1).subs,A);
|
||||
return
|
||||
otherwise
|
||||
error('dates::subsref: Unknown public member or method!')
|
||||
end
|
||||
|
|
|
@ -157,6 +157,9 @@ switch S(1).type
|
|||
case {'set_names','rename','tex_rename'}
|
||||
B = feval(S(1).subs,A,S(2).subs{:});
|
||||
S = shiftS(S,1);
|
||||
case {'disp'}
|
||||
feval(S(1).subs,A);
|
||||
return
|
||||
otherwise % Extract a sub-object by selecting one variable.
|
||||
ndx = find(strcmp(S(1).subs,A.name));
|
||||
if ~isempty(ndx)
|
||||
|
|
|
@ -109,7 +109,11 @@ if options_.solve_algo == 0
|
|||
info = 1;
|
||||
end
|
||||
elseif options_.solve_algo == 1
|
||||
[x,info]=solve1(func,x,1:nn,1:nn,jacobian_flag,options_.gstep, ...
|
||||
[x,info]=solve1(func,x,1:nn,1:nn,jacobian_flag,options_.gstep, ...
|
||||
tolf,options_.solve_tolx, ...
|
||||
options_.steady.maxit,options_.debug,varargin{:});
|
||||
elseif options_.solve_algo == 9
|
||||
[x,info]=trust_region(func,x,1:nn,1:nn,jacobian_flag,options_.gstep, ...
|
||||
tolf,options_.solve_tolx, ...
|
||||
options_.steady.maxit,options_.debug,varargin{:});
|
||||
elseif options_.solve_algo == 2 || options_.solve_algo == 4
|
||||
|
@ -161,5 +165,5 @@ elseif options_.solve_algo == 3
|
|||
[x,info] = csolve(func,x,[],1e-6,500,varargin{:});
|
||||
end
|
||||
else
|
||||
error('DYNARE_SOLVE: option solve_algo must be one of [0,1,2,3,4]')
|
||||
error('DYNARE_SOLVE: option solve_algo must be one of [0,1,2,3,4,9]')
|
||||
end
|
||||
|
|
|
@ -0,0 +1,194 @@
|
|||
function [res,A,info] = ep_problem_2(y,x,pm)

% Evaluates the stacked residuals (and, if a second output is requested, the
% sparse Jacobian) of the stochastic perfect-foresight problem used by the
% extended path algorithm. Future uncertainty is handled through a tree of
% "worlds": in the first pm.order periods future shocks are integrated out
% with the quadrature nodes/weights stored in pm, after which each world is
% simulated deterministically.
%
% INPUTS
% o y    vector of stacked unknown endogenous values (mapped into Y via pm.i_upd_y)
% o x    matrix of exogenous innovations (one row per period)
% o pm   structure holding the model (pm.dynamic_model handle, pm.params,
%        pm.steady_state), the quadrature data (pm.nodes, pm.weights,
%        pm.nnodes, pm.order), and the precomputed index vectors used to
%        assemble the stacked system (pm.i_rows, pm.i_cols*, pm.icA, ...)
%
% OUTPUTS
% o res  stacked residuals of the model equations, reordered by pm.i_upd_r
% o A    sparse Jacobian of res (dimension x dimension); only built when
%        nargout > 1
% o info always 0 here (no failure path sets it in this function)

info = 0;
res = [];
A = [];

% Unpack the problem description once; all names below are local aliases.
dynamic_model = pm.dynamic_model;
ny = pm.ny;
params = pm.params;
steady_state = pm.steady_state;
order = pm.order;
nodes = pm.nodes;
nnodes = pm.nnodes;
weights = pm.weights;
h_correction = pm.h_correction;
dimension = pm.dimension;
world_nbr = pm.world_nbr;
nnzA = pm.nnzA;
periods = pm.periods;
i_rows = pm.i_rows';
i_cols = pm.i_cols;
nyp = pm.nyp;
nyf = pm.nyf;
hybrid_order = pm.hybrid_order;
i_cols_1 = pm.i_cols_1;
i_cols_j = pm.i_cols_j;
icA = pm.icA;
i_cols_T = pm.i_cols_T;

% Split the dynamic-model column indices into lagged (p), contemporaneous (s)
% and lead (f) variables, and derive the corresponding column positions in A.
i_cols_p = i_cols(1:nyp);
i_cols_s = i_cols(nyp+(1:ny));
i_cols_f = i_cols(nyp+ny+(1:nyf));
i_cols_A = i_cols;
i_cols_Ap0 = i_cols_p;
i_cols_As = i_cols_s;
i_cols_Af0 = i_cols_f - ny;
i_hc = i_cols_f - 2*ny;

% nzA collects Jacobian triplets per (period, world); Y holds the full path
% of every world: initial condition in the first block, terminal condition
% (steady state) in the last, and the unknowns y in between.
nzA = cell(periods,world_nbr);
res = zeros(ny,periods,world_nbr);
Y = zeros(ny*(periods+2),world_nbr);
Y(1:ny,1) = pm.y0;
Y(end-ny+1:end,:) = repmat(steady_state,1,world_nbr);
Y(pm.i_upd_y) = y;
offset_r0 = 0;
% First phase: periods 1..order+1, where the world tree is still branching.
for i = 1:order+1
    i_w_p = 1;
    for j = 1:(1+(nnodes-1)*(i-1))
        innovation = x;
        if i <= order && j == 1
            % first world, integrating future shocks
            if nargout > 1
                A1 = sparse([],[],[],i*ny,dimension,nnzA*world_nbr);
            end
            for k=1:nnodes
                if nargout > 1
                    % Column offsets in A for the lagged block of this node.
                    if i == 2
                        i_cols_Ap = i_cols_Ap0;
                    elseif i > 2
                        i_cols_Ap = i_cols_Ap0 + ny*(i-2+(nnodes- ...
                                                          1)*(i-2)*(i-3)/2);
                    end
                    % Column offsets for the lead block; node 1 stays in
                    % world 1, other nodes branch into new worlds.
                    if k == 1
                        i_cols_Af = i_cols_Af0 + ny*(i-1+(nnodes-1)*i*(i-1)/ ...
                                                     2);
                    else
                        i_cols_Af = i_cols_Af0 + ny*(i-1+(nnodes-1)*i*(i-1)/ ...
                                                     2+(i-1)*(nnodes-1) ...
                                                     + k - 1);
                    end
                end
                if i > 1
                    % Overwrite next period's innovation with the quadrature node.
                    innovation(i+1,:) = nodes(k,:);
                end
                % k1 selects which world's column of Y supplies the lead values.
                if k == 1
                    k1 = 1;
                else
                    k1 = (nnodes-1)*(i-1)+k;
                end
                if hybrid_order == 2 && (k > 1 || i == order)
                    % Hybrid order 2: shift lead variables by the stored
                    % second-order correction term.
                    z = [Y(i_cols_p,1);
                         Y(i_cols_s,1);
                         Y(i_cols_f,k1)+h_correction(i_hc)];
                else
                    z = [Y(i_cols_p,1);
                         Y(i_cols_s,1);
                         Y(i_cols_f,k1)];
                end
                if nargout > 1
                    [d1,jacobian] = dynamic_model(z,innovation,params,steady_state,i+1);
                    if i == 1
                        % in first period we don't keep track of
                        % predetermined variables
                        i_cols_A = [i_cols_As - ny; i_cols_Af];
                        A1(i_rows,i_cols_A) = A1(i_rows,i_cols_A) + weights(k)*jacobian(:,i_cols_1);
                    else
                        i_cols_A = [i_cols_Ap; i_cols_As; i_cols_Af];
                        A1(i_rows,i_cols_A) = A1(i_rows,i_cols_A) + weights(k)*jacobian(:,i_cols_j);
                    end
                else
                    d1 = dynamic_model(z,innovation,params,steady_state,i+1);
                end
                % Accumulate the weighted residual (expectation over nodes).
                res(:,i,1) = res(:,i,1)+weights(k)*d1;
            end
            if nargout > 1
                % Store the accumulated Jacobian block as triplets.
                [ir,ic,v] = find(A1);
                nzA{i,j} = [ir,ic,v]';
            end
        elseif j > 1 + (nnodes-1)*(i-2)
            % new world, using previous state of world 1 and hit
            % by shocks from nodes
            if nargout > 1
                i_cols_Ap = i_cols_Ap0 + ny*(i-2+(nnodes-1)*(i-2)*(i-3)/2);
                i_cols_Af = i_cols_Af0 + ny*(i+(nnodes-1)*i*(i-1)/2+j-2);
            end
            k = j - (nnodes-1)*(i-2);
            innovation(i+1,:) = nodes(k,:);
            % Lagged values come from world 1 (the branching point).
            z = [Y(i_cols_p,1);
                 Y(i_cols_s,j);
                 Y(i_cols_f,j)];
            if nargout > 1
                [d1,jacobian] = dynamic_model(z,innovation,params,steady_state,i+1);
                i_cols_A = [i_cols_Ap; i_cols_As; i_cols_Af];
                [ir,ic,v] = find(jacobian(:,i_cols_j));
                nzA{i,j} = [i_rows(ir),i_cols_A(ic), v]';
            else
                d1 = dynamic_model(z,innovation,params,steady_state,i+1);
            end
            res(:,i,j) = d1;
            if nargout > 1
                i_cols_Af = i_cols_Af + ny;
            end
        else
            % existing worlds other than 1
            if nargout > 1
                i_cols_Ap = i_cols_Ap0 + ny*(i-2+(nnodes-1)*(i-2)*(i-3)/2+j-1);
                i_cols_Af = i_cols_Af0 + ny*(i+(nnodes-1)*i*(i-1)/2+j-2);
            end
            % Lagged, contemporaneous and lead values all come from world j.
            z = [Y(i_cols_p,j);
                 Y(i_cols_s,j);
                 Y(i_cols_f,j)];
            if nargout > 1
                [d1,jacobian] = dynamic_model(z,innovation,params,steady_state,i+1);
                i_cols_A = [i_cols_Ap; i_cols_As; i_cols_Af];
                [ir,ic,v] = find(jacobian(:,i_cols_j));
                nzA{i,j} = [i_rows(ir),i_cols_A(ic),v]';
                i_cols_Af = i_cols_Af + ny;
            else
                d1 = dynamic_model(z,innovation,params,steady_state,i+1);
            end
            res(:,i,j) = d1;
        end
        % Advance the row window of the stacked system by one equation block.
        i_rows = i_rows + ny;
        if mod(j,nnodes) == 0
            i_w_p = i_w_p + 1;
        end
        if nargout > 1 && i > 1
            i_cols_As = i_cols_As + ny;
        end
        offset_r0 = offset_r0 + ny;
    end
    % Shift the p/s/f column windows to the next period.
    i_cols_p = i_cols_p + ny;
    i_cols_s = i_cols_s + ny;
    i_cols_f = i_cols_f + ny;
end
% Second phase: periods order+2..periods, each world evolves deterministically.
for j=1:world_nbr
    i_rows_y = i_cols+(order+1)*ny;
    offset_c = ny*(order+(nnodes-1)*(order-1)*order/2+j-1);
    offset_r = offset_r0+(j-1)*ny;
    for i=order+2:periods
        if nargout > 1
            [d1,jacobian] = dynamic_model(Y(i_rows_y,j),x,params, ...
                                          steady_state,i+1);
            % In the terminal period the lead block is pinned by the
            % terminal condition, so a reduced column set (i_cols_T) is used.
            if i < periods
                [ir,ic,v] = find(jacobian(:,i_cols_j));
            else
                [ir,ic,v] = find(jacobian(:,i_cols_T));
            end
            nzA{i,j} = [offset_r+ir,offset_c+icA(ic), v]';
        else
            d1 = dynamic_model(Y(i_rows_y,j),x,params, ...
                               steady_state,i+1);
        end
        res(:,i,j) = d1;
        i_rows_y = i_rows_y + ny;
        offset_c = offset_c + world_nbr*ny;
        offset_r = offset_r + world_nbr*ny;
    end
end
if nargout > 1
    % Assemble the sparse Jacobian from all collected triplets at once.
    iA = [nzA{:}]';
    A = sparse(iA(:,1),iA(:,2),iA(:,3),dimension,dimension);
end
% Reorder/select the residuals to match the unknowns' ordering.
res = res(pm.i_upd_r);
|
|
@ -35,8 +35,11 @@ global M_ options_ oo_
|
|||
options_.verbosity = options_.ep.verbosity;
|
||||
verbosity = options_.ep.verbosity+options_.ep.debug;
|
||||
|
||||
% Set maximum number of iterations for the deterministic solver.
|
||||
options_.simul.maxit = options_.ep.maxit;
|
||||
|
||||
% Prepare a structure needed by the matlab implementation of the perfect foresight model solver
|
||||
pfm = setup_stochastic_perfect_foresight_model_solver(M_,options_,oo_,'Tensor-Gaussian-Quadrature');
|
||||
pfm = setup_stochastic_perfect_foresight_model_solver(M_,options_,oo_);
|
||||
|
||||
exo_nbr = M_.exo_nbr;
|
||||
periods = options_.periods;
|
||||
|
@ -53,12 +56,10 @@ if isempty(initial_conditions)
|
|||
end
|
||||
end
|
||||
|
||||
% Set maximum number of iterations for the deterministic solver.
|
||||
options_.simul.maxit = options_.ep.maxit;
|
||||
|
||||
% Set the number of periods for the perfect foresight model
|
||||
periods = options_.ep.periods;
|
||||
pfm.periods = options_.ep.periods;
|
||||
pfm.periods = periods;
|
||||
pfm.i_upd = pfm.ny+(1:pfm.periods*pfm.ny);
|
||||
|
||||
% keep a copy of pfm.i_upd
|
||||
|
@ -138,6 +139,17 @@ else
|
|||
pfm.dr = [];
|
||||
end
|
||||
|
||||
% number of nonzero derivatives
|
||||
pfm.nnzA = M_.NNZDerivatives(1);
|
||||
|
||||
% setting up integration nodes if order > 0
|
||||
if options_.ep.stochastic.order > 0
|
||||
[nodes,weights,nnodes] = setup_integration_nodes(options_.ep,pfm);
|
||||
pfm.nodes = nodes;
|
||||
pfm.weights = weights;
|
||||
pfm.nnodes = nnodes;
|
||||
end
|
||||
|
||||
% Main loop.
|
||||
while (t<sample_size)
|
||||
if ~mod(t,10)
|
||||
|
@ -168,6 +180,9 @@ while (t<sample_size)
|
|||
increase_periods = 0;
|
||||
% Keep a copy of endo_simul_1
|
||||
endo_simul = endo_simul_1;
|
||||
if verbosity
|
||||
save ep_test_1 endo_simul_1 exo_simul_1
|
||||
end
|
||||
while 1
|
||||
if ~increase_periods
|
||||
if bytecode_flag && ~options_.ep.stochastic.order
|
||||
|
@ -185,7 +200,7 @@ while (t<sample_size)
|
|||
solve_stochastic_perfect_foresight_model(endo_simul_1,exo_simul_1,pfm1,options_.ep.stochastic.quadrature.nodes,options_.ep.stochastic.order);
|
||||
case 1
|
||||
[flag,tmp] = ...
|
||||
solve_stochastic_perfect_foresight_model_1(endo_simul_1,exo_simul_1,pfm1,options_.ep.stochastic.quadrature.nodes,options_.ep.stochastic.order);
|
||||
solve_stochastic_perfect_foresight_model_1(endo_simul_1,exo_simul_1,options_,pfm1,options_.ep.stochastic.order);
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -263,7 +278,14 @@ while (t<sample_size)
|
|||
if options_.ep.stochastic.order == 0
|
||||
[flag,tmp,err] = solve_perfect_foresight_model(endo_simul_1,exo_simul_1,pfm1);
|
||||
else
|
||||
[flag,tmp] = solve_stochastic_perfect_foresight_model(endo_simul_1,exo_simul_1,pfm1,options_.ep.stochastic.nodes,options_.ep.stochastic.order);
|
||||
switch(options_.ep.stochastic.algo)
|
||||
case 0
|
||||
[flag,tmp] = ...
|
||||
solve_stochastic_perfect_foresight_model(endo_simul_1,exo_simul_1,pfm1,options_.ep.stochastic.quadrature.nodes,options_.ep.stochastic.order);
|
||||
case 1
|
||||
[flag,tmp] = ...
|
||||
solve_stochastic_perfect_foresight_model_1(endo_simul_1,exo_simul_1,options_,pfm1,options_.ep.stochastic.order);
|
||||
end
|
||||
end
|
||||
end
|
||||
info_convergence = ~flag;
|
||||
|
|
|
@ -33,12 +33,13 @@ exxo_simul = exo_simul0;
|
|||
initial_step_length = step_length;
|
||||
max_iter = 1000/step_length;
|
||||
weight = initial_weight;
|
||||
verbose = options_.ep.debug;
|
||||
verbose = options_.ep.verbosity;
|
||||
|
||||
reduce_step_flag = 0;
|
||||
|
||||
if verbose
|
||||
format long
|
||||
disp('Entering homotopic_steps')
|
||||
end
|
||||
|
||||
% (re)Set iter.
|
||||
|
@ -59,6 +60,9 @@ while weight<1
|
|||
else
|
||||
flag = 1;
|
||||
end
|
||||
if options_.debug
|
||||
save ep-test3 weight exo_simul0 endo_simul0
|
||||
end
|
||||
if flag
|
||||
if ~options_.ep.stochastic.order
|
||||
[flag,tmp,err] = solve_perfect_foresight_model(endo_simul0,exo_simul0,pfm);
|
||||
|
@ -69,7 +73,8 @@ while weight<1
|
|||
solve_stochastic_perfect_foresight_model(endo_simul0,exo_simul0,pfm,options_.ep.stochastic.quadrature.nodes,options_.ep.stochastic.order);
|
||||
case 1
|
||||
[flag,tmp] = ...
|
||||
solve_stochastic_perfect_foresight_model_1(endo_simul0,exo_simul0,options_.ep,pfm,options_.ep.stochastic.order);
|
||||
solve_stochastic_perfect_foresight_model_1(endo_simul0,exxo_simul,options_,pfm,options_.ep.stochastic.order,weight);
|
||||
% solve_stochastic_perfect_foresight_model_1(endo_simul0,exo_simul0,options_.ep,pfm,options_.ep.stochastic.order);
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -82,9 +87,10 @@ while weight<1
|
|||
end
|
||||
end
|
||||
if info.convergence
|
||||
%if d<stochastic_extended_path_depth
|
||||
endo_simul0 = tmp;
|
||||
%end
|
||||
if options_.debug
|
||||
save ep-test2 weight exo_simul0 endo_simul0
|
||||
end
|
||||
endo_simul0 = tmp;
|
||||
jter = jter + 1;
|
||||
if jter>3
|
||||
if verbose
|
||||
|
@ -108,7 +114,6 @@ while weight<1
|
|||
endo_simul0 = endo_simul;
|
||||
exo_simul0 = exxo_simul;
|
||||
info.convergence = 0;
|
||||
info.depth = d;
|
||||
tmp = [];
|
||||
return
|
||||
end
|
||||
|
@ -153,7 +158,8 @@ if weight<1
|
|||
solve_stochastic_perfect_foresight_model(endo_simul0,exo_simul0,pfm,options_.ep.stochastic.quadrature.nodes,options_.ep.stochastic.order);
|
||||
case 1
|
||||
[flag,tmp] = ...
|
||||
solve_stochastic_perfect_foresight_model_1(endo_simul0,exo_simul0,options_.ep,pfm,options_.ep.stochastic.order);
|
||||
solve_stochastic_perfect_foresight_model_1(endo_simul0,exxo_simul,options_,pfm,options_.ep.stochastic.order,weight);
|
||||
% solve_stochastic_perfect_foresight_model_1(endo_simul0,exo_simul0,options_.ep,pfm,options_.ep.stochastic.order);
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -166,7 +172,6 @@ if weight<1
|
|||
endo_simul0 = endo_simul;
|
||||
exo_simul0 = exxo_simul;
|
||||
info.convergence = 0;
|
||||
info.depth = d;
|
||||
tmp = [];
|
||||
return
|
||||
else
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
function [nodes,weights,nnodes] = setup_integration_nodes(EpOptions,pfm)

% Builds the integration nodes and weights used by the stochastic extended
% path algorithm, according to the integration scheme selected in
% EpOptions.IntegrationAlgorithm.
%
% INPUTS
% o EpOptions  extended-path options (fields used: stochastic.order,
%              IntegrationAlgorithm, stochastic.quadrature.nodes, ut.k)
% o pfm        perfect-foresight-model structure (fields used:
%              number_of_shocks, stochastic_order, Omega)
%
% OUTPUTS
% o nodes      matrix of integration nodes (one node per row)
% o weights    vector of integration weights
% o nnodes     number of nodes (length of weights)
%
% NOTE(review): outputs are only assigned when EpOptions.stochastic.order
% is nonzero — callers are presumably expected to request nodes only in
% the stochastic case; confirm against call sites.

if EpOptions.stochastic.order
    % Compute weights and nodes for the stochastic version of the extended path.
    switch EpOptions.IntegrationAlgorithm
      case 'Tensor-Gaussian-Quadrature'
        % Get the nodes and weights from a univariate Gauss-Hermite quadrature.
        [nodes0,weights0] = gauss_hermite_weights_and_nodes(EpOptions.stochastic.quadrature.nodes);
        % Replicate the univariate nodes for each innovation and dates, and, if needed, correlate them.
        nodes0 = repmat(nodes0,1,pfm.number_of_shocks*pfm.stochastic_order)*kron(eye(pfm.stochastic_order),pfm.Omega);
        % Put the nodes and weights in cells
        for i=1:pfm.number_of_shocks
            rr(i) = {nodes0(:,i)};
            ww(i) = {weights0};
        end
        % Build the tensorial grid
        nodes = cartesian_product_of_sets(rr{:});
        weights = prod(cartesian_product_of_sets(ww{:}),2);
        nnodes = length(weights);
      case 'Stroud-Cubature-3'
        % Degree-3 Stroud cubature with Gaussian weight.
        % (Semicolon added: the original statement echoed its output.)
        [nodes,weights] = cubature_with_gaussian_weight(pfm.number_of_shocks*pfm.stochastic_order,3,'Stroud');
        % Correlate the nodes with the shock covariance factor Omega.
        nodes = kron(eye(pfm.stochastic_order),transpose(pfm.Omega))*nodes;
        nnodes = length(weights);
      case 'Stroud-Cubature-5'
        % Degree-5 Stroud cubature with Gaussian weight.
        % (Semicolon added: the original statement echoed its output.)
        [nodes,weights] = cubature_with_gaussian_weight(pfm.number_of_shocks*pfm.stochastic_order,5,'Stroud');
        nodes = kron(eye(pfm.stochastic_order),transpose(pfm.Omega))*nodes;
        nnodes = length(weights);
      case 'UT_2p+1'
        % Unscented-transform style sigma points: 2p+1 nodes.
        p = pfm.number_of_shocks;
        k = EpOptions.ut.k;
        C = sqrt(pfm.number_of_shocks + k)*pfm.Omega';
        nodes = [zeros(1,p); -C; C];
        weights = [k/(p+k); (1/(2*(p+k)))*ones(2*p,1)];
        nnodes = 2*p+1;
      otherwise
        error('Stochastic extended path:: Unknown integration algorithm!')
    end
end
|
|
@ -1,4 +1,4 @@
|
|||
function pfm = setup_stochastic_perfect_foresight_model_solver(DynareModel,DynareOptions,DynareOutput,IntegrationMethod)
|
||||
function pfm = setup_stochastic_perfect_foresight_model_solver(DynareModel,DynareOptions,DynareOutput)
|
||||
|
||||
% Copyright (C) 2013 Dynare Team
|
||||
%
|
||||
|
@ -69,34 +69,3 @@ pfm.verbose = DynareOptions.ep.verbosity;
|
|||
pfm.maxit_ = DynareOptions.simul.maxit;
|
||||
pfm.tolerance = DynareOptions.dynatol.f;
|
||||
|
||||
if nargin>3 && DynareOptions.ep.stochastic.order
|
||||
% Compute weights and nodes for the stochastic version of the extended path.
|
||||
switch IntegrationMethod
|
||||
case 'Tensor-Gaussian-Quadrature'
|
||||
% Get the nodes and weights from a univariate Gauss-Hermite quadrature.
|
||||
[nodes,weights] = gauss_hermite_weights_and_nodes(DynareOptions.ep.stochastic.quadrature.nodes);
|
||||
% Replicate the univariate nodes for each innovation and dates, and, if needed, correlate them.
|
||||
nodes = repmat(nodes,1,pfm.number_of_shocks*pfm.stochastic_order)*kron(eye(pfm.stochastic_order),pfm.Omega);
|
||||
% Put the nodes and weights in cells
|
||||
for i=1:pfm.number_of_shocks
|
||||
rr(i) = {nodes(:,i)};
|
||||
ww(i) = {weights};
|
||||
end
|
||||
% Build the tensorial grid
|
||||
pfm.nodes = cartesian_product_of_sets(rr{:});
|
||||
pfm.weights = prod(cartesian_product_of_sets(ww{:}),2);
|
||||
pfm.nnodes = length(pfm.weights);
|
||||
case 'Stroud-Cubature-3'
|
||||
[nodes,weights] = cubature_with_gaussian_weight(pfm.number_of_shocks*pfm.stochastic_order,3,'Stroud')
|
||||
pfm.nodes = kron(eye(pfm.stochastic_order),transpose(Omega))*nodes;
|
||||
pfm.weights = weights;
|
||||
pfm.nnodes = length(pfm.weights);
|
||||
case 'Stroud-Cubature-5'
|
||||
[nodes,weights] = cubature_with_gaussian_weight(pfm.number_of_shocks*pfm.stochastic_order,5,'Stroud')
|
||||
pfm.nodes = kron(eye(pfm.stochastic_order),transpose(Omega))*nodes;
|
||||
pfm.weights = weights;
|
||||
pfm.nnodes = length(weights);
|
||||
otherwise
|
||||
error('setup_stochastic_perfect_foresight_model_solver:: Unknown integration algorithm!')
|
||||
end
|
||||
end
|
|
@ -1,4 +1,4 @@
|
|||
function [flag,endo_simul,err] = solve_stochastic_perfect_foresight_model_1(endo_simul,exo_simul,pfm,nnodes,order)
|
||||
function [flag,endo_simul,err] = solve_stochastic_perfect_foresight_model_1(endo_simul,exo_simul,Options,pfm,order,varargin)
|
||||
|
||||
% Copyright (C) 2012-2013 Dynare Team
|
||||
%
|
||||
|
@ -17,269 +17,139 @@ function [flag,endo_simul,err] = solve_stochastic_perfect_foresight_model_1(endo
|
|||
% You should have received a copy of the GNU General Public License
|
||||
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
flag = 0;
|
||||
err = 0;
|
||||
stop = 0;
|
||||
global options_
|
||||
|
||||
params = pfm.params;
|
||||
steady_state = pfm.steady_state;
|
||||
ny = pfm.ny;
|
||||
periods = pfm.periods;
|
||||
dynamic_model = pfm.dynamic_model;
|
||||
lead_lag_incidence = pfm.lead_lag_incidence;
|
||||
nyp = pfm.nyp;
|
||||
nyf = pfm.nyf;
|
||||
i_cols_1 = pfm.i_cols_1;
|
||||
i_cols_A1 = pfm.i_cols_A1;
|
||||
i_cols_j = pfm.i_cols_j;
|
||||
i_cols_T = nonzeros(lead_lag_incidence(1:2,:)');
|
||||
hybrid_order = pfm.hybrid_order;
|
||||
dr = pfm.dr;
|
||||
|
||||
maxit = pfm.maxit_;
|
||||
tolerance = pfm.tolerance;
|
||||
verbose = pfm.verbose;
|
||||
if nargin < 6
|
||||
homotopy_parameter = 1;
|
||||
else
|
||||
homotopy_parameter = varargin{1};
|
||||
end
|
||||
flag = 0;
|
||||
err = 0;
|
||||
stop = 0;
|
||||
|
||||
number_of_shocks = size(exo_simul,2);
|
||||
EpOptions = Options.ep;
|
||||
|
||||
[nodes,weights] = gauss_hermite_weights_and_nodes(nnodes);
|
||||
|
||||
% make sure that there is a node equal to zero
|
||||
% and permute nodes and weights to have zero first
|
||||
k = find(abs(nodes) < 1e-12);
|
||||
if ~isempty(k)
|
||||
nodes = [nodes(k); nodes(1:k-1); nodes(k+1:end)];
|
||||
weights = [weights(k); weights(1:k-1); weights(k+1:end)];
|
||||
else
|
||||
error('there is no nodes equal to zero')
|
||||
end
|
||||
|
||||
if number_of_shocks>1
|
||||
nodes = repmat(nodes,1,number_of_shocks)*chol(pfm.Sigma);
|
||||
% to be fixed for Sigma ~= I
|
||||
for i=1:number_of_shocks
|
||||
rr(i) = {nodes(:,i)};
|
||||
ww(i) = {weights};
|
||||
end
|
||||
nodes = cartesian_product_of_sets(rr{:});
|
||||
weights = prod(cartesian_product_of_sets(ww{:}),2);
|
||||
nnodes = nnodes^number_of_shocks;
|
||||
else
|
||||
nodes = nodes*sqrt(pfm.Sigma);
|
||||
end
|
||||
|
||||
if hybrid_order > 0
|
||||
if hybrid_order == 2
|
||||
h_correction = 0.5*dr.ghs2(dr.inv_order_var);
|
||||
end
|
||||
params = pfm.params;
|
||||
steady_state = pfm.steady_state;
|
||||
ny = pfm.ny;
|
||||
periods = pfm.periods;
|
||||
dynamic_model = pfm.dynamic_model;
|
||||
lead_lag_incidence = pfm.lead_lag_incidence;
|
||||
nyp = pfm.nyp;
|
||||
nyf = pfm.nyf;
|
||||
i_cols_1 = pfm.i_cols_1;
|
||||
i_cols_A1 = pfm.i_cols_A1;
|
||||
i_cols_j = pfm.i_cols_j;
|
||||
i_cols_T = nonzeros(lead_lag_incidence(1:2,:)');
|
||||
hybrid_order = pfm.hybrid_order;
|
||||
dr = pfm.dr;
|
||||
nodes = pfm.nodes;
|
||||
weights = pfm.weights;
|
||||
nnodes = pfm.nnodes;
|
||||
|
||||
maxit = pfm.maxit_;
|
||||
tolerance = pfm.tolerance;
|
||||
verbose = pfm.verbose;
|
||||
|
||||
number_of_shocks = size(exo_simul,2);
|
||||
|
||||
% make sure that there is a node equal to zero
|
||||
% and permute nodes and weights to have zero first
|
||||
k = find(sum(abs(nodes),2) < 1e-12);
|
||||
if ~isempty(k)
|
||||
nodes = [nodes(k,:); nodes(1:k-1,:); nodes(k+1:end,:)];
|
||||
weights = [weights(k); weights(1:k-1); weights(k+1:end)];
|
||||
else
|
||||
error('there is no nodes equal to zero')
|
||||
end
|
||||
|
||||
if hybrid_order > 0
|
||||
if hybrid_order == 2
|
||||
h_correction = 0.5*dr.ghs2(dr.inv_order_var);
|
||||
end
|
||||
else
|
||||
h_correction = 0;
|
||||
end
|
||||
|
||||
if verbose
|
||||
disp ([' -----------------------------------------------------']);
|
||||
disp (['MODEL SIMULATION :']);
|
||||
fprintf('\n');
|
||||
end
|
||||
if verbose
|
||||
disp ([' -----------------------------------------------------']);
|
||||
disp (['MODEL SIMULATION :']);
|
||||
fprintf('\n');
|
||||
end
|
||||
|
||||
z = endo_simul(find(lead_lag_incidence'));
|
||||
[d1,jacobian] = dynamic_model(z,exo_simul,params,steady_state,2);
|
||||
% Each column of Y represents a different world
|
||||
% The upper right cells are unused
|
||||
% The first row block is ny x 1
|
||||
% The second row block is ny x nnodes
|
||||
% The third row block is ny x nnodes^2
|
||||
% and so on until size ny x nnodes^order
|
||||
world_nbr = 1+(nnodes-1)*order;
|
||||
Y = endo_simul(:,2:end-1);
|
||||
Y = repmat(Y,1,world_nbr);
|
||||
pfm.y0 = endo_simul(:,1);
|
||||
|
||||
% Each column of Y represents a different world
|
||||
% The upper right cells are unused
|
||||
% The first row block is ny x 1
|
||||
% The second row block is ny x nnodes
|
||||
% The third row block is ny x nnodes^2
|
||||
% and so on until size ny x nnodes^order
|
||||
world_nbr = 1+(nnodes-1)*order;
|
||||
Y = repmat(endo_simul(:),1,world_nbr);
|
||||
% The columns of A map the elements of Y such that
|
||||
% each block of Y with ny rows are unfolded column wise
|
||||
% number of blocks
|
||||
block_nbr = (order+(nnodes-1)*(order-1)*order/2+(periods-order)*world_nbr);
|
||||
% dimension of the problem
|
||||
dimension = ny*block_nbr;
|
||||
pfm.block_nbr = block_nbr;
|
||||
pfm.dimension = dimension;
|
||||
if order == 0
|
||||
i_upd_r = (1:ny*periods);
|
||||
i_upd_y = i_upd_r + ny;
|
||||
else
|
||||
i_upd_r = zeros(dimension,1);
|
||||
i_upd_y = i_upd_r;
|
||||
i_upd_r(1:ny) = (1:ny);
|
||||
i_upd_y(1:ny) = ny+(1:ny);
|
||||
i1 = ny+1;
|
||||
i2 = 2*ny;
|
||||
n1 = ny+1;
|
||||
n2 = 2*ny;
|
||||
for i=2:periods
|
||||
k = n1:n2;
|
||||
for j=1:(1+(nnodes-1)*min(i-1,order))
|
||||
i_upd_r(i1:i2) = k+(j-1)*ny*periods;
|
||||
i_upd_y(i1:i2) = k+ny+(j-1)*ny*(periods+2);
|
||||
i1 = i2+1;
|
||||
i2 = i2+ny;
|
||||
end
|
||||
n1 = n2+1;
|
||||
n2 = n2+ny;
|
||||
end
|
||||
end
|
||||
icA = [find(lead_lag_incidence(1,:)) find(lead_lag_incidence(2,:))+world_nbr*ny ...
|
||||
find(lead_lag_incidence(3,:))+2*world_nbr*ny]';
|
||||
h1 = clock;
|
||||
pfm.order = order;
|
||||
pfm.world_nbr = world_nbr;
|
||||
pfm.nodes = nodes;
|
||||
pfm.nnodes = nnodes;
|
||||
pfm.weights = weights;
|
||||
pfm.h_correction = h_correction;
|
||||
pfm.i_rows = 1:ny;
|
||||
i_cols = find(lead_lag_incidence');
|
||||
pfm.i_cols = i_cols;
|
||||
pfm.nyp = nyp;
|
||||
pfm.nyf = nyf;
|
||||
pfm.hybrid_order = hybrid_order;
|
||||
pfm.i_cols_1 = i_cols_1;
|
||||
pfm.i_cols_h = i_cols_j;
|
||||
pfm.icA = icA;
|
||||
pfm.i_cols_T = i_cols_T;
|
||||
pfm.i_upd_r = i_upd_r;
|
||||
pfm.i_upd_y = i_upd_y;
|
||||
|
||||
% The columns of A map the elements of Y such that
|
||||
% each block of Y with ny rows are unfolded column wise
|
||||
dimension = ny*(order+(nnodes-1)*(order-1)*order/2+(periods-order)*world_nbr);
|
||||
if order == 0
|
||||
i_upd_r = (1:ny*periods);
|
||||
i_upd_y = i_upd_r + ny;
|
||||
else
|
||||
i_upd_r = zeros(dimension,1);
|
||||
i_upd_y = i_upd_r;
|
||||
i_upd_r(1:ny) = (1:ny);
|
||||
i_upd_y(1:ny) = ny+(1:ny);
|
||||
i1 = ny+1;
|
||||
i2 = 2*ny;
|
||||
n1 = ny+1;
|
||||
n2 = 2*ny;
|
||||
for i=2:periods
|
||||
k = n1:n2;
|
||||
for j=1:(1+(nnodes-1)*min(i-1,order))
|
||||
i_upd_r(i1:i2) = (n1:n2)+(j-1)*ny*periods;
|
||||
i_upd_y(i1:i2) = (n1:n2)+ny+(j-1)*ny*(periods+2);
|
||||
i1 = i2+1;
|
||||
i2 = i2+ny;
|
||||
end
|
||||
n1 = n2+1;
|
||||
n2 = n2+ny;
|
||||
end
|
||||
end
|
||||
icA = [find(lead_lag_incidence(1,:)) find(lead_lag_incidence(2,:))+world_nbr*ny ...
|
||||
find(lead_lag_incidence(3,:))+2*world_nbr*ny]';
|
||||
h1 = clock;
|
||||
for iter = 1:maxit
|
||||
h2 = clock;
|
||||
A1 = sparse([],[],[],ny*(order+(nnodes-1)*(order-1)*order/2),dimension,(order+1)*world_nbr*nnz(jacobian));
|
||||
res = zeros(ny,periods,world_nbr);
|
||||
i_rows = 1:ny;
|
||||
i_cols = find(lead_lag_incidence');
|
||||
i_cols_p = i_cols(1:nyp);
|
||||
i_cols_s = i_cols(nyp+(1:ny));
|
||||
i_cols_f = i_cols(nyp+ny+(1:nyf));
|
||||
i_cols_A = i_cols;
|
||||
i_cols_Ap0 = i_cols_p;
|
||||
i_cols_As = i_cols_s;
|
||||
i_cols_Af0 = i_cols_f - ny;
|
||||
i_hc = i_cols_f - 2*ny;
|
||||
for i = 1:order+1
|
||||
i_w_p = 1;
|
||||
for j = 1:(1+(nnodes-1)*(i-1))
|
||||
innovation = exo_simul;
|
||||
if i <= order && j == 1
|
||||
% first world, integrating future shocks
|
||||
for k=1:nnodes
|
||||
if i == 2
|
||||
i_cols_Ap = i_cols_Ap0;
|
||||
elseif i > 2
|
||||
i_cols_Ap = i_cols_Ap0 + ny*(i-2+(nnodes- ...
|
||||
1)*(i-2)*(i-3)/2);
|
||||
end
|
||||
if k == 1
|
||||
i_cols_Af = i_cols_Af0 + ny*(i-1+(nnodes-1)*i*(i-1)/ ...
|
||||
2);
|
||||
else
|
||||
i_cols_Af = i_cols_Af0 + ny*(i-1+(nnodes-1)*i*(i-1)/ ...
|
||||
2+(i-1)*(nnodes-1) ...
|
||||
+ k - 1);
|
||||
end
|
||||
if i > 1
|
||||
innovation(i+1,:) = nodes(k,:);
|
||||
end
|
||||
if k == 1
|
||||
k1 = 1;
|
||||
else
|
||||
k1 = (nnodes-1)*(i-1)+k;
|
||||
end
|
||||
if hybrid_order == 2 && (k > 1 || i == order)
|
||||
y = [Y(i_cols_p,1);
|
||||
Y(i_cols_s,1);
|
||||
Y(i_cols_f,k1)+h_correction(i_hc)];
|
||||
else
|
||||
y = [Y(i_cols_p,1);
|
||||
Y(i_cols_s,1);
|
||||
Y(i_cols_f,k1)];
|
||||
end
|
||||
[d1,jacobian] = dynamic_model(y,innovation,params,steady_state,i+1);
|
||||
if i == 1
|
||||
% in first period we don't keep track of
|
||||
% predetermined variables
|
||||
i_cols_A = [i_cols_As - ny; i_cols_Af];
|
||||
A1(i_rows,i_cols_A) = A1(i_rows,i_cols_A) + weights(k)*jacobian(:,i_cols_1);
|
||||
else
|
||||
i_cols_A = [i_cols_Ap; i_cols_As; i_cols_Af];
|
||||
A1(i_rows,i_cols_A) = A1(i_rows,i_cols_A) + weights(k)*jacobian(:,i_cols_j);
|
||||
end
|
||||
res(:,i,1) = res(:,i,1)+weights(k)*d1;
|
||||
end
|
||||
elseif j > 1 + (nnodes-1)*(i-2)
|
||||
% new world, using previous state of world 1 and hit
|
||||
% by shocks from nodes
|
||||
i_cols_Ap = i_cols_Ap0 + ny*(i-2+(nnodes-1)*(i-2)*(i-3)/2);
|
||||
i_cols_Af = i_cols_Af0 + ny*(i+(nnodes-1)*i*(i-1)/2+j-2);
|
||||
k = j - (nnodes-1)*(i-2);
|
||||
innovation(i+1,:) = nodes(k,:);
|
||||
y = [Y(i_cols_p,1);
|
||||
Y(i_cols_s,j);
|
||||
Y(i_cols_f,j)];
|
||||
[d1,jacobian] = dynamic_model(y,innovation,params,steady_state,i+1);
|
||||
i_cols_A = [i_cols_Ap; i_cols_As; i_cols_Af];
|
||||
A1(i_rows,i_cols_A) = jacobian(:,i_cols_j);
|
||||
res(:,i,j) = d1;
|
||||
i_cols_Af = i_cols_Af + ny;
|
||||
else
|
||||
% existing worlds other than 1
|
||||
i_cols_Ap = i_cols_Ap0 + ny*(i-2+(nnodes-1)*(i-2)*(i-3)/2+j-1);
|
||||
i_cols_Af = i_cols_Af0 + ny*(i+(nnodes-1)*i*(i-1)/2+j-2);
|
||||
y = [Y(i_cols_p,j);
|
||||
Y(i_cols_s,j);
|
||||
Y(i_cols_f,j)];
|
||||
[d1,jacobian] = dynamic_model(y,innovation,params,steady_state,i+1);
|
||||
i_cols_A = [i_cols_Ap; i_cols_As; i_cols_Af];
|
||||
A1(i_rows,i_cols_A) = jacobian(:,i_cols_j);
|
||||
res(:,i,j) = d1;
|
||||
i_cols_Af = i_cols_Af + ny;
|
||||
end
|
||||
i_rows = i_rows + ny;
|
||||
if mod(j,nnodes) == 0
|
||||
i_w_p = i_w_p + 1;
|
||||
end
|
||||
if i > 1
|
||||
i_cols_As = i_cols_As + ny;
|
||||
end
|
||||
end
|
||||
i_cols_p = i_cols_p + ny;
|
||||
i_cols_s = i_cols_s + ny;
|
||||
i_cols_f = i_cols_f + ny;
|
||||
end
|
||||
nzA = cell(periods,world_nbr);
|
||||
parfor j=1:world_nbr
|
||||
i_rows_y = find(lead_lag_incidence')+(order+1)*ny;
|
||||
offset_c = ny*(order+(nnodes-1)*(order-1)*order/2+j-1);
|
||||
offset_r = (j-1)*ny;
|
||||
for i=order+2:periods
|
||||
[d1,jacobian] = dynamic_model(Y(i_rows_y,j), ...
|
||||
exo_simul,params, ...
|
||||
steady_state,i+1);
|
||||
if i == periods
|
||||
[ir,ic,v] = find(jacobian(:,i_cols_T));
|
||||
else
|
||||
[ir,ic,v] = find(jacobian(:,i_cols_j));
|
||||
end
|
||||
nzA{i,j} = [offset_r+ir,offset_c+icA(ic), v]';
|
||||
res(:,i,j) = d1;
|
||||
i_rows_y = i_rows_y + ny;
|
||||
offset_c = offset_c + world_nbr*ny;
|
||||
offset_r = offset_r + world_nbr*ny;
|
||||
end
|
||||
end
|
||||
err = max(abs(res(i_upd_r)));
|
||||
if err < tolerance
|
||||
stop = 1;
|
||||
if verbose
|
||||
fprintf('\n') ;
|
||||
disp([' Total time of simulation :' num2str(etime(clock,h1))]) ;
|
||||
fprintf('\n') ;
|
||||
disp([' Convergency obtained.']) ;
|
||||
fprintf('\n') ;
|
||||
end
|
||||
flag = 0;% Convergency obtained.
|
||||
endo_simul = reshape(Y(:,1),ny,periods+2);%Y(ny+(1:ny),1);
|
||||
% figure;plot(Y(16:ny:(periods+2)*ny,:))
|
||||
% pause
|
||||
break
|
||||
end
|
||||
A2 = [nzA{:}]';
|
||||
A = [A1; sparse(A2(:,1),A2(:,2),A2(:,3),ny*(periods-order-1)*world_nbr,dimension)];
|
||||
dy = -A\res(i_upd_r);
|
||||
Y(i_upd_y) = Y(i_upd_y) + dy;
|
||||
end
|
||||
|
||||
if ~stop
|
||||
if verbose
|
||||
fprintf('\n') ;
|
||||
disp([' Total time of simulation :' num2str(etime(clock,h1))]) ;
|
||||
fprintf('\n') ;
|
||||
disp(['WARNING : maximum number of iterations is reached (modify options_.simul.maxit).']) ;
|
||||
fprintf('\n') ;
|
||||
end
|
||||
flag = 1;% more iterations are needed.
|
||||
endo_simul = 1;
|
||||
end
|
||||
if verbose
|
||||
disp (['-----------------------------------------------------']) ;
|
||||
end
|
||||
options_.solve_algo = 9;
|
||||
options_.steady.maxit = 100;
|
||||
y = repmat(steady_state,block_nbr,1);
|
||||
[y,info] = dynare_solve(@ep_problem_2,y,1,exo_simul,pfm);
|
||||
if info
|
||||
flag = 1;
|
||||
err = info;
|
||||
end
|
||||
endo_simul(:,2) = y(1:ny);
|
|
@ -0,0 +1,9 @@
|
|||
function [X,w]=stroud_judd_7.5.8(d)
|
||||
|
||||
E = eye(d);
|
||||
X = cell(2*d,1);
|
||||
m = 1;
|
||||
for i=1:d
|
||||
X{m} = E(:,i);
|
||||
m = m+1;
|
||||
X{m} = -E(:,i);
|
|
@ -63,6 +63,12 @@ if M.exo_nbr == 0
|
|||
oo.exo_steady_state = [] ;
|
||||
end
|
||||
|
||||
|
||||
info=test_for_deep_parameters_calibration(M);
|
||||
if info
|
||||
problem_dummy=1;
|
||||
end;
|
||||
|
||||
% check if ys is steady state
|
||||
options.debug=1; %locally set debug option to 1
|
||||
[dr.ys,params,check1]=evaluate_steady_state(oo.steady_state,M,options,oo,1);
|
||||
|
|
|
@ -177,11 +177,11 @@ end
|
|||
if ~isempty(o.data)
|
||||
if isempty(o.seriesToUse)
|
||||
for i=1:o.data.vobs
|
||||
o = o.addSeries('data', o.data{o.data.name{i}});
|
||||
o.series{end+1} = report_series('data', o.data{o.data.name{i}});
|
||||
end
|
||||
else
|
||||
for i=1:length(o.seriesToUse)
|
||||
o = o.addSeries('data', o.data{o.seriesToUse{i}});
|
||||
o.series{end+1} = report_series('data', o.data{o.seriesToUse{i}});
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -33,7 +33,7 @@ function o = writeTableFile(o, pg, sec, row, col)
|
|||
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ne = length(o.series);
|
||||
if length(o.series) == 0
|
||||
if ne == 0
|
||||
warning('@report_table.write: no series to plot, returning');
|
||||
return;
|
||||
end
|
||||
|
@ -56,6 +56,7 @@ nlhc = 1;
|
|||
|
||||
if isempty(o.range)
|
||||
dates = getMaxRange(o.series);
|
||||
o.range = {dates};
|
||||
else
|
||||
dates = o.range{1};
|
||||
end
|
||||
|
|
|
@ -71,6 +71,6 @@ for i=1:ne
|
|||
end
|
||||
end
|
||||
end
|
||||
fprintf(fid, '\\end{tabular}}\n');
|
||||
fprintf(fid, '\\end{tabular}}\\\\\n');
|
||||
fprintf(fid, '%% End Section Object\n\n');
|
||||
end
|
|
@ -171,7 +171,7 @@ if stop
|
|||
skipline();
|
||||
fprintf('\nSimulation terminated after %d iterations.\n',iter);
|
||||
fprintf('Total time of simulation: %16.13f\n',etime(clock,h1));
|
||||
error('Simulation terminated with NaN or Inf in the residuals or endogenous variables. There is most likely something wrong with your model.');
|
||||
fprintf('WARNING: Simulation terminated with NaN or Inf in the residuals or endogenous variables. There is most likely something wrong with your model.\n');
|
||||
else
|
||||
skipline();
|
||||
fprintf('\nSimulation concluded successfully after %d iterations.\n',iter);
|
||||
|
|
|
@ -41,7 +41,6 @@ function [x,check] = solve1(func,x,j1,j2,jacobian_flag,gstep,tolf,tolx,maxit,deb
|
|||
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
nn = length(j1);
|
||||
fjac = zeros(nn,nn) ;
|
||||
g = zeros(nn,1) ;
|
||||
|
||||
tolmin = tolx ;
|
||||
|
@ -71,6 +70,9 @@ end
|
|||
|
||||
stpmax = stpmx*max([sqrt(x'*x);nn]) ;
|
||||
first_time = 1;
|
||||
if ~jacobian_flag
|
||||
fjac = zeros(nn,nn) ;
|
||||
end
|
||||
for its = 1:maxit
|
||||
if jacobian_flag
|
||||
[fvec,fjac] = feval(func,x,varargin{:});
|
||||
|
@ -90,9 +92,14 @@ for its = 1:maxit
|
|||
|
||||
g = (fvec'*fjac)';
|
||||
if debug
|
||||
disp(['cond(fjac) ' num2str(cond(fjac))])
|
||||
disp(['cond(fjac) ' num2str(condest(fjac))])
|
||||
end
|
||||
if rcond(fjac) < sqrt(eps)
|
||||
if issparse(fjac)
|
||||
rcond_fjac = 1/condest(fjac);
|
||||
else
|
||||
rcond_fjac = rcond(fjac);
|
||||
end
|
||||
if rcond_fjac < sqrt(eps)
|
||||
fjac2=fjac'*fjac;
|
||||
p=-(fjac2+1e6*sqrt(nn*eps)*max(sum(abs(fjac2)))*eye(nn))\(fjac'*fvec);
|
||||
else
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
function test_for_deep_parameters_calibration(M_)
|
||||
function info=test_for_deep_parameters_calibration(M_)
|
||||
% Issues a warning is some of the parameters are NaNs.
|
||||
%
|
||||
% INPUTS
|
||||
% M_ [structure] Description of the (simulated or estimated) model.
|
||||
%
|
||||
% OUTPUTS
|
||||
% none
|
||||
% info [scalar] 0 if no problems detected, 1 otherwise
|
||||
%
|
||||
% ALGORITHM
|
||||
% none
|
||||
|
@ -13,7 +13,7 @@ function test_for_deep_parameters_calibration(M_)
|
|||
% SPECIAL REQUIREMENTS
|
||||
% none
|
||||
|
||||
% Copyright (C) 2010 Dynare Team
|
||||
% Copyright (C) 2010-2014 Dynare Team
|
||||
%
|
||||
% This file is part of Dynare.
|
||||
%
|
||||
|
@ -31,6 +31,7 @@ function test_for_deep_parameters_calibration(M_)
|
|||
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
|
||||
plist = list_of_parameters_calibrated_as_NaN(M_);
|
||||
if ~isempty(plist)
|
||||
info=1;
|
||||
message = ['Some of the parameters have no value (' ];
|
||||
for i=1:size(plist,1)
|
||||
if i<size(plist,1)
|
||||
|
@ -47,4 +48,6 @@ if ~isempty(plist)
|
|||
if strmatch('optimal_policy_discount_factor',plist,'exact')
|
||||
warning('Either you have not correctly initialized planner_discount or you are calling a command like steady or stoch_simul that is not allowed in the context of ramsey_policy')
|
||||
end
|
||||
else
|
||||
info=0;
|
||||
end
|
|
@ -65,7 +65,6 @@ info = 0;
|
|||
fvec = fcn (x, varargin{:});
|
||||
fvec = fvec(j1);
|
||||
fn = norm (fvec);
|
||||
jcn = nan(n, 1);
|
||||
|
||||
% Outer loop.
|
||||
while (niter < maxiter && ~info)
|
||||
|
@ -88,9 +87,7 @@ while (niter < maxiter && ~info)
|
|||
end
|
||||
|
||||
% Get column norms, use them as scaling factors.
|
||||
for j = 1:n
|
||||
jcn(j) = norm(fjac(:,j));
|
||||
end
|
||||
jcn = sqrt(sum(fjac.*fjac))';
|
||||
if (niter == 1)
|
||||
dg = jcn;
|
||||
dg(dg == 0) = 1;
|
||||
|
@ -228,7 +225,7 @@ if (xn > delta)
|
|||
bn = norm (b);
|
||||
dxn = delta/xn; snmd = snm/delta;
|
||||
t = (bn/sn) * (bn/xn) * snmd;
|
||||
t = t - dxn * snmd^2 - sqrt ((t-dxn)^2 + (1-dxn^2)*(1-snmd^2));
|
||||
t = t - dxn * snmd^2 + sqrt ((t-dxn)^2 + (1-dxn^2)*(1-snmd^2));
|
||||
alpha = dxn*(1-snmd^2) / t;
|
||||
else
|
||||
alpha = 0;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (C) 2011-2012 Houtan Bastani, Daniel Waggoner, Tao Zha
|
||||
* Copyright (C) 2014 DynareTeam
|
||||
*
|
||||
* This is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
|
@ -17,14 +17,6 @@
|
|||
#ifndef _DYNARE_C_DRIVER_H
|
||||
#define _DYNARE_C_DRIVER_H
|
||||
|
||||
#include <cstdlib>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <limits>
|
||||
|
||||
using namespace std;
|
||||
|
||||
struct aux_vars_t {
|
||||
int endo_index, type, orig_index, orig_lead_lag;
|
||||
} ;
|
||||
|
|
|
@ -4285,3 +4285,4 @@ DynamicModel::dynamicOnlyEquationsNbr() const
|
|||
}
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -221,6 +221,7 @@ public:
|
|||
void writeDynamicFile(const string &basename, bool block, bool bytecode, bool use_dll, int order) const;
|
||||
//! Writes file containing parameters derivatives
|
||||
void writeParamsDerivativesFile(const string &basename) const;
|
||||
|
||||
//! Converts to static model (only the equations)
|
||||
/*! It assumes that the static model given in argument has just been allocated */
|
||||
void toStatic(StaticModel &static_model) const;
|
||||
|
|
|
@ -820,4 +820,3 @@ ModFile::writeOutputFiles(const string &basename, bool clear_all, bool no_log, b
|
|||
cout << "done" << endl;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -323,6 +323,9 @@ ParsingDriver::add_model_variable(int symb_id, int lag)
|
|||
if (dynamic_cast<StaticModel *>(model_tree) != NULL && lag != 0)
|
||||
error("Leads and lags on variables are forbidden in 'planner_objective'.");
|
||||
|
||||
if (dynamic_cast<StaticModel *>(model_tree) != NULL && type == eModelLocalVariable)
|
||||
error("Model local variable " + mod_file->symbol_table.getName(symb_id) + " cannot be used in 'planner_objective'.");
|
||||
|
||||
// It makes sense to allow a lead/lag on parameters: during steady state calibration, endogenous and parameters can be swapped
|
||||
return model_tree->AddVariable(symb_id, lag);
|
||||
}
|
||||
|
|
|
@ -152,3 +152,4 @@ SteadyStateModel::writeSteadyStateFile(const string &basename, bool ramsey_model
|
|||
<< "end" << endl;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -176,7 +176,8 @@ MODFILES = \
|
|||
filter_step_ahead/fs2000_filter_step_ahead_ML.mod \
|
||||
loglinear/example4_loglinear.mod \
|
||||
smoother2histval/fs2000_simul.mod \
|
||||
smoother2histval/fs2000_smooth.mod
|
||||
smoother2histval/fs2000_smooth.mod \
|
||||
smoother2histval/fs2000_smooth_stoch_simul.mod
|
||||
|
||||
XFAIL_MODFILES = ramst_xfail.mod \
|
||||
estim_param_in_shock_value.mod
|
||||
|
|
|
@ -40,6 +40,7 @@ ts = extended_path([],1000);
|
|||
|
||||
options_.ep.verbosity = 0;
|
||||
options_.ep.stochastic.order = 1;
|
||||
options_.ep.IntegrationAlgorithm='Tensor-Gaussian-Quadrature';
|
||||
options_.ep.stochastic.nodes = 3;
|
||||
options_.console_mode = 0;
|
||||
|
||||
|
|
|
@ -53,6 +53,7 @@ set_dynare_seed('default');
|
|||
ts = extended_path([],5000);
|
||||
|
||||
options_.ep.stochastic.order = 2;
|
||||
options_.ep.IntegrationAlgorithm='Tensor-Gaussian-Quadrature';
|
||||
set_dynare_seed('default');
|
||||
ts1_4 = extended_path([],5000);
|
||||
|
||||
|
|
|
@ -36,6 +36,7 @@ options_.console_mode = 0;
|
|||
ts = extended_path([],100);
|
||||
|
||||
options_.ep.stochastic.status = 1;
|
||||
options_.ep.IntegrationAlgorithm='Tensor-Gaussian-Quadrature';
|
||||
options_.ep.order = 1;
|
||||
options_.ep.nnodes = 3;
|
||||
sts = extended_path([],100);
|
||||
|
|
|
@ -79,6 +79,7 @@ ts0 = extended_path([],100);
|
|||
|
||||
options_.ep.stochastic.order = 1;
|
||||
options_.ep.stochastic.nodes = 3;
|
||||
options_.ep.IntegrationAlgorithm='Tensor-Gaussian-Quadrature';
|
||||
ts1_3 = extended_path([],100);
|
||||
|
||||
options_.ep.stochastic.nodes = 5;
|
||||
|
|
|
@ -76,5 +76,6 @@ end;
|
|||
|
||||
steady;
|
||||
|
||||
options_.ep.IntegrationAlgorithm='Tensor-Gaussian-Quadrature';
|
||||
extended_path(periods=100);
|
||||
|
||||
|
|
|
@ -75,6 +75,7 @@ copyfile('rbcii_steady_state.m','rbcii_steadystate2.m');
|
|||
ts = extended_path([],100);
|
||||
|
||||
options_.ep.stochastic.order = 1;
|
||||
options_.ep.IntegrationAlgorithm='Tensor-Gaussian-Quadrature';
|
||||
// profile on
|
||||
ts1_4 = extended_path([],100);
|
||||
// profile off
|
||||
|
|
|
@ -15,7 +15,7 @@ theta = 2.95;
|
|||
phi = 0.1;
|
||||
|
||||
model;
|
||||
c*theta*h^(1+psi)=expectation(1)((1-alpha)*y)+expectation(-2)((1-alpha)*y);
|
||||
c*theta*h^(1+psi)=(expectation(1)((1-alpha)*y)+expectation(-2)((1-alpha)*y))/2;
|
||||
k = beta*(((exp(b)*c)/(exp(b(+1))*c(+1)))
|
||||
*(exp(b(+1))*alpha*y(+1)+(1-delta)*k));
|
||||
y = exp(a)*(k(-1)^alpha)*(h^(1-alpha));
|
||||
|
|
|
@ -15,7 +15,7 @@ theta = 2.95;
|
|||
phi = 0.1;
|
||||
|
||||
model;
|
||||
c*theta*h^(1+psi)=expectation(1)(((1-alpha)*y)+expectation(3)((1-alpha)*y));
|
||||
c*theta*h^(1+psi)=expectation(1)(((1-alpha)*y)+expectation(3)((1-alpha)*y))/2;
|
||||
k = beta*(((exp(b)*c)/(exp(b(+1))*c(+1)))
|
||||
*(exp(b(+1))*alpha*y(+1)+(1-delta)*k));
|
||||
y = exp(a)*(k(-1)^alpha)*(h^(1-alpha));
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
## Copyright (C) 2009-2012 Dynare Team
|
||||
## Copyright (C) 2009-2014 Dynare Team
|
||||
##
|
||||
## This file is part of Dynare.
|
||||
##
|
||||
|
@ -31,9 +31,8 @@ if !strcmp(dynare_version(), getenv("DYNARE_VERSION"))
|
|||
endif
|
||||
|
||||
## Ask gnuplot to create graphics in text mode
|
||||
## Note that setenv() was introduced in Octave 3.0.2, for compatibility
|
||||
## with MATLAB
|
||||
putenv("GNUTERM", "dumb")
|
||||
graphics_toolkit gnuplot;
|
||||
setenv("GNUTERM", "dumb");
|
||||
|
||||
## Test MOD files listed in Makefile.am
|
||||
name = getenv("FILESTEM");
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
// Test that smoother2histval works (everything in a single file)
|
||||
// Note that an observation equation has been modified in order to have an aux var for lagged endo
|
||||
|
||||
var m P c e W R k d n l gy_obs gp_obs y dA;
|
||||
varexo e_a e_m;
|
||||
|
||||
parameters alp bet gam mst rho psi del;
|
||||
|
||||
alp = 0.33;
|
||||
bet = 0.99;
|
||||
gam = 0.003;
|
||||
mst = 1.011;
|
||||
rho = 0.7;
|
||||
psi = 0.787;
|
||||
del = 0.02;
|
||||
|
||||
model;
|
||||
dA = exp(gam+e_a);
|
||||
log(m) = (1-rho)*log(mst) + rho*log(m(-1))+e_m;
|
||||
-P/(c(+1)*P(+1)*m)+bet*P(+1)*(alp*exp(-alp*(gam+log(e(+1))))*k^(alp-1)*n(+1)^(1-alp)+(1-del)*exp(-(gam+log(e(+1)))))/(c(+2)*P(+2)*m(+1))=0;
|
||||
W = l/n;
|
||||
-(psi/(1-psi))*(c*P/(1-n))+l/n = 0;
|
||||
R = P*(1-alp)*exp(-alp*(gam+e_a))*k(-1)^alp*n^(-alp)/W;
|
||||
1/(c*P)-bet*P*(1-alp)*exp(-alp*(gam+e_a))*k(-1)^alp*n^(1-alp)/(m*l*c(+1)*P(+1)) = 0;
|
||||
c+k = exp(-alp*(gam+e_a))*k(-1)^alp*n^(1-alp)+(1-del)*exp(-(gam+e_a))*k(-1);
|
||||
P*c = m;
|
||||
m-1+d = l;
|
||||
e = exp(e_a);
|
||||
y = k(-1)^alp*n^(1-alp)*exp(-alp*(gam+e_a(-1)));
|
||||
gy_obs = dA*y/y(-2);
|
||||
gp_obs = (P/P(-1))*m(-1)/dA;
|
||||
end;
|
||||
|
||||
initval;
|
||||
k = 6;
|
||||
m = mst;
|
||||
P = 2.25;
|
||||
c = 0.45;
|
||||
e = 1;
|
||||
W = 4;
|
||||
R = 1.02;
|
||||
d = 0.85;
|
||||
n = 0.19;
|
||||
l = 0.86;
|
||||
y = 0.6;
|
||||
gy_obs = exp(gam);
|
||||
gp_obs = exp(-gam);
|
||||
dA = exp(gam);
|
||||
end;
|
||||
|
||||
shocks;
|
||||
var e_a; stderr 0.014;
|
||||
var e_m; stderr 0.005;
|
||||
end;
|
||||
|
||||
steady;
|
||||
|
||||
check;
|
||||
|
||||
estimated_params;
|
||||
alp, beta_pdf, 0.356, 0.02;
|
||||
bet, beta_pdf, 0.993, 0.002;
|
||||
gam, normal_pdf, 0.0085, 0.003;
|
||||
mst, normal_pdf, 1.0002, 0.007;
|
||||
rho, beta_pdf, 0.129, 0.223;
|
||||
psi, beta_pdf, 0.65, 0.05;
|
||||
del, beta_pdf, 0.01, 0.005;
|
||||
stderr e_a, inv_gamma_pdf, 0.035449, inf;
|
||||
stderr e_m, inv_gamma_pdf, 0.008862, inf;
|
||||
end;
|
||||
|
||||
varobs gp_obs gy_obs;
|
||||
|
||||
options_.solve_tolf = 1e-12;
|
||||
|
||||
estimation(order=1,datafile=fsdat_simul,nobs=192,mh_replic=1500,mh_nblocks=1,mh_jscale=0.8,smoother,consider_all_endogenous);
|
||||
|
||||
smoother2histval(period = 5);
|
||||
|
||||
stoch_simul(nomoments);
|
||||
|
||||
forecast;
|
Loading…
Reference in New Issue