Dynare++: use std::unique_ptr for memory management of workers in thread groups

time-shift
Sébastien Villemot 2019-01-14 12:19:00 +01:00
parent 3c5afded8e
commit 4a72266d05
4 changed files with 34 additions and 65 deletions
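
The pattern applied at every changed call site is the same: construct the worker with std::make_unique and hand ownership straight to the thread group, whose insert() now takes the pointer by value; because the group stores unique_ptr, its hand-written destructor loop of delete calls disappears. A minimal sketch of that ownership scheme (illustrative names only, not the actual sthread API, and it runs the workers sequentially rather than dispatching threads):

#include <iostream>
#include <list>
#include <memory>
#include <utility>

// Hypothetical stand-ins for the sthread worker/group types.
struct Worker
{
  virtual void operator()() = 0;
  virtual ~Worker() = default;
};

struct PrintWorker : Worker
{
  int id;
  explicit PrintWorker(int i) : id{i} {}
  void operator()() override { std::cout << "worker " << id << '\n'; }
};

class Group
{
  std::list<std::unique_ptr<Worker>> tlist; // the group owns its workers
public:
  // Ownership is handed over by value, as in detach_thread_group::insert().
  void insert(std::unique_ptr<Worker> w) { tlist.push_back(std::move(w)); }
  void run() { for (auto &w : tlist) (*w)(); }
  // No hand-written destructor: each unique_ptr deletes its worker.
};

int main()
{
  Group gr;
  for (int i = 0; i < 3; i++)
    gr.insert(std::make_unique<PrintWorker>(i)); // no bare new, no delete
  gr.run();
}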


@@ -169,10 +169,8 @@ public:
     out.zeros();
     THREAD_GROUP gr;
     for (int ti = 0; ti < fs.getNum(); ti++)
-      {
-        gr.insert(new IntegrationWorker<_Tpit>(*this, fs.getFunc(ti),
-                                               level, ti, fs.getNum(), out));
-      }
+      gr.insert(std::make_unique<IntegrationWorker<_Tpit>>(*this, fs.getFunc(ti),
+                                                           level, ti, fs.getNum(), out));
     gr.run();
   }
   void


@@ -10,6 +10,7 @@
 #include <limits>
 #include <utility>
+#include <memory>
 template <>
 int DRFixPoint<KOrder::fold>::max_iter = 10000;
@@ -94,10 +95,8 @@ SimResults::simulate(int num_sim, const DecisionRule &dr, const Vector &start,
     {
       RandomShockRealization sr(vcov, system_random_generator.int_uniform());
       rsrs.push_back(sr);
-      THREAD *worker = new
-        SimulationWorker(*this, dr, DecisionRule::horner,
-                         num_per+num_burn, start, rsrs.back());
-      gr.insert(worker);
+      gr.insert(std::make_unique<SimulationWorker>(*this, dr, DecisionRule::horner,
+                                                   num_per+num_burn, start, rsrs.back()));
     }
   gr.run();
 }
@@ -334,12 +333,8 @@ SimResultsIRF::simulate(const DecisionRule &dr)
 {
   THREAD_GROUP gr;
   for (int idata = 0; idata < control.getNumSets(); idata++)
-    {
-      THREAD *worker = new
-        SimulationIRFWorker(*this, dr, DecisionRule::horner,
-                            num_per, idata, ishock, imp);
-      gr.insert(worker);
-    }
+    gr.insert(std::make_unique<SimulationIRFWorker>(*this, dr, DecisionRule::horner,
+                                                    num_per, idata, ishock, imp));
   gr.run();
 }
@@ -420,10 +415,8 @@ RTSimResultsStats::simulate(int num_sim, const DecisionRule &dr, const Vector &s
     {
       RandomShockRealization sr(vcov, system_random_generator.int_uniform());
       rsrs.push_back(sr);
-      THREAD *worker = new
-        RTSimulationWorker(*this, dr, DecisionRule::horner,
-                           num_per, start, rsrs.back());
-      gr.insert(worker);
+      gr.insert(std::make_unique<RTSimulationWorker>(*this, dr, DecisionRule::horner,
+                                                     num_per, start, rsrs.back()));
     }
   gr.run();
 }


@@ -4,6 +4,8 @@
 #include "pyramid_prod2.hh"
 #include "ps_tensor.hh"
+#include <memory>
 double FoldedStackContainer::fill_threshold = 0.00005;
 double UnfoldedStackContainer::fill_threshold = 0.00005;
@@ -42,13 +44,9 @@ FoldedStackContainer::multAndAdd(int dim, const FGSContainer &c, FGSTensor &out)
   THREAD_GROUP gr;
   SymmetrySet ss(dim, c.num());
   for (symiterator si(ss); !si.isEnd(); ++si)
-    {
-      if (c.check(*si))
-        {
-          THREAD *worker = new WorkerFoldMAADense(*this, *si, c, out);
-          gr.insert(worker);
-        }
-    }
+    if (c.check(*si))
+      gr.insert(std::make_unique<WorkerFoldMAADense>(*this, *si, c, out));
   gr.run();
 }
@@ -81,10 +79,8 @@ FoldedStackContainer::multAndAddSparse1(const FSSparseTensor &t,
   THREAD_GROUP gr;
   UFSTensor dummy(0, numStacks(), t.dimen());
   for (Tensor::index ui = dummy.begin(); ui != dummy.end(); ++ui)
-    {
-      THREAD *worker = new WorkerFoldMAASparse1(*this, t, out, ui.getCoor());
-      gr.insert(worker);
-    }
+    gr.insert(std::make_unique<WorkerFoldMAASparse1>(*this, t, out, ui.getCoor()));
   gr.run();
 }
@@ -155,10 +151,8 @@ FoldedStackContainer::multAndAddSparse2(const FSSparseTensor &t,
   THREAD_GROUP gr;
   FFSTensor dummy_f(0, numStacks(), t.dimen());
   for (Tensor::index fi = dummy_f.begin(); fi != dummy_f.end(); ++fi)
-    {
-      THREAD *worker = new WorkerFoldMAASparse2(*this, t, out, fi.getCoor());
-      gr.insert(worker);
-    }
+    gr.insert(std::make_unique<WorkerFoldMAASparse2>(*this, t, out, fi.getCoor()));
   gr.run();
 }
@@ -253,10 +247,8 @@ FoldedStackContainer::multAndAddSparse4(const FSSparseTensor &t, FGSTensor &out)
   THREAD_GROUP gr;
   FFSTensor dummy_f(0, numStacks(), t.dimen());
   for (Tensor::index fi = dummy_f.begin(); fi != dummy_f.end(); ++fi)
-    {
-      THREAD *worker = new WorkerFoldMAASparse4(*this, t, out, fi.getCoor());
-      gr.insert(worker);
-    }
+    gr.insert(std::make_unique<WorkerFoldMAASparse4>(*this, t, out, fi.getCoor()));
   gr.run();
 }
@@ -405,13 +397,9 @@ UnfoldedStackContainer::multAndAdd(int dim, const UGSContainer &c,
   THREAD_GROUP gr;
   SymmetrySet ss(dim, c.num());
   for (symiterator si(ss); !si.isEnd(); ++si)
-    {
-      if (c.check(*si))
-        {
-          THREAD *worker = new WorkerUnfoldMAADense(*this, *si, c, out);
-          gr.insert(worker);
-        }
-    }
+    if (c.check(*si))
+      gr.insert(std::make_unique<WorkerUnfoldMAADense>(*this, *si, c, out));
   gr.run();
 }
@@ -456,10 +444,8 @@ UnfoldedStackContainer::multAndAddSparse1(const FSSparseTensor &t,
   THREAD_GROUP gr;
   UFSTensor dummy(0, numStacks(), t.dimen());
   for (Tensor::index ui = dummy.begin(); ui != dummy.end(); ++ui)
-    {
-      THREAD *worker = new WorkerUnfoldMAASparse1(*this, t, out, ui.getCoor());
-      gr.insert(worker);
-    }
+    gr.insert(std::make_unique<WorkerUnfoldMAASparse1>(*this, t, out, ui.getCoor()));
   gr.run();
 }
@@ -562,10 +548,8 @@ UnfoldedStackContainer::multAndAddSparse2(const FSSparseTensor &t,
   THREAD_GROUP gr;
   FFSTensor dummy_f(0, numStacks(), t.dimen());
   for (Tensor::index fi = dummy_f.begin(); fi != dummy_f.end(); ++fi)
-    {
-      THREAD *worker = new WorkerUnfoldMAASparse2(*this, t, out, fi.getCoor());
-      gr.insert(worker);
-    }
+    gr.insert(std::make_unique<WorkerUnfoldMAASparse2>(*this, t, out, fi.getCoor()));
   gr.run();
 }
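
The group side of the change follows in sthread.hh: tlist becomes a list of unique_ptr, insert() takes the smart pointer by value, and the destructor that walked the list calling delete collapses to = default. Note also why the insert() body is reordered there: once the argument has been moved into the list, the local pointer is empty, so the counter has to be installed before the move. A hypothetical illustration of that ordering (Thread, Counter and ThreadGroup are stand-in names, not the sthread types):

#include <list>
#include <memory>
#include <utility>

struct Counter
{
};

struct Thread
{
  Counter *counter = nullptr;
  void installCounter(Counter *c) { counter = c; }
};

struct ThreadGroup
{
  std::list<std::unique_ptr<Thread>> tlist;
  Counter counter;

  void
  insert(std::unique_ptr<Thread> c)
  {
    c->installCounter(&counter);   // must run while c still owns the thread
    tlist.push_back(std::move(c)); // after the move, c is empty
  }
  // No user-written destructor: the unique_ptr elements delete the threads.
};

int main()
{
  ThreadGroup gr;
  gr.insert(std::make_unique<Thread>());
}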


@@ -69,6 +69,8 @@
 #include <list>
 #include <map>
 #include <type_traits>
+#include <memory>
+#include <utility>
 namespace sthread
 {
@@ -517,29 +519,21 @@ namespace sthread
     using _Ttraits = thread_traits<thread_impl>;
     using _Ctraits = cond_traits<thread_impl>;
     using _Ctype = detach_thread<thread_impl>;
-    list<_Ctype *> tlist;
-    using iterator = typename list<_Ctype *>::iterator;
+    list<unique_ptr<_Ctype>> tlist;
+    using iterator = typename list<unique_ptr<_Ctype>>::iterator;
     condition_counter<thread_impl> counter;
   public:
     static int max_parallel_threads;
     /* When inserting, the counter is installed to the thread. */
     void
-    insert(_Ctype *c)
+    insert(unique_ptr<_Ctype> c)
    {
-      tlist.push_back(c);
       c->installCounter(&counter);
+      tlist.push_back(move(c));
    }
     /* The destructor is clear. */
-    ~detach_thread_group()
-    {
-      while (!tlist.empty())
-        {
-          delete tlist.front();
-          tlist.pop_front();
-        }
-    }
+    ~detach_thread_group() = default;
     /* We cycle through all threads in the group, and in each cycle we wait
        for the change in the |counter|. If the counter indicates less than