diff --git a/dynare++/tl/cc/equivalence.cc b/dynare++/tl/cc/equivalence.cc index 0b84089eb..ca0a525ab 100644 --- a/dynare++/tl/cc/equivalence.cc +++ b/dynare++/tl/cc/equivalence.cc @@ -4,7 +4,7 @@ #include "permutation.hh" #include "tl_exception.hh" -#include +#include int OrdSequence::operator[](int i) const @@ -93,12 +93,12 @@ OrdSequence::average() const /* Debug print. */ void -OrdSequence::print(const char *prefix) const +OrdSequence::print(const std::string &prefix) const { - printf("%s", prefix); + std::cout << prefix; for (int i : data) - printf("%d ", i); - printf("\n"); + std::cout << i << ' '; + std::cout << '\n'; } Equivalence::Equivalence(int num) @@ -112,7 +112,7 @@ Equivalence::Equivalence(int num) } } -Equivalence::Equivalence(int num, const char *dummy) +Equivalence::Equivalence(int num, const std::string &dummy) : n(num) { OrdSequence s; @@ -121,12 +121,7 @@ Equivalence::Equivalence(int num, const char *dummy) classes.push_back(s); } -/* Copy constructors. The second also glues a given couple. */ - -Equivalence::Equivalence(const Equivalence &e) - - -= default; +/* Copy constructor that also glues a given couple. 
*/ Equivalence::Equivalence(const Equivalence &e, int i1, int i2) : n(e.n), @@ -144,15 +139,6 @@ Equivalence::Equivalence(const Equivalence &e, int i1, int i2) } } -const Equivalence & -Equivalence::operator=(const Equivalence &e) -{ - classes.clear(); - n = e.n; - classes = e.classes; - return *this; -} - bool Equivalence::operator==(const Equivalence &e) const { @@ -173,7 +159,7 @@ Equivalence::findHaving(int i) const auto si = classes.begin(); while (si != classes.end()) { - if ((*si).has(i)) + if (si->has(i)) return si; ++si; } @@ -188,7 +174,7 @@ Equivalence::findHaving(int i) auto si = classes.begin(); while (si != classes.end()) { - if ((*si).has(i)) + if (si->has(i)) return si; ++si; } @@ -251,7 +237,7 @@ Equivalence::trace(IntSequence &out, int num) const int i = 0; int nc = 0; for (auto it = begin(); it != end() && nc < num; ++it, ++nc) - for (int j = 0; j < (*it).length(); j++, i++) + for (int j = 0; j < it->length(); j++, i++) { TL_RAISE_IF(i >= out.size(), "Wrong size of output sequence in Equivalence::trace"); @@ -270,22 +256,22 @@ Equivalence::trace(IntSequence &out, const Permutation &per) const for (int iclass = 0; iclass < numClasses(); iclass++) { auto itper = find(per.getMap()[iclass]); - for (int j = 0; j < (*itper).length(); j++, i++) + for (int j = 0; j < itper->length(); j++, i++) out[i] = (*itper)[j]; } } /* Debug print. */ void -Equivalence::print(const char *prefix) const +Equivalence::print(const std::string &prefix) const { int i = 0; for (auto it = classes.begin(); it != classes.end(); ++it, i++) { - printf("%sclass %d: ", prefix, i); - (*it).print(""); + std::cout << prefix << "class " << i << ": "; + it->print(""); } } @@ -310,8 +296,7 @@ Equivalence::print(const char *prefix) const (since it is constructed by gluing attempts). 
*/ EquivalenceSet::EquivalenceSet(int num) - : n(num), - equis() + : n(num) { std::list added; Equivalence first(n); @@ -323,10 +308,7 @@ EquivalenceSet::EquivalenceSet(int num) added.pop_front(); } if (n > 1) - { - Equivalence last(n, ""); - equis.push_back(last); - } + equis.emplace_back(n, ""); } /* This method is used in |addParents| and returns |true| if the object @@ -371,25 +353,23 @@ EquivalenceSet::addParents(const Equivalence &e, if (!has(ns)) { added.push_back(ns); - equis.push_back(ns); + equis.push_back(std::move(ns)); } } } /* Debug print. */ void -EquivalenceSet::print(const char *prefix) const +EquivalenceSet::print(const std::string &prefix) const { - char tmp[100]; - strcpy(tmp, prefix); - strcat(tmp, " "); int i = 0; for (auto it = equis.begin(); it != equis.end(); ++it, i++) { - printf("%sequivalence %d:(classes %d)\n", prefix, i, (*it).numClasses()); - (*it).print(tmp); + std::cout << prefix << "equivalence " << i << ":(classes " + << it->numClasses() << ")\n"; + it->print(prefix + " "); } } @@ -404,15 +384,13 @@ EquivalenceBundle::EquivalenceBundle(int nmax) const EquivalenceSet & EquivalenceBundle::get(int n) const { - if (n > (int) (bundle.size()) || n < 1) + if (n > static_cast(bundle.size()) || n < 1) { TL_RAISE("Equivalence set not found in EquivalenceBundle::get"); return bundle[0]; } else - { - return bundle[n-1]; - } + return bundle[n-1]; } /* Get |curmax| which is a maximum size in the bundle, and generate for diff --git a/dynare++/tl/cc/equivalence.hh b/dynare++/tl/cc/equivalence.hh index 34bc61140..5149e44ba 100644 --- a/dynare++/tl/cc/equivalence.hh +++ b/dynare++/tl/cc/equivalence.hh @@ -43,6 +43,7 @@ #include #include +#include /* Here is the abstraction for an equivalence class. We implement it as |vector|. 
We have a constructor for empty class, copy @@ -58,11 +59,10 @@ public: OrdSequence() : data() { } - OrdSequence(const OrdSequence &s) - = default; - OrdSequence & - operator=(const OrdSequence &s) - = default; + OrdSequence(const OrdSequence &) = default; + OrdSequence(OrdSequence &&) = default; + OrdSequence &operator=(const OrdSequence &) = default; + OrdSequence &operator=(OrdSequence &&) = default; bool operator==(const OrdSequence &s) const; int operator[](int i) const; bool operator<(const OrdSequence &s) const; @@ -79,7 +79,7 @@ public: void add(int i); void add(const OrdSequence &s); bool has(int i) const; - void print(const char *prefix) const; + void print(const std::string &prefix) const; private: double average() const; }; @@ -107,11 +107,13 @@ public: The third is the copy constructor. And the fourth is the copy constructor plus gluing |i1| and |i2| in one class. */ Equivalence(int num); - Equivalence(int num, const char *dummy); - Equivalence(const Equivalence &e); + Equivalence(int num, const std::string &dummy); + Equivalence(const Equivalence &) = default; + Equivalence(Equivalence &&) = default; Equivalence(const Equivalence &e, int i1, int i2); - const Equivalence &operator=(const Equivalence &e); + Equivalence &operator=(const Equivalence &) = default; + Equivalence &operator=(Equivalence &&) = default; bool operator==(const Equivalence &e) const; bool operator!=(const Equivalence &e) const @@ -135,7 +137,7 @@ public: trace(out, numClasses()); } void trace(IntSequence &out, const Permutation &per) const; - void print(const char *prefix) const; + void print(const std::string &prefix) const; seqit begin() { @@ -185,7 +187,7 @@ class EquivalenceSet public: using const_iterator = std::list::const_iterator; EquivalenceSet(int num); - void print(const char *prefix) const; + void print(const std::string &prefix) const; const_iterator begin() const { @@ -214,7 +216,7 @@ class EquivalenceBundle public: EquivalenceBundle(int nmax); ~EquivalenceBundle() = 
default; - const EquivalenceSet&get(int n) const; + const EquivalenceSet &get(int n) const; void generateUpTo(int nmax); }; diff --git a/dynare++/tl/cc/fs_tensor.cc b/dynare++/tl/cc/fs_tensor.cc index 13e29450b..2c7590d9a 100644 --- a/dynare++/tl/cc/fs_tensor.cc +++ b/dynare++/tl/cc/fs_tensor.cc @@ -16,7 +16,7 @@ by an appropriate item of |x| and added to the column of $[g]$ tensor. */ FFSTensor::FFSTensor(const FFSTensor &t, const ConstVector &x) - : FTensor(along_col, IntSequence(t.dimen()-1, t.nvar()), + : FTensor(indor::along_col, IntSequence(t.dimen()-1, t.nvar()), t.nrows(), calcMaxOffset(t.nvar(), t.dimen()-1), t.dimen()-1), nv(t.nvar()) { @@ -32,7 +32,7 @@ FFSTensor::FFSTensor(const FFSTensor &t, const ConstVector &x) for (int i = 0; i < nvar(); i++) { IntSequence from_ind(i, to.getCoor()); - Tensor::index from(&t, from_ind); + Tensor::index from(t, from_ind); addColumn(x[i], t, *from, *to); } } @@ -55,14 +55,14 @@ FFSTensor::calcMaxOffset(int nvar, int d) /* The conversion from sparse tensor is clear. We go through all the tensor and write to the dense what is found. */ FFSTensor::FFSTensor(const FSSparseTensor &t) - : FTensor(along_col, IntSequence(t.dimen(), t.nvar()), + : FTensor(indor::along_col, IntSequence(t.dimen(), t.nvar()), t.nrows(), calcMaxOffset(t.nvar(), t.dimen()), t.dimen()), nv(t.nvar()) { zeros(); for (const auto & it : t.getMap()) { - index ind(this, it.first); + index ind(*this, it.first); get(it.second.first, *ind) = it.second.second; } } @@ -73,13 +73,13 @@ FFSTensor::FFSTensor(const FSSparseTensor &t) copy the column. 
*/ FFSTensor::FFSTensor(const UFSTensor &ut) - : FTensor(along_col, IntSequence(ut.dimen(), ut.nvar()), + : FTensor(indor::along_col, IntSequence(ut.dimen(), ut.nvar()), ut.nrows(), calcMaxOffset(ut.nvar(), ut.dimen()), ut.dimen()), nv(ut.nvar()) { for (index in = begin(); in != end(); ++in) { - index src(&ut, in.getCoor()); + index src(ut, in.getCoor()); copyColumn(ut, *src, *in); } } @@ -156,7 +156,7 @@ FFSTensor::addSubTensor(const FGSTensor &t) IntSequence c(ind.getCoor()); c.add(1, shift); c.sort(); - Tensor::index tar(this, c); + Tensor::index tar(*this, c); addColumn(t, *ind, *tar); } } @@ -167,7 +167,7 @@ FFSTensor::addSubTensor(const FGSTensor &t) regularity of the unfolded tensor. */ UFSTensor::UFSTensor(const UFSTensor &t, const ConstVector &x) - : UTensor(along_col, IntSequence(t.dimen()-1, t.nvar()), + : UTensor(indor::along_col, IntSequence(t.dimen()-1, t.nvar()), t.nrows(), calcMaxOffset(t.nvar(), t.dimen()-1), t.dimen()-1), nv(t.nvar()) { @@ -190,13 +190,13 @@ UFSTensor::UFSTensor(const UFSTensor &t, const ConstVector &x) columns of folded tensor, and then call |unfoldData()|. 
*/ UFSTensor::UFSTensor(const FFSTensor &ft) - : UTensor(along_col, IntSequence(ft.dimen(), ft.nvar()), + : UTensor(indor::along_col, IntSequence(ft.dimen(), ft.nvar()), ft.nrows(), calcMaxOffset(ft.nvar(), ft.dimen()), ft.dimen()), nv(ft.nvar()) { for (index src = ft.begin(); src != ft.end(); ++src) { - index in(this, src.getCoor()); + index in(*this, src.getCoor()); copyColumn(ft, *src, *in); } unfoldData(); @@ -266,7 +266,7 @@ UFSTensor::addSubTensor(const UGSTensor &t) c.add(-1, shift); if (c.isPositive() && c.less(t.getDims().getNVX())) { - Tensor::index from(&t, c); + Tensor::index from(t, c); addColumn(t, *from, *tar); } } @@ -283,7 +283,7 @@ UFSTensor::unfoldData() { IntSequence v(in.getCoor()); v.sort(); - index tmp(this, v); + index tmp(*this, v); copyColumn(*tmp, *in); } } diff --git a/dynare++/tl/cc/fs_tensor.hh b/dynare++/tl/cc/fs_tensor.hh index 5417841c2..9ea024601 100644 --- a/dynare++/tl/cc/fs_tensor.hh +++ b/dynare++/tl/cc/fs_tensor.hh @@ -52,7 +52,7 @@ public: The fifth constructs a subtensor of selected rows. */ FFSTensor(int r, int nvar, int d) - : FTensor(along_col, IntSequence(d, nvar), + : FTensor(indor::along_col, IntSequence(d, nvar), r, calcMaxOffset(nvar, d), d), nv(nvar) { } @@ -97,7 +97,7 @@ class UFSTensor : public UTensor int nv; public: UFSTensor(int r, int nvar, int d) - : UTensor(along_col, IntSequence(d, nvar), + : UTensor(indor::along_col, IntSequence(d, nvar), r, calcMaxOffset(nvar, d), d), nv(nvar) { } diff --git a/dynare++/tl/cc/gs_tensor.cc b/dynare++/tl/cc/gs_tensor.cc index cec65663c..00484fa16 100644 --- a/dynare++/tl/cc/gs_tensor.cc +++ b/dynare++/tl/cc/gs_tensor.cc @@ -138,13 +138,13 @@ TensorDimens::decrement(IntSequence &v) const /* Here we go through columns of folded, calculate column of unfolded, and copy data. 
*/ FGSTensor::FGSTensor(const UGSTensor &ut) - : FTensor(along_col, ut.tdims.getNVX(), ut.nrows(), + : FTensor(indor::along_col, ut.tdims.getNVX(), ut.nrows(), ut.tdims.calcFoldMaxOffset(), ut.dimen()), tdims(ut.tdims) { for (index ti = begin(); ti != end(); ++ti) { - index ui(&ut, ti.getCoor()); + index ui(ut, ti.getCoor()); copyColumn(ut, *ui, *ti); } } @@ -160,7 +160,7 @@ FGSTensor::FGSTensor(const UGSTensor &ut) obtain coordinates in the |this| tensor and we copy the item. */ FGSTensor::FGSTensor(const FSSparseTensor &t, const IntSequence &ss, const IntSequence &coor, const TensorDimens &td) - : FTensor(along_col, td.getNVX(), t.nrows(), + : FTensor(indor::along_col, td.getNVX(), t.nrows(), td.calcFoldMaxOffset(), td.dimen()), tdims(td) { @@ -192,7 +192,7 @@ FGSTensor::FGSTensor(const FSSparseTensor &t, const IntSequence &ss, { IntSequence c((*run).first); c.add(-1, lb); - Tensor::index ind(this, c); + Tensor::index ind(*this, c); TL_RAISE_IF(*ind < 0 || *ind >= ncols(), "Internal error in slicing constructor of FGSTensor"); get((*run).second.first, *ind) = (*run).second.second; @@ -204,7 +204,7 @@ FGSTensor::FGSTensor(const FSSparseTensor &t, const IntSequence &ss, /* The code is similar to |@<|FGSTensor| slicing from |FSSparseTensor|@>|. 
*/ FGSTensor::FGSTensor(const FFSTensor &t, const IntSequence &ss, const IntSequence &coor, const TensorDimens &td) - : FTensor(along_col, td.getNVX(), t.nrows(), + : FTensor(indor::along_col, td.getNVX(), t.nrows(), td.calcFoldMaxOffset(), td.dimen()), tdims(td) { @@ -226,8 +226,8 @@ FGSTensor::FGSTensor(const FFSTensor &t, const IntSequence &ss, } zeros(); - Tensor::index lbi(&t, lb); - Tensor::index ubi(&t, ub); + Tensor::index lbi(t, lb); + Tensor::index ubi(t, ub); ++ubi; for (Tensor::index run = lbi; run != ubi; ++run) { @@ -235,7 +235,7 @@ FGSTensor::FGSTensor(const FFSTensor &t, const IntSequence &ss, { IntSequence c(run.getCoor()); c.add(-1, lb); - Tensor::index ind(this, c); + Tensor::index ind(*this, c); TL_RAISE_IF(*ind < 0 || *ind >= ncols(), "Internal error in slicing constructor of FGSTensor"); copyColumn(t, *run, *ind); @@ -245,13 +245,13 @@ FGSTensor::FGSTensor(const FFSTensor &t, const IntSequence &ss, // |FGSTensor| conversion from |GSSparseTensor| FGSTensor::FGSTensor(const GSSparseTensor &t) - : FTensor(along_col, t.getDims().getNVX(), t.nrows(), + : FTensor(indor::along_col, t.getDims().getNVX(), t.nrows(), t.getDims().calcFoldMaxOffset(), t.dimen()), tdims(t.getDims()) { zeros(); for (const auto & it : t.getMap()) { - index ind(this, it.first); + index ind(*this, it.first); get(it.second.first, *ind) = it.second.second; } } @@ -338,13 +338,13 @@ FGSTensor::contractAndAdd(int i, FGSTensor &out, unfold data within the unfolded tensor. */ UGSTensor::UGSTensor(const FGSTensor &ft) - : UTensor(along_col, ft.tdims.getNVX(), ft.nrows(), + : UTensor(indor::along_col, ft.tdims.getNVX(), ft.nrows(), ft.tdims.calcUnfoldMaxOffset(), ft.dimen()), tdims(ft.tdims) { for (index fi = ft.begin(); fi != ft.end(); ++fi) { - index ui(this, fi.getCoor()); + index ui(*this, fi.getCoor()); copyColumn(ft, *fi, *ui); } unfoldData(); @@ -354,7 +354,7 @@ UGSTensor::UGSTensor(const FGSTensor &ft) /* This makes a folded slice from the sparse tensor and unfolds it. 
*/ UGSTensor::UGSTensor(const FSSparseTensor &t, const IntSequence &ss, const IntSequence &coor, const TensorDimens &td) - : UTensor(along_col, td.getNVX(), t.nrows(), + : UTensor(indor::along_col, td.getNVX(), t.nrows(), td.calcUnfoldMaxOffset(), td.dimen()), tdims(td) { @@ -364,7 +364,7 @@ UGSTensor::UGSTensor(const FSSparseTensor &t, const IntSequence &ss, FGSTensor ft(t, ss, coor, td); for (index fi = ft.begin(); fi != ft.end(); ++fi) { - index ui(this, fi.getCoor()); + index ui(*this, fi.getCoor()); copyColumn(ft, *fi, *ui); } unfoldData(); @@ -374,7 +374,7 @@ UGSTensor::UGSTensor(const FSSparseTensor &t, const IntSequence &ss, /* This makes a folded slice from dense and unfolds it. */ UGSTensor::UGSTensor(const UFSTensor &t, const IntSequence &ss, const IntSequence &coor, const TensorDimens &td) - : UTensor(along_col, td.getNVX(), t.nrows(), + : UTensor(indor::along_col, td.getNVX(), t.nrows(), td.calcUnfoldMaxOffset(), td.dimen()), tdims(td) { @@ -382,7 +382,7 @@ UGSTensor::UGSTensor(const UFSTensor &t, const IntSequence &ss, FGSTensor ft(folded, ss, coor, td); for (index fi = ft.begin(); fi != ft.end(); ++fi) { - index ui(this, fi.getCoor()); + index ui(*this, fi.getCoor()); copyColumn(ft, *fi, *ui); } unfoldData(); @@ -450,7 +450,7 @@ UGSTensor::getFirstIndexOf(const index &in) const vtmp.sort(); last += tdims.getSym()[i]; } - return index(this, v); + return index(*this, v); } /* Here is perfectly same code with the same semantics as in diff --git a/dynare++/tl/cc/gs_tensor.hh b/dynare++/tl/cc/gs_tensor.hh index 89bc51b45..d7b52e35d 100644 --- a/dynare++/tl/cc/gs_tensor.hh +++ b/dynare++/tl/cc/gs_tensor.hh @@ -147,7 +147,7 @@ public: from |FFSTensor| to |FGSTensor|. */ FGSTensor(int r, const TensorDimens &td) - : FTensor(along_col, td.getNVX(), r, + : FTensor(indor::along_col, td.getNVX(), r, td.calcFoldMaxOffset(), td.dimen()), tdims(td) { } @@ -216,7 +216,7 @@ public: constructor allows for in-place conversion from |UFSTensor| to |UGSTensor|. 
*/ UGSTensor(int r, const TensorDimens &td) - : UTensor(along_col, td.getNVX(), r, + : UTensor(indor::along_col, td.getNVX(), r, td.calcUnfoldMaxOffset(), td.dimen()), tdims(td) { } diff --git a/dynare++/tl/cc/normal_moments.cc b/dynare++/tl/cc/normal_moments.cc index c4de8d498..f3b7b0877 100644 --- a/dynare++/tl/cc/normal_moments.cc +++ b/dynare++/tl/cc/normal_moments.cc @@ -62,7 +62,7 @@ UNormalMoments::generateMoments(int maxdim, const TwoDMatrix &v) { IntSequence ind(kronv->dimen()); per.apply(it.getCoor(), ind); - Tensor::index it2(mom, ind); + Tensor::index it2(*mom, ind); mom->get(*it2, 0) += kronv->get(*it, 0); } } diff --git a/dynare++/tl/cc/permutation.cc b/dynare++/tl/cc/permutation.cc index 2bae27771..0afbe6c3d 100644 --- a/dynare++/tl/cc/permutation.cc +++ b/dynare++/tl/cc/permutation.cc @@ -70,54 +70,34 @@ Permutation::computeSortingMap(const IntSequence &s) } PermutationSet::PermutationSet() - : pers(new const Permutation *[size]) { - pers[0] = new Permutation(1); + pers.emplace_back(1); } PermutationSet::PermutationSet(const PermutationSet &sp, int n) - : order(n), size(n*sp.size), - pers(new const Permutation *[size]) + : order(n), size(n*sp.size) { - for (int i = 0; i < size; i++) - pers[i] = nullptr; - TL_RAISE_IF(n != sp.order+1, "Wrong new order in PermutationSet constructor"); - int k = 0; for (int i = 0; i < sp.size; i++) - { - for (int j = 0; j < order; j++, k++) - { - pers[k] = new Permutation(*(sp.pers[i]), j); - } - } + for (int j = 0; j < order; j++) + pers.emplace_back(sp.pers[i], j); } -PermutationSet::~PermutationSet() -{ - for (int i = 0; i < size; i++) - if (pers[i]) - delete pers[i]; - delete [] pers; -} - -std::vector +std::vector PermutationSet::getPreserving(const IntSequence &s) const { TL_RAISE_IF(s.size() != order, "Wrong sequence length in PermutationSet::getPreserving"); - std::vector res; + std::vector res; IntSequence tmp(s.size()); for (int i = 0; i < size; i++) { - pers[i]->apply(s, tmp); + pers[i].apply(s, tmp); if (s 
== tmp) - { - res.push_back(pers[i]); - } + res.push_back(pers[i]); } return res; @@ -129,35 +109,25 @@ PermutationBundle::PermutationBundle(int nmax) generateUpTo(nmax); } -PermutationBundle::~PermutationBundle() -{ - for (auto & i : bundle) - delete i; -} - const PermutationSet & PermutationBundle::get(int n) const { - if (n > (int) (bundle.size()) || n < 1) + if (n > static_cast<int>(bundle.size()) || n < 1) { TL_RAISE("Permutation set not found in PermutationSet::get"); - return *(bundle[0]); + return bundle[0]; } else - { - return *(bundle[n-1]); - } + return bundle[n-1]; } void PermutationBundle::generateUpTo(int nmax) { if (bundle.size() == 0) - bundle.push_back(new PermutationSet()); + bundle.emplace_back(); int curmax = bundle.size(); for (int n = curmax+1; n <= nmax; n++) - { - bundle.push_back(new PermutationSet(*(bundle.back()), n)); - } + bundle.emplace_back(bundle.back(), n); } diff --git a/dynare++/tl/cc/permutation.hh b/dynare++/tl/cc/permutation.hh index a8206d9b5..22d122f00 100644 --- a/dynare++/tl/cc/permutation.hh +++ b/dynare++/tl/cc/permutation.hh @@ -76,9 +76,8 @@ public: { computeSortingMap(s); }; - Permutation(const Permutation &p) - - = default; + Permutation(const Permutation &) = default; + Permutation(Permutation &&) = default; Permutation(const Permutation &p1, const Permutation &p2) : permap(p2.permap) { @@ -88,9 +87,8 @@ : permap(p.size(), p.permap, i) { } - Permutation & - operator=(const Permutation &p) - = default; + Permutation &operator=(const Permutation &) = default; + Permutation &operator=(Permutation &&) = default; bool operator==(const Permutation &p) { @@ -129,7 +127,7 @@ protected: element sets. The second constructor constructs a new permutation set over $n$ from all permutations over $n-1$. The parameter $n$ need not to be provided, but it serves to distinguish the constructor from copy - constructor, which is not provided. + constructor. 
 The method |getPreserving| returns a factor subgroup of permutations, which are invariants with respect to the given sequence. This are all @@ -140,11 +138,11 @@ class PermutationSet { int order{1}; int size{1}; - const Permutation **const pers; + std::vector<Permutation> pers; public: PermutationSet(); PermutationSet(const PermutationSet &ps, int n); - ~PermutationSet(); + ~PermutationSet() = default; int getNum() const { @@ -153,9 +151,9 @@ public: const Permutation & get(int i) const { - return *(pers[i]); + return pers[i]; } - std::vector<const Permutation *> getPreserving(const IntSequence &s) const; + std::vector<Permutation> getPreserving(const IntSequence &s) const; }; /* The permutation bundle encapsulates all permutations sets up to some @@ -163,10 +161,10 @@ public: class PermutationBundle { - std::vector<PermutationSet *> bundle; + std::vector<PermutationSet> bundle; public: PermutationBundle(int nmax); - ~PermutationBundle(); + ~PermutationBundle() = default; const PermutationSet&get(int n) const; void generateUpTo(int nmax); }; diff --git a/dynare++/tl/cc/ps_tensor.cc b/dynare++/tl/cc/ps_tensor.cc index bd86d5176..209d97dd8 100644 --- a/dynare++/tl/cc/ps_tensor.cc +++ b/dynare++/tl/cc/ps_tensor.cc @@ -25,7 +25,7 @@ UPSTensor::decideFillMethod(const FSSparseTensor &t) UPSTensor::UPSTensor(const FSSparseTensor &t, const IntSequence &ss, const IntSequence &coor, const PerTensorDimens &ptd) - : UTensor(along_col, ptd.getNVX(), + : UTensor(indor::along_col, ptd.getNVX(), t.nrows(), ptd.calcUnfoldMaxOffset(), ptd.dimen()), tdims(ptd) { @@ -84,7 +84,7 @@ UPSTensor::addTo(FGSTensor &out) const { IntSequence vtmp(dimen()); tdims.getPer().apply(in.getCoor(), vtmp); - index tin(this, vtmp); + index tin(*this, vtmp); out.addColumn(*this, *tin, *in); } } @@ -127,7 +127,7 @@ UPSTensor::addTo(UGSTensor &out) const // permute |outrun| IntSequence perrun(out.dimen()); tdims.getPer().apply(outrun, perrun); - index from(this, perrun); + index from(*this, perrun); // construct submatrices ConstTwoDMatrix subfrom(*this, *from, cols); TwoDMatrix 
subout(out, out_col, cols); @@ -215,7 +215,7 @@ UPSTensor::fillFromSparseTwo(const FSSparseTensor &t, const IntSequence &ss, } const PermutationSet &pset = tls.pbundle->get(coor.size()); - std::vector<const Permutation *> pp = pset.getPreserving(coor); + std::vector<Permutation> pp = pset.getPreserving(coor); Permutation unsort(coor); zeros(); @@ -231,8 +231,8 @@ UPSTensor::fillFromSparseTwo(const FSSparseTensor &t, const IntSequence &ss, for (auto & i : pp) { IntSequence cp(coor.size()); - i->apply(c, cp); - Tensor::index ind(this, cp); + i.apply(c, cp); + Tensor::index ind(*this, cp); TL_RAISE_IF(*ind < 0 || *ind >= ncols(), "Internal error in slicing constructor of UPSTensor"); get((*run).second.first, *ind) = (*run).second.second; @@ -345,7 +345,7 @@ UPSTensor::addTo(FGSTensor &out) const { IntSequence coor(dimen()); tdims.getPer().apply(tar.getCoor(), coor); - index src(this, coor); + index src(*this, coor); out.addColumn(*this, *src, *tar); } } @@ -372,7 +372,7 @@ FPSTensor::addTo(FGSTensor &out) const FPSTensor::FPSTensor(const TensorDimens &td, const Equivalence &e, const Permutation &p, const GSSparseTensor &a, const KronProdAll &kp) - : FTensor(along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(), + : FTensor(indor::along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(), a.nrows(), kp.ncols(), td.dimen()), tdims(td, e, p) { diff --git a/dynare++/tl/cc/ps_tensor.hh b/dynare++/tl/cc/ps_tensor.hh index 8827ce8f5..dd05cfd7d 100644 --- a/dynare++/tl/cc/ps_tensor.hh +++ b/dynare++/tl/cc/ps_tensor.hh @@ -172,28 +172,28 @@ public: |KronProdAllOptim|, which permutes the permuted equivalence classes. 
*/ UPSTensor(const TensorDimens &td, const Equivalence &e, const ConstTwoDMatrix &a, const KronProdAll &kp) - : UTensor(along_col, PerTensorDimens(td, e).getNVX(), + : UTensor(indor::along_col, PerTensorDimens(td, e).getNVX(), a.nrows(), kp.ncols(), td.dimen()), tdims(td, e) { kp.mult(a, *this); } UPSTensor(const TensorDimens &td, const Equivalence &e, const ConstTwoDMatrix &a, const KronProdAllOptim &kp) - : UTensor(along_col, PerTensorDimens(td, Permutation(e, kp.getPer())).getNVX(), + : UTensor(indor::along_col, PerTensorDimens(td, Permutation(e, kp.getPer())).getNVX(), a.nrows(), kp.ncols(), td.dimen()), tdims(td, Permutation(e, kp.getPer())) { kp.mult(a, *this); } UPSTensor(const TensorDimens &td, const Equivalence &e, const Permutation &p, const ConstTwoDMatrix &a, const KronProdAll &kp) - : UTensor(along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(), + : UTensor(indor::along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(), a.nrows(), kp.ncols(), td.dimen()), tdims(td, Permutation(e, p)) { kp.mult(a, *this); } UPSTensor(const TensorDimens &td, const Equivalence &e, const Permutation &p, const ConstTwoDMatrix &a, const KronProdAllOptim &kp) - : UTensor(along_col, PerTensorDimens(td, Permutation(e, Permutation(p, kp.getPer()))).getNVX(), + : UTensor(indor::along_col, PerTensorDimens(td, Permutation(e, Permutation(p, kp.getPer()))).getNVX(), a.nrows(), kp.ncols(), td.dimen()), tdims(td, Permutation(e, Permutation(p, kp.getPer()))) { kp.mult(a, *this); @@ -334,28 +334,28 @@ public: sparse tensor). 
*/ FPSTensor(const TensorDimens &td, const Equivalence &e, const ConstTwoDMatrix &a, const KronProdAll &kp) - : FTensor(along_col, PerTensorDimens(td, e).getNVX(), + : FTensor(indor::along_col, PerTensorDimens(td, e).getNVX(), a.nrows(), kp.ncols(), td.dimen()), tdims(td, e) { kp.mult(a, *this); } FPSTensor(const TensorDimens &td, const Equivalence &e, const ConstTwoDMatrix &a, const KronProdAllOptim &kp) - : FTensor(along_col, PerTensorDimens(td, Permutation(e, kp.getPer())).getNVX(), + : FTensor(indor::along_col, PerTensorDimens(td, Permutation(e, kp.getPer())).getNVX(), a.nrows(), kp.ncols(), td.dimen()), tdims(td, e, kp.getPer()) { kp.mult(a, *this); } FPSTensor(const TensorDimens &td, const Equivalence &e, const Permutation &p, const ConstTwoDMatrix &a, const KronProdAll &kp) - : FTensor(along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(), + : FTensor(indor::along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(), a.nrows(), kp.ncols(), td.dimen()), tdims(td, e, p) { kp.mult(a, *this); } FPSTensor(const TensorDimens &td, const Equivalence &e, const Permutation &p, const ConstTwoDMatrix &a, const KronProdAllOptim &kp) - : FTensor(along_col, PerTensorDimens(td, Permutation(e, Permutation(p, kp.getPer()))).getNVX(), + : FTensor(indor::along_col, PerTensorDimens(td, Permutation(e, Permutation(p, kp.getPer()))).getNVX(), a.nrows(), kp.ncols(), td.dimen()), tdims(td, e, Permutation(p, kp.getPer())) { kp.mult(a, *this); diff --git a/dynare++/tl/cc/pyramid_prod.cc b/dynare++/tl/cc/pyramid_prod.cc index 1bfcd7925..061df6fc1 100644 --- a/dynare++/tl/cc/pyramid_prod.cc +++ b/dynare++/tl/cc/pyramid_prod.cc @@ -67,7 +67,7 @@ USubTensor::addKronColumn(int i, const std::vector &ts, { IntSequence ind(pindex, lastdim, lastdim+t->dimen()); lastdim += t->dimen(); - index in(t, ind); + index in(*t, ind); tmpcols.push_back(t->getCol(*in)); } diff --git a/dynare++/tl/cc/pyramid_prod2.cc b/dynare++/tl/cc/pyramid_prod2.cc index 9252a1666..a37d9e377 100644 --- 
a/dynare++/tl/cc/pyramid_prod2.cc +++ b/dynare++/tl/cc/pyramid_prod2.cc @@ -79,7 +79,7 @@ IrregTensorHeader::calcMaxOffset() const multiply all columns of the header. */ IrregTensor::IrregTensor(const IrregTensorHeader &h) - : Tensor(along_row, IntSequence(h.dimen(), 0), h.end_seq, + : Tensor(indor::along_row, IntSequence(h.dimen(), 0), h.end_seq, h.calcMaxOffset(), 1, h.dimen()), header(h) { @@ -110,7 +110,7 @@ IrregTensor::addTo(FRSingleTensor &out) const { IntSequence tmp(it.getCoor()); tmp.sort(); - Tensor::index ind(&out, tmp); + Tensor::index ind(out, tmp); out.get(*ind, 0) += get(*it, 0); } } diff --git a/dynare++/tl/cc/rfs_tensor.cc b/dynare++/tl/cc/rfs_tensor.cc index 944af83c8..c0e3126eb 100644 --- a/dynare++/tl/cc/rfs_tensor.cc +++ b/dynare++/tl/cc/rfs_tensor.cc @@ -10,7 +10,7 @@ rows in the unfolded tensor |ut|, make an index of the folded tensor by sorting the coordinates, and add the row. */ FRTensor::FRTensor(const URTensor &ut) - : FTensor(along_row, IntSequence(ut.dimen(), ut.nvar()), + : FTensor(indor::along_row, IntSequence(ut.dimen(), ut.nvar()), FFSTensor::calcMaxOffset(ut.nvar(), ut.dimen()), ut.ncols(), ut.dimen()), nv(ut.nvar()) @@ -20,7 +20,7 @@ FRTensor::FRTensor(const URTensor &ut) { IntSequence vtmp(in.getCoor()); vtmp.sort(); - index tar(this, vtmp); + index tar(*this, vtmp); addRow(ut, *in, *tar); } } @@ -62,7 +62,7 @@ FRTensor::decrement(IntSequence &v) const (duplicates) zero. In this way, if the unfolded tensor is folded back, we should get the same data. 
*/ URTensor::URTensor(const FRTensor &ft) - : UTensor(along_row, IntSequence(ft.dimen(), ft.nvar()), + : UTensor(indor::along_row, IntSequence(ft.dimen(), ft.nvar()), UFSTensor::calcMaxOffset(ft.nvar(), ft.dimen()), ft.ncols(), ft.dimen()), nv(ft.nvar()) @@ -70,7 +70,7 @@ URTensor::URTensor(const FRTensor &ft) zeros(); for (index src = ft.begin(); src != ft.end(); ++src) { - index in(this, src.getCoor()); + index in(*this, src.getCoor()); copyRow(ft, *src, *in); } } @@ -181,7 +181,7 @@ FRSingleTensor::FRSingleTensor(const URSingleTensor &ut) { IntSequence vtmp(in.getCoor()); vtmp.sort(); - index tar(this, vtmp); + index tar(*this, vtmp); get(*tar, 0) += ut.get(*in, 0); } } diff --git a/dynare++/tl/cc/rfs_tensor.hh b/dynare++/tl/cc/rfs_tensor.hh index adfd013c0..c97862919 100644 --- a/dynare++/tl/cc/rfs_tensor.hh +++ b/dynare++/tl/cc/rfs_tensor.hh @@ -48,7 +48,7 @@ class URTensor : public UTensor int nv; public: URTensor(int c, int nvar, int d) - : UTensor(along_row, IntSequence(d, nvar), + : UTensor(indor::along_row, IntSequence(d, nvar), UFSTensor::calcMaxOffset(nvar, d), c, d), nv(nvar) { } @@ -84,7 +84,7 @@ class FRTensor : public FTensor int nv; public: FRTensor(int c, int nvar, int d) - : FTensor(along_row, IntSequence(d, nvar), + : FTensor(indor::along_row, IntSequence(d, nvar), FFSTensor::calcMaxOffset(nvar, d), c, d), nv(nvar) { } diff --git a/dynare++/tl/cc/stack_container.hh b/dynare++/tl/cc/stack_container.hh index b40b6774d..a960e3d1f 100644 --- a/dynare++/tl/cc/stack_container.hh +++ b/dynare++/tl/cc/stack_container.hh @@ -247,7 +247,7 @@ public: while (i < numStacks() && getType(i, s) == _Stype::matrix) { const _Ttype *t = getMatrix(i, s); - Tensor::index ind(t, coor); + Tensor::index ind(*t, coor); Vector subres(*res, stack_offsets[i], stack_sizes[i]); subres = ConstGeneralMatrix(*t).getCol(*ind); i++; diff --git a/dynare++/tl/cc/tensor.hh b/dynare++/tl/cc/tensor.hh index 2e22f59b1..3174425fa 100644 --- a/dynare++/tl/cc/tensor.hh +++ 
b/dynare++/tl/cc/tensor.hh @@ -49,96 +49,7 @@ #include "int_sequence.hh" #include "twod_matrix.hh" -/* The index represents $n$-tuple $\alpha_1\ldots\alpha_n$. Since its - movement is dependent on the underlying tensor (with storage and - symmetry), we maintain a pointer to that tensor, we maintain the - $n$-tuple (or coordinates) as |IntSequence| and also we maintain the - offset number (column, or row) of the index in the tensor. The pointer - is const, since we do not need to change data through the index. - - Here we require the |tensor| to implement |increment| and |decrement| - methods, which calculate following and preceding $n$-tuple. Also, we - need to calculate offset number from the given coordinates, so the - tensor must implement method |getOffset|. This method is used only in - construction of the index from the given coordinates. As the index is - created, the offset is automatically incremented, and decremented - together with index. The|getOffset| method can be relatively - computationally complex. This must be kept in mind. Also we generally - suppose that n-tuple of all zeros is the first offset (first columns - or row). - - What follows is a definition of index class, the only - interesting point is |operator==| which decides only according to - offset, not according to the coordinates. This is useful since there - can be more than one of coordinate representations of past-the-end - index. 
*/
-
-template<class _Tptr>
-class _index
-{
-  using _Self = _index<_Tptr>;
-  _Tptr tensor;
-  int offset;
-  IntSequence coor;
-public:
-  _index(_Tptr t, int n)
-    : tensor(t), offset(0), coor(n, 0)
-  {
-  }
-  _index(_Tptr t, const IntSequence &cr, int c)
-    : tensor(t), offset(c), coor(cr)
-  {
-  }
-  _index(_Tptr t, const IntSequence &cr)
-    : tensor(t), offset(tensor->getOffset(cr)), coor(cr)
-  {
-  }
-  _index(const _index &ind)
-    : tensor(ind.tensor), offset(ind.offset), coor(ind.coor)
-  {
-  }
-  const _Self &
-  operator=(const _Self &in)
-  {
-    tensor = in.tensor; offset = in.offset; coor = in.coor;
-    return *this;
-  }
-  _Self &
-  operator++()
-  {
-    tensor->increment(coor); offset++; return *this;
-  }
-  _Self &
-  operator--()
-  {
-    tensor->decrement(coor); offset--; return *this;
-  }
-  int
-  operator*() const
-  {
-    return offset;
-  }
-  bool
-  operator==(const _index &n) const
-  {
-    return offset == n.offset;
-  }
-  bool
-  operator!=(const _index &n) const
-  {
-    return offset != n.offset;
-  }
-  const IntSequence &
-  getCoor() const
-  {
-    return coor;
-  }
-  void
-  print() const
-  {
-    printf("%4d: ", offset); coor.print();
-  }
-};
+#include <iostream>
 
 /* Here is the |Tensor| class, which is nothing else than a simple
    subclass of |TwoDMatrix|. The unique semantically new member is |dim|
    which is tensor
@@ -161,8 +72,95 @@
 class Tensor : public TwoDMatrix
 {
 public:
-  enum indor {along_row, along_col};
-  using index = _index<const Tensor *>;
+  enum class indor {along_row, along_col};
+
+  /* The index represents $n$-tuple $\alpha_1\ldots\alpha_n$. Since its
+     movement is dependent on the underlying tensor (with storage and
+     symmetry), we maintain a reference to that tensor, we maintain the
+     $n$-tuple (or coordinates) as |IntSequence| and also we maintain the
+     offset number (column, or row) of the index in the tensor. The reference
+     is const, since we do not need to change data through the index. 
+ + Here we require the |tensor| to implement |increment| and |decrement| + methods, which calculate following and preceding $n$-tuple. Also, we + need to calculate offset number from the given coordinates, so the + tensor must implement method |getOffset|. This method is used only in + construction of the index from the given coordinates. As the index is + created, the offset is automatically incremented, and decremented + together with index. The |getOffset| method can be relatively + computationally complex. This must be kept in mind. Also we generally + suppose that n-tuple of all zeros is the first offset (first columns + or row). + + What follows is a definition of index class, the only + interesting point is |operator==| which decides only according to + offset, not according to the coordinates. This is useful since there + can be more than one of coordinate representations of past-the-end + index. */ + class index + { + const Tensor &tensor; + int offset; + IntSequence coor; + public: + index(const Tensor &t, int n) + : tensor(t), offset(0), coor(n, 0) + { + } + index(const Tensor &t, const IntSequence &cr, int c) + : tensor(t), offset(c), coor(cr) + { + } + index(const Tensor &t, const IntSequence &cr) + : tensor(t), offset(tensor.getOffset(cr)), coor(cr) + { + } + index(const index &) = default; + index(index &&) = default; + index &operator=(const index &) = delete; + index &operator=(index &&) = delete; + index & + operator++() + { + tensor.increment(coor); + offset++; + return *this; + } + index & + operator--() + { + tensor.decrement(coor); + offset--; + return *this; + } + int + operator*() const + { + return offset; + } + bool + operator==(const index &n) const + { + return offset == n.offset; + } + bool + operator!=(const index &n) const + { + return offset != n.offset; + } + const IntSequence & + getCoor() const + { + return coor; + } + void + print() const + { + std::cout << offset << ": "; + coor.print(); + } + }; + protected: const index 
in_beg; const index in_end; @@ -170,16 +168,16 @@ protected: public: Tensor(indor io, const IntSequence &last, int r, int c, int d) : TwoDMatrix(r, c), - in_beg(this, d), - in_end(this, last, (io == along_row) ? r : c), + in_beg(*this, d), + in_end(*this, last, (io == indor::along_row) ? r : c), dim(d) { } Tensor(indor io, const IntSequence &first, const IntSequence &last, int r, int c, int d) : TwoDMatrix(r, c), - in_beg(this, first, 0), - in_end(this, last, (io == along_row) ? r : c), + in_beg(*this, first, 0), + in_end(*this, last, (io == indor::along_row) ? r : c), dim(d) { } @@ -192,13 +190,17 @@ public: } Tensor(const Tensor &t) : TwoDMatrix(t), - in_beg(this, t.in_beg.getCoor(), *(t.in_beg)), - in_end(this, t.in_end.getCoor(), *(t.in_end)), + in_beg(*this, t.in_beg.getCoor(), *(t.in_beg)), + in_end(*this, t.in_end.getCoor(), *(t.in_end)), dim(t.dim) { } - ~Tensor() - override = default; + Tensor(Tensor &&) = default; + ~Tensor() override = default; + + Tensor &operator=(const Tensor &) = delete; + Tensor &operator=(Tensor &&) = delete; + virtual void increment(IntSequence &v) const = 0; virtual void decrement(IntSequence &v) const = 0; virtual int getOffset(const IntSequence &v) const = 0; @@ -245,16 +247,17 @@ public: : Tensor(io, last, r, c, d) { } - UTensor(const UTensor &ut) - - = default; + UTensor(const UTensor &) = default; + UTensor(UTensor &&) = default; UTensor(int first_row, int num, UTensor &t) : Tensor(first_row, num, t) { } - ~UTensor() - override = default; - virtual FTensor&fold() const = 0; + ~UTensor() override = default; + virtual FTensor &fold() const = 0; + + UTensor &operator=(const UTensor &) = delete; + UTensor &operator=(UTensor &&) = delete; static void increment(IntSequence &v, int nv); static void decrement(IntSequence &v, int nv); @@ -280,22 +283,24 @@ public: : Tensor(io, last, r, c, d) { } - FTensor(const FTensor &ft) - - = default; + FTensor(const FTensor &) = default; + FTensor(FTensor &&) = default; FTensor(int first_row, 
int num, FTensor &t)
     : Tensor(first_row, num, t)
   {
   }
-  ~FTensor()
-  override = default;
-  virtual UTensor&unfold() const = 0;
+  ~FTensor() override = default;
+  virtual UTensor &unfold() const = 0;
+
+  FTensor &operator=(const FTensor &) = delete;
+  FTensor &operator=(FTensor &&) = delete;
 
   static void decrement(IntSequence &v, int nv);
   static int
   getOffset(const IntSequence &v, int nv)
   {
-    IntSequence vtmp(v); return getOffsetRecurse(vtmp, nv);
+    IntSequence vtmp(v);
+    return getOffsetRecurse(vtmp, nv);
   }
 private:
   static int getOffsetRecurse(IntSequence &v, int nv);
diff --git a/dynare++/tl/cc/twod_matrix.cc b/dynare++/tl/cc/twod_matrix.cc
index 5a4ccfb64..dfddc8b7b 100644
--- a/dynare++/tl/cc/twod_matrix.cc
+++ b/dynare++/tl/cc/twod_matrix.cc
@@ -3,6 +3,11 @@
 #include "twod_matrix.hh"
 #include "tl_exception.hh"
+#include <fstream>
+#include <iomanip>
+#include <limits>
+#include <vector>
+
 
 ConstTwoDMatrix::ConstTwoDMatrix(const TwoDMatrix &m)
   : ConstGeneralMatrix(m)
 {
@@ -29,23 +34,22 @@ ConstTwoDMatrix::ConstTwoDMatrix(int first_row, int num, const ConstTwoDMatrix &
 }
 
 void
-ConstTwoDMatrix::writeMat(mat_t *fd, const char *vname) const
+ConstTwoDMatrix::writeMat(mat_t *fd, const std::string &vname) const
 {
   size_t dims[2];
   dims[0] = nrows();
   dims[1] = ncols();
-  auto *data = new double[nrows()*ncols()];
+  std::vector<double> data(nrows()*ncols());
 
   for (int j = 0; j < ncols(); j++)
     for (int i = 0; i < nrows(); i++)
       data[j*nrows()+i] = get(i, j);
 
-  matvar_t *v = Mat_VarCreate(vname, MAT_C_DOUBLE, MAT_T_DOUBLE, 2, dims, data, 0);
+  matvar_t *v = Mat_VarCreate(vname.c_str(), MAT_C_DOUBLE, MAT_T_DOUBLE, 2, dims, data.data(), 0);
 
   Mat_VarWrite(fd, v, MAT_COMPRESSION_NONE);
 
   Mat_VarFree(v);
-
-  delete[] data;
 }
 
 TwoDMatrix &
@@ -94,18 +98,19 @@ TwoDMatrix::addColumn(double d, const ConstTwoDMatrix &m, int from, int to)
 }
 
 void
-TwoDMatrix::save(const char *fname) const
+TwoDMatrix::save(const std::string &fname) const
 {
-  FILE *fd;
-  if (nullptr == (fd = fopen(fname, "w")))
-    {
-      TL_RAISE("Cannot open file for writing in TwoDMatrix::save");
-    }
+  std::ofstream fd{fname, std::ios::out | std::ios::trunc};
+  if (fd.fail())
+    TL_RAISE("Cannot open file for writing in TwoDMatrix::save");
+
+  fd << std::setprecision(std::numeric_limits<double>::digits10 + 1);
+
   for (int row = 0; row < nrows(); row++)
     {
       for (int col = 0; col < ncols(); col++)
-        fprintf(fd, " %20.10g", get(row, col));
-      fprintf(fd, "\n");
+        fd << ' ' << get(row, col);
+      fd << '\n';
     }
-  fclose(fd);
+  fd.close();
 }
diff --git a/dynare++/tl/cc/twod_matrix.hh b/dynare++/tl/cc/twod_matrix.hh
index d0d463f95..f7af7d093 100644
--- a/dynare++/tl/cc/twod_matrix.hh
+++ b/dynare++/tl/cc/twod_matrix.hh
@@ -17,8 +17,9 @@
 #include "GeneralMatrix.hh"
 
-#include <cstdio>
 #include <matio.h>
+
+#include <string>
 #include <utility>
 
 class TwoDMatrix;
@@ -46,8 +47,7 @@ public:
     : ConstGeneralMatrix(m, first_row, first_col, rows, cols)
   {
   }
-  ~ConstTwoDMatrix()
-  override = default;
+  ~ConstTwoDMatrix() override = default;
 
   ConstTwoDMatrix &operator=(const ConstTwoDMatrix &v) = delete;
   ConstTwoDMatrix &operator=(ConstTwoDMatrix &&v) = delete;
@@ -62,7 +62,7 @@ public:
   {
     return numCols();
   }
-  void writeMat(mat_t *fd, const char *vname) const;
+  void writeMat(mat_t *fd, const std::string &vname) const;
 };
 
 /* Here we do the same as for |ConstTwoDMatrix| plus define
@@ -192,9 +192,9 @@ public:
     addColumn(d, ConstTwoDMatrix(m), from, to);
   }
 
-  void save(const char *fname) const;
+  void save(const std::string &fname) const;
   void
-  writeMat(mat_t *fd, const char *vname) const
+  writeMat(mat_t *fd, const std::string &vname) const
   {
     ConstTwoDMatrix(*this).writeMat(fd, vname);
   }
diff --git a/dynare++/tl/testing/tests.cc b/dynare++/tl/testing/tests.cc
index 2b19ffa39..d3fa67673 100644
--- a/dynare++/tl/testing/tests.cc
+++ b/dynare++/tl/testing/tests.cc
@@ -181,7 +181,7 @@ TestRunnable::index_offset(const Symmetry &s, const IntSequence &nvs)
   for (typename _Ttype::index run = dummy.begin();
        run != dummy.end();
        ++run, nincr++)
     {
-      typename _Ttype::index run2(&dummy, run.getCoor());
+      typename _Ttype::index 
run2(dummy, run.getCoor()); if (!(run == run2)) fails++; }