@q $Id: ps_tensor.cweb 148 2005-04-19 15:12:26Z kamenik $ @>
@q Copyright 2004, Ondra Kamenik @>

@ Start of {\tt ps\_tensor.cpp} file.

@c
#include "ps_tensor.h"
#include "fs_tensor.h"
#include "tl_exception.h"
#include "tl_static.h"
#include "stack_container.h"

@<|UPSTensor::decideFillMethod| code@>;
@<|UPSTensor| slicing constructor code@>;
@<|UPSTensor| increment and decrement@>;
@<|UPSTensor::fold| code@>;
@<|UPSTensor::getOffset| code@>;
@<|UPSTensor::addTo| folded code@>;
@<|UPSTensor::addTo| unfolded code@>;
@<|UPSTensor::tailIdentitySize| code@>;
@<|UPSTensor::fillFromSparseOne| code@>;
@<|UPSTensor::fillFromSparseTwo| code@>;
@<|PerTensorDimens2::setDimensionSizes| code@>;
@<|PerTensorDimens2::calcOffset| code@>;
@<|PerTensorDimens2::print| code@>;
@<|FPSTensor::increment| code@>;
@<|FPSTensor::decrement| code@>;
@<|FPSTensor::unfold| code@>;
@<|FPSTensor::getOffset| code@>;
@<|FPSTensor::addTo| code@>;
@<|FPSTensor| sparse constructor@>;

@ Here we decide which method to use for filling a slice in the
slicing constructor. A few experiments suggest that if the tensor is
more than 8\% filled, the first method (|fillFromSparseOne|) is
better. For fill factors less than 1\%, the second can be three times
quicker.

@<|UPSTensor::decideFillMethod| code@>=
UPSTensor::fill_method UPSTensor::decideFillMethod(const FSSparseTensor& t)
{
	if (t.getFillFactor() > 0.08)
		return first;
	else
		return second;
}

@ Here we make a slice. We decide which fill method to use and
dispatch accordingly.

@<|UPSTensor| slicing constructor code@>=
UPSTensor::UPSTensor(const FSSparseTensor& t, const IntSequence& ss,
					 const IntSequence& coor, const PerTensorDimens& ptd)
	: UTensor(along_col, ptd.getNVX(),
			  t.nrows(), ptd.calcUnfoldMaxOffset(), ptd.dimen()),
	  tdims(ptd)
{
	TL_RAISE_IF(coor.size() != t.dimen(),
				"Wrong coordinates length of stacks for UPSTensor slicing constructor");
	TL_RAISE_IF(ss.sum() != t.nvar(),
				"Wrong length of stacks for UPSTensor slicing constructor");

	if (first == decideFillMethod(t))
		fillFromSparseOne(t, ss, coor);
	else
		fillFromSparseTwo(t, ss, coor);
}

@ @<|UPSTensor| increment and decrement@>=
void UPSTensor::increment(IntSequence& v) const
{
	TL_RAISE_IF(v.size() != dimen(),
				"Wrong input/output vector size in UPSTensor::increment");

	UTensor::increment(v, tdims.getNVX());
}

void UPSTensor::decrement(IntSequence& v) const
{
	TL_RAISE_IF(v.size() != dimen(),
				"Wrong input/output vector size in UPSTensor::decrement");

	UTensor::decrement(v, tdims.getNVX());
}

@ @<|UPSTensor::fold| code@>=
FTensor& UPSTensor::fold() const
{
	TL_RAISE("Never should come to this place in UPSTensor::fold");

	FFSTensor* nothing = new FFSTensor(0, 0, 0);
	return *nothing;
}

@ @<|UPSTensor::getOffset| code@>=
int UPSTensor::getOffset(const IntSequence& v) const
{
	TL_RAISE_IF(v.size() != dimen(),
				"Wrong input vector size in UPSTensor::getOffset");

	return UTensor::getOffset(v, tdims.getNVX());
}

@ @<|UPSTensor::addTo| folded code@>=
void UPSTensor::addTo(FGSTensor& out) const
{
	TL_RAISE_IF(out.getDims() != tdims,
				"Tensors have incompatible dimens in UPSTensor::addTo");
	for (index in = out.begin(); in != out.end(); ++in) {
		IntSequence vtmp(dimen());
		tdims.getPer().apply(in.getCoor(), vtmp);
		index tin(this, vtmp);
		out.addColumn(*this, *tin, *in);
	}
}
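@ To make the index mapping in the folded |addTo| above concrete,
here is a minimal sketch. It is purely illustrative, it is not
tangled into the program, and the concrete numbers are made up.
Suppose the permutation of |tdims| swaps two coordinates:

	// illustrative sketch only; assumes a 2-dimensional tensor whose
	// permutation swaps the two coordinates
	IntSequence coor(2);
	coor[0] = 3; coor[1] = 5;          // target coordinates (3,5) in |out|
	IntSequence vtmp(2);
	tdims.getPer().apply(coor, vtmp);  // |vtmp| becomes (5,3)
	// the column of |this| at (5,3) is added to column (3,5) of |out|

Thus every column of |out| receives exactly one column of |this|,
namely the one whose coordinates are the permuted target coordinates.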
@ In here, we have to add this permuted symmetry unfolded tensor to
an unfolded tensor which is not permuted. One easy way would be to go
through the target tensor, permute each index, and add the column.
However, it may happen that the permutation has a non-empty identity
tail. In this case, we can add not only individual columns but much
bigger data chunks, which is usually more efficient.

Therefore, the code is quite dirty, because we do not have an
iterator which iterates over the tensor at some higher level, so we
simulate it by the following code. First we set |cols| to the length
of the data chunk and |off| to its dimension. Then we need the front
part of the |nvmax| of |out|, which is |nvmax_part|. Our iterator
here is an integer sequence |outrun| of full length, with
|outrun_part| its front part. The |outrun| is initialized to zeros.
In each step we need to increment |outrun| |cols| times; this is done
by incrementing its prefix |outrun_part|. So we loop over all
|cols|-wide partitions of |out|, and permute |outrun| to obtain
|perrun|, which gives the corresponding column of this matrix (note
that the trailing part of |perrun| is the same as that of |outrun|).
Then we construct the submatrices, add them, and increment |outrun|.

@<|UPSTensor::addTo| unfolded code@>=
void UPSTensor::addTo(UGSTensor& out) const
{
	TL_RAISE_IF(out.getDims() != tdims,
				"Tensors have incompatible dimens in UPSTensor::addTo");
	int cols = tailIdentitySize();
	int off = tdims.tailIdentity();
	IntSequence outrun(out.dimen(), 0);
	IntSequence outrun_part(outrun, 0, out.dimen()-off);
	IntSequence nvmax_part(out.getDims().getNVX(), 0, out.dimen()-off);
	for (int out_col = 0; out_col < out.ncols(); out_col += cols) {
		// permute |outrun|
		IntSequence perrun(out.dimen());
		tdims.getPer().apply(outrun, perrun);
		index from(this, perrun);
		// construct submatrices
		ConstTwoDMatrix subfrom(*this, *from, cols);
		TwoDMatrix subout(out, out_col, cols);
		// add
		subout.add(1, subfrom);
		// increment |outrun| by cols
		UTensor::increment(outrun_part, nvmax_part);
	}
}

@ This returns a product of all items in |nvmax| which make up the
trailing identity part.

@<|UPSTensor::tailIdentitySize| code@>=
int UPSTensor::tailIdentitySize() const
{
	return tdims.getNVX().mult(dimen()-tdims.tailIdentity(), dimen());
}

@ This fill method is pretty dumb. We go through all columns of
|this| tensor, translate the coordinates to those of the sparse
tensor, sort them, and look the item up in the sparse tensor. For a
really sparse tensor there are many unsuccessful lookups, which is
why the second method works better in that case.

@<|UPSTensor::fillFromSparseOne| code@>=
void UPSTensor::fillFromSparseOne(const FSSparseTensor& t, const IntSequence& ss,
								  const IntSequence& coor)
{
	IntSequence cumtmp(ss.size());
	cumtmp[0] = 0;
	for (int i = 1; i < ss.size(); i++)
		cumtmp[i] = cumtmp[i-1] + ss[i-1];
	IntSequence cum(coor.size());
	for (int i = 0; i < coor.size(); i++)
		cum[i] = cumtmp[coor[i]];

	zeros();
	for (Tensor::index run = begin(); run != end(); ++run) {
		IntSequence c(run.getCoor());
		c.add(1, cum);
		c.sort();
		FSSparseTensor::const_iterator sl = t.getMap().lower_bound(c);
		if (sl != t.getMap().end()) {
			FSSparseTensor::const_iterator su = t.getMap().upper_bound(c);
			for (FSSparseTensor::const_iterator srun = sl; srun != su; ++srun)
				get((*srun).second.first, *run) = (*srun).second.second;
		}
	}
}
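@ As a worked example of the coordinate translation above (the
numbers are made up purely for illustration), let the stack sizes be
$ss=(2,3,2)$ and the slice stack coordinates be $coor=(1,0,1)$. Then
$cumtmp=(0,2,5)$ and $cum=(2,0,2)$, so a slice index $(i,j,k)$ is
translated to the sparse tensor index $(i+2,j,k+2)$, which is then
sorted before the lookup in the map of |t|.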
@ This is the second way of filling the slice. For instance, let the
slice correspond to partitions $abac$. In here we first calculate
lower and upper bounds for the index of the sparse tensor for the
slice. These are |lb_srt| and |ub_srt| respectively. They correspond
to the ordering $aabc$. Then we go through that interval and select
the items which are really between the bounds. Then we take the index
and subtract the lower bound to express it in coordinates of the
slice. We get something like $(i_a,j_a,k_b,l_c)$. Then we apply the
inverse of the sorting permutation $abac\mapsto aabc$ to get the
index $(i_a,k_b,j_a,l_c)$. Recall that the slice is unfolded, so we
have to apply all permutations preserving the stack coordinates
$abac$. In our case we get the list of indices $(i_a,k_b,j_a,l_c)$
and $(j_a,k_b,i_a,l_c)$. For each of these we copy the item of the
sparse tensor to the appropriate column.

@<|UPSTensor::fillFromSparseTwo| code@>=
void UPSTensor::fillFromSparseTwo(const FSSparseTensor& t, const IntSequence& ss,
								  const IntSequence& coor)
{
	IntSequence coor_srt(coor);
	coor_srt.sort();
	IntSequence cum(ss.size());
	cum[0] = 0;
	for (int i = 1; i < ss.size(); i++)
		cum[i] = cum[i-1] + ss[i-1];
	IntSequence lb_srt(coor.size());
	IntSequence ub_srt(coor.size());
	for (int i = 0; i < coor.size(); i++) {
		lb_srt[i] = cum[coor_srt[i]];
		ub_srt[i] = cum[coor_srt[i]] + ss[coor_srt[i]] - 1;
	}

	const PermutationSet& pset = tls.pbundle->get(coor.size());
	vector<const Permutation*> pp = pset.getPreserving(coor);
	Permutation unsort(coor);
	zeros();
	FSSparseTensor::const_iterator lbi = t.getMap().lower_bound(lb_srt);
	FSSparseTensor::const_iterator ubi = t.getMap().upper_bound(ub_srt);
	for (FSSparseTensor::const_iterator run = lbi; run != ubi; ++run) {
		if (lb_srt.lessEq((*run).first) && (*run).first.lessEq(ub_srt)) {
			IntSequence c((*run).first);
			c.add(-1, lb_srt);
			unsort.apply(c);
			for (unsigned int i = 0; i < pp.size(); i++) {
				IntSequence cp(coor.size());
				pp[i]->apply(c, cp);
				Tensor::index ind(this, cp);
				TL_RAISE_IF(*ind < 0 || *ind >= ncols(),
							"Internal error in slicing constructor of UPSTensor");
				get((*run).second.first, *ind) = (*run).second.second;
			}
		}
	}
}

@ Here we calculate the maximum offsets in each folded dimension
(these are the dimension sizes, hence |ds|).

@<|PerTensorDimens2::setDimensionSizes| code@>=
void PerTensorDimens2::setDimensionSizes()
{
	const IntSequence& nvs = getNVS();
	for (int i = 0; i < numSyms(); i++) {
		TensorDimens td(syms[i], nvs);
		ds[i] = td.calcFoldMaxOffset();
	}
}

@ If there are two folded dimensions, the offset in such a pair of
dimensions is the offset of the second plus the offset of the first
times the maximum offset of the second. If there are $n+1$
dimensions, the offset is the offset of the last dimension plus the
offset in the first $n$ dimensions multiplied by the maximum offset
of the last dimension. This is exactly what the following code does.

@<|PerTensorDimens2::calcOffset| code@>=
int PerTensorDimens2::calcOffset(const IntSequence& coor) const
{
	TL_RAISE_IF(coor.size() != dimen(),
				"Wrong length of coordinates in PerTensorDimens2::calcOffset");
	IntSequence cc(coor);
	int ret = 0;
	int off = 0;
	for (int i = 0; i < numSyms(); i++) {
		TensorDimens td(syms[i], getNVS());
		IntSequence c(cc, off, off+syms[i].dimen());
		int a = td.calcFoldOffset(c);
		ret = ret*ds[i] + a;
		off += syms[i].dimen();
	}
	return ret;
}
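@ In other words, the within-block offsets $a_i$ are the digits of a
mixed-base number whose bases are the maximum offsets $d_i$ (stored
in |ds|), and the loop evaluates it by Horner's scheme:
$$\hbox{offset}=(\cdots(a_1d_2+a_2)d_3+\cdots)d_n+a_n
=\sum_{i=1}^n a_i\prod_{j=i+1}^n d_j.$$
As a made-up numeric example, for two symmetry blocks with maximum
offsets $d=(6,4)$ and within-block offsets $a=(2,3)$, the code
returns $2\cdot4+3=11$.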
@ @<|PerTensorDimens2::print| code@>=
void PerTensorDimens2::print() const
{
	printf("nvmax: "); nvmax.print();
	printf("per:   "); per.print();
	printf("syms:  "); syms.print();
	printf("dims:  "); ds.print();
}

@ Here we increment the given integer sequence. It corresponds to
|UTensor::increment| of the whole sequence, followed by partial
monotonization of the subsequences with respect to the symmetries of
each dimension.

@<|FPSTensor::increment| code@>=
void FPSTensor::increment(IntSequence& v) const
{
	TL_RAISE_IF(v.size() != dimen(),
				"Wrong length of coordinates in FPSTensor::increment");
	UTensor::increment(v, tdims.getNVX());
	int off = 0;
	for (int i = 0; i < tdims.numSyms(); i++) {
		IntSequence c(v, off, off+tdims.getSym(i).dimen());
		c.pmonotone(tdims.getSym(i));
		off += tdims.getSym(i).dimen();
	}
}

@ @<|FPSTensor::decrement| code@>=
void FPSTensor::decrement(IntSequence& v) const
{
	TL_RAISE("FPSTensor::decrement not implemented");
}

@ @<|FPSTensor::unfold| code@>=
UTensor& FPSTensor::unfold() const
{
	TL_RAISE("Unfolding of FPSTensor not implemented");

	UFSTensor* nothing = new UFSTensor(0, 0, 0);
	return *nothing;
}

@ We only call |calcOffset| of the |PerTensorDimens2|.

@<|FPSTensor::getOffset| code@>=
int FPSTensor::getOffset(const IntSequence& v) const
{
	return tdims.calcOffset(v);
}

@ Here we add the tensor to |out|. We go through all columns of
|out|, apply the permutation to get the index in this tensor, and
add the column. Note that even if the permutation is the identity,
the dimensions of the two tensors might not be the same, since this
tensor is partially folded.

@<|FPSTensor::addTo| code@>=
void FPSTensor::addTo(FGSTensor& out) const
{
	for (index tar = out.begin(); tar != out.end(); ++tar) {
		IntSequence coor(dimen());
		tdims.getPer().apply(tar.getCoor(), coor);
		index src(this, coor);
		out.addColumn(*this, *src, *tar);
	}
}

@ Here is the constructor which multiplies the Kronecker product
with the general symmetry sparse tensor |GSSparseTensor|. The main
idea is to go through the items of the sparse tensor (each item
selects rows in the matrices forming the Kronecker product), then to
Kronecker multiply the rows, multiply by the item, and add the
resulting row to the appropriate row of the resulting |FPSTensor|.

The realization of this idea is a bit more complicated, since we
have to go through all items, and each item must be added as many
times as it has symmetric elements. Moreover, the permutations
shuffle the order of rows in the Kronecker product. So, we go through
all unfolded indices in a tensor with the same dimensions as the
|GSSparseTensor| (sparse slice). For each such index we calculate its
folded version (which corresponds to the ordering of the subsequences
within the symmetries), we test whether there is an item in the
sparse slice with these coordinates, and if there is, we construct
the Kronecker product of the rows and go through all items with the
coordinates, adding to the appropriate rows of |this| tensor.

@<|FPSTensor| sparse constructor@>=
FPSTensor::FPSTensor(const TensorDimens& td, const Equivalence& e, const Permutation& p,
					 const GSSparseTensor& a, const KronProdAll& kp)
	: FTensor(along_col, PerTensorDimens(td, Permutation(e, p)).getNVX(),
			  a.nrows(), kp.ncols(), td.dimen()),
	  tdims(td, e, p)
{
	zeros();

	UGSTensor dummy(0, a.getDims());
	for (Tensor::index run = dummy.begin(); run != dummy.end(); ++run) {
		Tensor::index fold_ind = dummy.getFirstIndexOf(run);
		const IntSequence& c = fold_ind.getCoor();
		GSSparseTensor::const_iterator sl = a.getMap().lower_bound(c);
		if (sl != a.getMap().end()) {
			Vector* row_prod = kp.multRows(run.getCoor());
			GSSparseTensor::const_iterator su = a.getMap().upper_bound(c);
			for (GSSparseTensor::const_iterator srun = sl; srun != su; ++srun) {
				Vector out_row((*srun).second.first, *this);
				out_row.add((*srun).second.second, *row_prod);
			}
			delete row_prod;
		}
	}
}
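@ The row-by-row multiplication can be illustrated on a product of
two factors (an illustrative worked example, not part of the
program). For $A\otimes B$, the row with multi-index $(i,j)$ is
$A_{i\cdot}\otimes B_{j\cdot}$, the Kronecker product of the $i$-th
row of $A$ and the $j$-th row of $B$. Hence an item $s$ of the sparse
slice sitting in row $r$ and having column coordinates $(i,j)$
contributes the row $s\cdot(A_{i\cdot}\otimes B_{j\cdot})$, which is
added to the $r$-th row of |this| tensor. This is what |kp.multRows|
and the |out_row.add| call above accomplish for a general number of
factors.

@ End of {\tt ps\_tensor.cpp} file.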