// File: Eigen/src/Core/products/TriangularMatrixMatrix.h
// (recovered from a Doxygen file-documentation page; navigation text removed)
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H
#define EIGEN_TRIANGULAR_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {
// template<typename Scalar, int mr, int StorageOrder, bool Conjugate, int Mode>
// struct gemm_pack_lhs_triangular
// {
//   Matrix<Scalar,mr,mr,
//   void operator()(Scalar* blockA, const EIGEN_RESTRICT Scalar* _lhs, int lhsStride, int depth, int rows)
//   {
//     conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
//     const_blas_data_mapper<Scalar, StorageOrder> lhs(_lhs,lhsStride);
//     int count = 0;
//     const int peeled_mc = (rows/mr)*mr;
//     for(int i=0; i<peeled_mc; i+=mr)
//     {
//       for(int k=0; k<depth; k++)
//         for(int w=0; w<mr; w++)
//           blockA[count++] = cj(lhs(i+w, k));
//     }
//     for(int i=peeled_mc; i<rows; i++)
//     {
//       for(int k=0; k<depth; k++)
//         blockA[count++] = cj(lhs(i, k));
//     }
//   }
// };

56 /* Optimized triangular matrix * matrix (_TRMM++) product built on top of
57  * the general matrix matrix product.
58  */
59 template <typename Scalar, typename Index,
60  int Mode, bool LhsIsTriangular,
61  int LhsStorageOrder, bool ConjugateLhs,
62  int RhsStorageOrder, bool ConjugateRhs,
63  int ResStorageOrder, int Version = Specialized>
65 
66 template <typename Scalar, typename Index,
67  int Mode, bool LhsIsTriangular,
68  int LhsStorageOrder, bool ConjugateLhs,
69  int RhsStorageOrder, bool ConjugateRhs, int Version>
70 struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
71  LhsStorageOrder,ConjugateLhs,
72  RhsStorageOrder,ConjugateRhs,RowMajor,Version>
73 {
74  static EIGEN_STRONG_INLINE void run(
75  Index rows, Index cols, Index depth,
76  const Scalar* lhs, Index lhsStride,
77  const Scalar* rhs, Index rhsStride,
78  Scalar* res, Index resStride,
79  Scalar alpha)
80  {
82  (Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),
83  (!LhsIsTriangular),
84  RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
85  ConjugateRhs,
86  LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
87  ConjugateLhs,
88  ColMajor>
89  ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha);
90  }
91 };
92 
93 // implements col-major += alpha * op(triangular) * op(general)
94 template <typename Scalar, typename Index, int Mode,
95  int LhsStorageOrder, bool ConjugateLhs,
96  int RhsStorageOrder, bool ConjugateRhs, int Version>
97 struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
98  LhsStorageOrder,ConjugateLhs,
99  RhsStorageOrder,ConjugateRhs,ColMajor,Version>
100 {
101 
102  typedef gebp_traits<Scalar,Scalar> Traits;
103  enum {
104  SmallPanelWidth = 2 * EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
105  IsLower = (Mode&Lower) == Lower,
106  SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
107  };
108 
109  static EIGEN_DONT_INLINE void run(
110  Index _rows, Index _cols, Index _depth,
111  const Scalar* _lhs, Index lhsStride,
112  const Scalar* _rhs, Index rhsStride,
113  Scalar* res, Index resStride,
114  Scalar alpha)
115  {
116  // strip zeros
117  Index diagSize = (std::min)(_rows,_depth);
118  Index rows = IsLower ? _rows : diagSize;
119  Index depth = IsLower ? diagSize : _depth;
120  Index cols = _cols;
121 
122  const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
123  const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
124 
125  Index kc = depth; // cache block size along the K direction
126  Index mc = rows; // cache block size along the M direction
127  Index nc = cols; // cache block size along the N direction
128  computeProductBlockingSizes<Scalar,Scalar,4>(kc, mc, nc);
129  std::size_t sizeW = kc*Traits::WorkSpaceFactor;
130  std::size_t sizeB = sizeW + kc*cols;
131  ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
132  ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
133  Scalar* blockB = allocatedBlockB + sizeW;
134 
135  Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer;
136  triangularBuffer.setZero();
137  if((Mode&ZeroDiag)==ZeroDiag)
138  triangularBuffer.diagonal().setZero();
139  else
140  triangularBuffer.diagonal().setOnes();
141 
142  gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
143  gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
144  gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
145 
146  for(Index k2=IsLower ? depth : 0;
147  IsLower ? k2>0 : k2<depth;
148  IsLower ? k2-=kc : k2+=kc)
149  {
150  Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);
151  Index actual_k2 = IsLower ? k2-actual_kc : k2;
152 
153  // align blocks with the end of the triangular part for trapezoidal lhs
154  if((!IsLower)&&(k2<rows)&&(k2+actual_kc>rows))
155  {
156  actual_kc = rows-k2;
157  k2 = k2+actual_kc-kc;
158  }
159 
160  pack_rhs(blockB, &rhs(actual_k2,0), rhsStride, actual_kc, cols);
161 
162  // the selected lhs's panel has to be split in three different parts:
163  // 1 - the part which is zero => skip it
164  // 2 - the diagonal block => special kernel
165  // 3 - the dense panel below (lower case) or above (upper case) the diagonal block => GEPP
166 
167  // the block diagonal, if any:
168  if(IsLower || actual_k2<rows)
169  {
170  // for each small vertical panels of lhs
171  for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
172  {
173  Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
174  Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;
175  Index startBlock = actual_k2+k1;
176  Index blockBOffset = k1;
177 
178  // => GEBP with the micro triangular block
179  // The trick is to pack this micro block while filling the opposite triangular part with zeros.
180  // To this end we do an extra triangular copy to a small temporary buffer
181  for (Index k=0;k<actualPanelWidth;++k)
182  {
183  if (SetDiag)
184  triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k);
185  for (Index i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i)
186  triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k);
187  }
188  pack_lhs(blockA, triangularBuffer.data(), triangularBuffer.outerStride(), actualPanelWidth, actualPanelWidth);
189 
190  gebp_kernel(res+startBlock, resStride, blockA, blockB, actualPanelWidth, actualPanelWidth, cols, alpha,
191  actualPanelWidth, actual_kc, 0, blockBOffset);
192 
193  // GEBP with remaining micro panel
194  if (lengthTarget>0)
195  {
196  Index startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;
197 
198  pack_lhs(blockA, &lhs(startTarget,startBlock), lhsStride, actualPanelWidth, lengthTarget);
199 
200  gebp_kernel(res+startTarget, resStride, blockA, blockB, lengthTarget, actualPanelWidth, cols, alpha,
201  actualPanelWidth, actual_kc, 0, blockBOffset);
202  }
203  }
204  }
205  // the part below (lower case) or above (upper case) the diagonal => GEPP
206  {
207  Index start = IsLower ? k2 : 0;
208  Index end = IsLower ? rows : (std::min)(actual_k2,rows);
209  for(Index i2=start; i2<end; i2+=mc)
210  {
211  const Index actual_mc = (std::min)(i2+mc,end)-i2;
212  gemm_pack_lhs<Scalar, Index, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
213  (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
214 
215  gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
216  }
217  }
218  }
219  }
220 };
221 
222 // implements col-major += alpha * op(general) * op(triangular)
223 template <typename Scalar, typename Index, int Mode,
224  int LhsStorageOrder, bool ConjugateLhs,
225  int RhsStorageOrder, bool ConjugateRhs, int Version>
226 struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
227  LhsStorageOrder,ConjugateLhs,
228  RhsStorageOrder,ConjugateRhs,ColMajor,Version>
229 {
230  typedef gebp_traits<Scalar,Scalar> Traits;
231  enum {
232  SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
233  IsLower = (Mode&Lower) == Lower,
234  SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
235  };
236 
237  static EIGEN_DONT_INLINE void run(
238  Index _rows, Index _cols, Index _depth,
239  const Scalar* _lhs, Index lhsStride,
240  const Scalar* _rhs, Index rhsStride,
241  Scalar* res, Index resStride,
242  Scalar alpha)
243  {
244  // strip zeros
245  Index diagSize = (std::min)(_cols,_depth);
246  Index rows = _rows;
247  Index depth = IsLower ? _depth : diagSize;
248  Index cols = IsLower ? diagSize : _cols;
249 
250  const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
251  const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
252 
253  Index kc = depth; // cache block size along the K direction
254  Index mc = rows; // cache block size along the M direction
255  Index nc = cols; // cache block size along the N direction
256  computeProductBlockingSizes<Scalar,Scalar,4>(kc, mc, nc);
257 
258  std::size_t sizeW = kc*Traits::WorkSpaceFactor;
259  std::size_t sizeB = sizeW + kc*cols;
260  ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
261  ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
262  Scalar* blockB = allocatedBlockB + sizeW;
263 
264  Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer;
265  triangularBuffer.setZero();
266  if((Mode&ZeroDiag)==ZeroDiag)
267  triangularBuffer.diagonal().setZero();
268  else
269  triangularBuffer.diagonal().setOnes();
270 
271  gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
272  gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
273  gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
274  gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;
275 
276  for(Index k2=IsLower ? 0 : depth;
277  IsLower ? k2<depth : k2>0;
278  IsLower ? k2+=kc : k2-=kc)
279  {
280  Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);
281  Index actual_k2 = IsLower ? k2 : k2-actual_kc;
282 
283  // align blocks with the end of the triangular part for trapezoidal rhs
284  if(IsLower && (k2<cols) && (actual_k2+actual_kc>cols))
285  {
286  actual_kc = cols-k2;
287  k2 = actual_k2 + actual_kc - kc;
288  }
289 
290  // remaining size
291  Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;
292  // size of the triangular part
293  Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;
294 
295  Scalar* geb = blockB+ts*ts;
296 
297  pack_rhs(geb, &rhs(actual_k2,IsLower ? 0 : k2), rhsStride, actual_kc, rs);
298 
299  // pack the triangular part of the rhs padding the unrolled blocks with zeros
300  if(ts>0)
301  {
302  for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
303  {
304  Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
305  Index actual_j2 = actual_k2 + j2;
306  Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
307  Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
308  // general part
309  pack_rhs_panel(blockB+j2*actual_kc,
310  &rhs(actual_k2+panelOffset, actual_j2), rhsStride,
311  panelLength, actualPanelWidth,
312  actual_kc, panelOffset);
313 
314  // append the triangular part via a temporary buffer
315  for (Index j=0;j<actualPanelWidth;++j)
316  {
317  if (SetDiag)
318  triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j);
319  for (Index k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k)
320  triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j);
321  }
322 
323  pack_rhs_panel(blockB+j2*actual_kc,
324  triangularBuffer.data(), triangularBuffer.outerStride(),
325  actualPanelWidth, actualPanelWidth,
326  actual_kc, j2);
327  }
328  }
329 
330  for (Index i2=0; i2<rows; i2+=mc)
331  {
332  const Index actual_mc = (std::min)(mc,rows-i2);
333  pack_lhs(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
334 
335  // triangular kernel
336  if(ts>0)
337  {
338  for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
339  {
340  Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
341  Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;
342  Index blockOffset = IsLower ? j2 : 0;
343 
344  gebp_kernel(res+i2+(actual_k2+j2)*resStride, resStride,
345  blockA, blockB+j2*actual_kc,
346  actual_mc, panelLength, actualPanelWidth,
347  alpha,
348  actual_kc, actual_kc, // strides
349  blockOffset, blockOffset,// offsets
350  allocatedBlockB); // workspace
351  }
352  }
353  gebp_kernel(res+i2+(IsLower ? 0 : k2)*resStride, resStride,
354  blockA, geb, actual_mc, actual_kc, rs,
355  alpha,
356  -1, -1, 0, 0, allocatedBlockB);
357  }
358  }
359  }
360 };
361 
/***************************************************************************
* Wrapper to product_triangular_matrix_matrix
***************************************************************************/

366 template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
367 struct traits<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false> >
368  : traits<ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>, Lhs, Rhs> >
369 {};
370 
} // end namespace internal
372 
373 template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
374 struct TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
375  : public ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>, Lhs, Rhs >
376 {
377  EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct)
378 
379  TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
380 
381  template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
382  {
383  typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
384  typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
385 
386  Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
387  * RhsBlasTraits::extractScalarFactor(m_rhs);
388 
389  internal::product_triangular_matrix_matrix<Scalar, Index,
390  Mode, LhsIsTriangular,
391  (internal::traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
392  (internal::traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
393  (internal::traits<Dest >::Flags&RowMajorBit) ? RowMajor : ColMajor>
394  ::run(
395  lhs.rows(), rhs.cols(), lhs.cols(),// LhsIsTriangular ? rhs.cols() : lhs.rows(), // sizes
396  &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info
397  &rhs.coeffRef(0,0), rhs.outerStride(), // rhs info
398  &dst.coeffRef(0,0), dst.outerStride(), // result info
399  actualAlpha // alpha
400  );
401  }
402 };
403 
} // end namespace Eigen

#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H