Assign.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_ASSIGN_H
#define EIGEN_ASSIGN_H

namespace Eigen {

namespace internal {

/***************************************************************************
* Part 1 : the logic deciding a strategy for traversal and unrolling       *
***************************************************************************/

template <typename Derived, typename OtherDerived>
struct assign_traits
{
public:
  enum {
    DstIsAligned = Derived::Flags & AlignedBit,
    DstHasDirectAccess = Derived::Flags & DirectAccessBit,
    SrcIsAligned = OtherDerived::Flags & AlignedBit,
    JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned
  };

private:
  enum {
    InnerSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::SizeAtCompileTime)
              : int(Derived::Flags)&RowMajorBit ? int(Derived::ColsAtCompileTime)
              : int(Derived::RowsAtCompileTime),
    InnerMaxSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::MaxSizeAtCompileTime)
                 : int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime)
                 : int(Derived::MaxRowsAtCompileTime),
    MaxSizeAtCompileTime = Derived::SizeAtCompileTime,
    PacketSize = packet_traits<typename Derived::Scalar>::size
  };

  enum {
    StorageOrdersAgree = (int(Derived::IsRowMajor) == int(OtherDerived::IsRowMajor)),
    MightVectorize = StorageOrdersAgree
                  && (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit),
    MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0
                     && int(DstIsAligned) && int(SrcIsAligned),
    MayLinearize = StorageOrdersAgree && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit),
    MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess
                      && (DstIsAligned || MaxSizeAtCompileTime == Dynamic),
      /* If the destination isn't aligned, we have to do runtime checks and we don't unroll,
         so it's only good for large enough sizes. */
    MaySliceVectorize = MightVectorize && DstHasDirectAccess
                     && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize)
      /* slice vectorization can be slow, so we only want it if the slices are big, which is
         indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block
         in a fixed-size matrix */
  };
public:
  enum {
    Traversal = int(MayInnerVectorize)  ? int(InnerVectorizedTraversal)
              : int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
              : int(MaySliceVectorize)  ? int(SliceVectorizedTraversal)
              : int(MayLinearize)       ? int(LinearTraversal)
                                        : int(DefaultTraversal),
    Vectorized = int(Traversal) == InnerVectorizedTraversal
              || int(Traversal) == LinearVectorizedTraversal
              || int(Traversal) == SliceVectorizedTraversal
  };

private:
  enum {
    UnrollingLimit = EIGEN_UNROLLING_LIMIT * (Vectorized ? int(PacketSize) : 1),
    MayUnrollCompletely = int(Derived::SizeAtCompileTime) != Dynamic
                       && int(OtherDerived::CoeffReadCost) != Dynamic
                       && int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit),
    MayUnrollInner = int(InnerSize) != Dynamic
                  && int(OtherDerived::CoeffReadCost) != Dynamic
                  && int(InnerSize) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit)
  };

public:
  enum {
    Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal))
              ? (
                  int(MayUnrollCompletely) ? int(CompleteUnrolling)
                : int(MayUnrollInner)      ? int(InnerUnrolling)
                                           : int(NoUnrolling)
                )
              : int(Traversal) == int(LinearVectorizedTraversal)
              ? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) )
              : int(Traversal) == int(LinearTraversal)
              ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) )
              : int(NoUnrolling)
  };

#ifdef EIGEN_DEBUG_ASSIGN
  static void debug()
  {
    EIGEN_DEBUG_VAR(DstIsAligned)
    EIGEN_DEBUG_VAR(SrcIsAligned)
    EIGEN_DEBUG_VAR(JointAlignment)
    EIGEN_DEBUG_VAR(InnerSize)
    EIGEN_DEBUG_VAR(InnerMaxSize)
    EIGEN_DEBUG_VAR(PacketSize)
    EIGEN_DEBUG_VAR(StorageOrdersAgree)
    EIGEN_DEBUG_VAR(MightVectorize)
    EIGEN_DEBUG_VAR(MayLinearize)
    EIGEN_DEBUG_VAR(MayInnerVectorize)
    EIGEN_DEBUG_VAR(MayLinearVectorize)
    EIGEN_DEBUG_VAR(MaySliceVectorize)
    EIGEN_DEBUG_VAR(Traversal)
    EIGEN_DEBUG_VAR(UnrollingLimit)
    EIGEN_DEBUG_VAR(MayUnrollCompletely)
    EIGEN_DEBUG_VAR(MayUnrollInner)
    EIGEN_DEBUG_VAR(Unrolling)
  }
#endif
};
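
// --- Illustrative note (not part of the original Eigen sources) -----------
// A minimal sketch of how assign_traits resolves for a concrete pair of
// expressions. The figures assume a 4-float SIMD packet (e.g. SSE) and the
// default EIGEN_UNROLLING_LIMIT; other packet sizes or limits may change the
// chosen strategy.
//
//   #include <Eigen/Core>
//   // Matrix4f -> Matrix4f: both sides are aligned and column-major, with
//   // InnerSize == 4 == PacketSize, so MayInnerVectorize is true and
//   // Traversal resolves to InnerVectorizedTraversal; the total cost
//   // 16 * CoeffReadCost stays under UnrollingLimit, giving CompleteUnrolling.
//   Eigen::Matrix4f a, b;
//   a = b;
//
//   // Defining EIGEN_DEBUG_ASSIGN before including Eigen enables the debug()
//   // member above, which prints every enum entering the decision.
// ---------------------------------------------------------------------------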

/***************************************************************************
* Part 2 : meta-unrollers
***************************************************************************/

/************************
*** Default traversal ***
************************/

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_DefaultTraversal_CompleteUnrolling
{
  enum {
    outer = Index / Derived1::InnerSizeAtCompileTime,
    inner = Index % Derived1::InnerSizeAtCompileTime
  };

  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    dst.copyCoeffByOuterInner(outer, inner, src);
    assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {}
};
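
// --- Illustrative note (not part of the original Eigen sources) -----------
// Sketch of how this meta-unroller expands at compile time, assuming a 2x2
// column-major destination (InnerSizeAtCompileTime == 2, SizeAtCompileTime
// == 4). Instantiating the recursion with Index = 0 and Stop = 4 yields, in
// effect:
//
//   dst.copyCoeffByOuterInner(0, 0, src);   // Index 0 -> outer 0, inner 0
//   dst.copyCoeffByOuterInner(0, 1, src);   // Index 1 -> outer 0, inner 1
//   dst.copyCoeffByOuterInner(1, 0, src);   // Index 2 -> outer 1, inner 0
//   dst.copyCoeffByOuterInner(1, 1, src);   // Index 3 -> outer 1, inner 1
//
// The Index == Stop partial specialization above supplies the empty run()
// that terminates the recursion, so no loop or runtime index remains.
// ---------------------------------------------------------------------------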

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_DefaultTraversal_InnerUnrolling
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer)
  {
    dst.copyCoeffByOuterInner(outer, Index, src);
    assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src, outer);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {}
};

/***********************
*** Linear traversal ***
***********************/

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_LinearTraversal_CompleteUnrolling
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    dst.copyCoeff(Index, src);
    assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {}
};

/**************************
*** Inner vectorization ***
**************************/

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_innervec_CompleteUnrolling
{
  enum {
    outer = Index / Derived1::InnerSizeAtCompileTime,
    inner = Index % Derived1::InnerSizeAtCompileTime,
    JointAlignment = assign_traits<Derived1,Derived2>::JointAlignment
  };

  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    dst.template copyPacketByOuterInner<Derived2, Aligned, JointAlignment>(outer, inner, src);
    assign_innervec_CompleteUnrolling<Derived1, Derived2,
      Index+packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_innervec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {}
};

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_innervec_InnerUnrolling
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer)
  {
    dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, Index, src);
    assign_innervec_InnerUnrolling<Derived1, Derived2,
      Index+packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src, outer);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_innervec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {}
};

/***************************************************************************
* Part 3 : implementation of all cases
***************************************************************************/

template<typename Derived1, typename Derived2,
         int Traversal = assign_traits<Derived1, Derived2>::Traversal,
         int Unrolling = assign_traits<Derived1, Derived2>::Unrolling,
         int Version = Specialized>
struct assign_impl;

/************************
*** Default traversal ***
************************/

template<typename Derived1, typename Derived2, int Unrolling, int Version>
struct assign_impl<Derived1, Derived2, InvalidTraversal, Unrolling, Version>
{
  static inline void run(Derived1 &, const Derived2 &) { }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, DefaultTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static inline void run(Derived1 &dst, const Derived2 &src)
  {
    const Index innerSize = dst.innerSize();
    const Index outerSize = dst.outerSize();
    for(Index outer = 0; outer < outerSize; ++outer)
      for(Index inner = 0; inner < innerSize; ++inner)
        dst.copyCoeffByOuterInner(outer, inner, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, DefaultTraversal, CompleteUnrolling, Version>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
      ::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, DefaultTraversal, InnerUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    const Index outerSize = dst.outerSize();
    for(Index outer = 0; outer < outerSize; ++outer)
      assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
        ::run(dst, src, outer);
  }
};

/***********************
*** Linear traversal ***
***********************/

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, LinearTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static inline void run(Derived1 &dst, const Derived2 &src)
  {
    const Index size = dst.size();
    for(Index i = 0; i < size; ++i)
      dst.copyCoeff(i, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, LinearTraversal, CompleteUnrolling, Version>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
      ::run(dst, src);
  }
};

/**************************
*** Inner vectorization ***
**************************/

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static inline void run(Derived1 &dst, const Derived2 &src)
  {
    const Index innerSize = dst.innerSize();
    const Index outerSize = dst.outerSize();
    const Index packetSize = packet_traits<typename Derived1::Scalar>::size;
    for(Index outer = 0; outer < outerSize; ++outer)
      for(Index inner = 0; inner < innerSize; inner+=packetSize)
        dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, inner, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, CompleteUnrolling, Version>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
      ::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, InnerUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    const Index outerSize = dst.outerSize();
    for(Index outer = 0; outer < outerSize; ++outer)
      assign_innervec_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
        ::run(dst, src, outer);
  }
};

/***************************
*** Linear vectorization ***
***************************/

template <bool IsAligned = false>
struct unaligned_assign_impl
{
  template <typename Derived, typename OtherDerived>
  static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {}
};

template <>
struct unaligned_assign_impl<false>
{
  // MSVC must not inline these functions. If it does, it fails to optimize the
  // packet access path.
#ifdef _MSC_VER
  template <typename Derived, typename OtherDerived>
  static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
#else
  template <typename Derived, typename OtherDerived>
  static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
#endif
  {
    for (typename Derived::Index index = start; index < end; ++index)
      dst.copyCoeff(index, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, LinearVectorizedTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    const Index size = dst.size();
    typedef packet_traits<typename Derived1::Scalar> PacketTraits;
    enum {
      packetSize = PacketTraits::size,
      dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
      srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
    };
    const Index alignedStart = assign_traits<Derived1,Derived2>::DstIsAligned ? 0
                             : internal::first_aligned(&dst.coeffRef(0), size);
    const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;

    unaligned_assign_impl<assign_traits<Derived1,Derived2>::DstIsAligned!=0>::run(src,dst,0,alignedStart);

    for(Index index = alignedStart; index < alignedEnd; index += packetSize)
    {
      dst.template copyPacket<Derived2, dstAlignment, srcAlignment>(index, src);
    }

    unaligned_assign_impl<>::run(src,dst,alignedEnd,size);
  }
};
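
// --- Illustrative note (not part of the original Eigen sources) -----------
// A worked example of the peeling above, assuming float data, a 4-element
// packet and a destination of size 11 whose first aligned element sits at
// index 1 (e.g. a segment starting one float past a 16-byte boundary):
//
//   alignedStart = 1                             // found by first_aligned()
//   alignedEnd   = 1 + ((11 - 1) / 4) * 4 = 9    // last full packet ends here
//
//   index 0       : scalar copy (unaligned_assign_impl prologue)
//   indices 1..8  : two aligned packet copies of 4 floats each
//   indices 9..10 : scalar copies (unaligned_assign_impl epilogue)
// ---------------------------------------------------------------------------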

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, LinearVectorizedTraversal, CompleteUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    enum { size = Derived1::SizeAtCompileTime,
           packetSize = packet_traits<typename Derived1::Scalar>::size,
           alignedSize = (size/packetSize)*packetSize };

    assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, alignedSize>::run(dst, src);
    assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, alignedSize, size>::run(dst, src);
  }
};

/**************************
*** Slice vectorization ***
**************************/

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static inline void run(Derived1 &dst, const Derived2 &src)
  {
    typedef packet_traits<typename Derived1::Scalar> PacketTraits;
    enum {
      packetSize = PacketTraits::size,
      alignable = PacketTraits::AlignedOnScalar,
      dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
      srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
    };
    const Index packetAlignedMask = packetSize - 1;
    const Index innerSize = dst.innerSize();
    const Index outerSize = dst.outerSize();
    const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
    Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
                       : internal::first_aligned(&dst.coeffRef(0,0), innerSize);

    for(Index outer = 0; outer < outerSize; ++outer)
    {
      const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
      // do the non-vectorizable part of the assignment
      for(Index inner = 0; inner<alignedStart ; ++inner)
        dst.copyCoeffByOuterInner(outer, inner, src);

      // do the vectorizable part of the assignment
      for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize)
        dst.template copyPacketByOuterInner<Derived2, dstAlignment, Unaligned>(outer, inner, src);

      // do the non-vectorizable part of the assignment
      for(Index inner = alignedEnd; inner<innerSize ; ++inner)
        dst.copyCoeffByOuterInner(outer, inner, src);

      alignedStart = std::min<Index>((alignedStart+alignedStep)%packetSize, innerSize);
    }
  }
};
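
// --- Illustrative note (not part of the original Eigen sources) -----------
// Sketch of why alignedStart is recomputed per column, assuming float data,
// a 4-element packet and a 5x5 column-major block whose outer stride is 5
// floats:
//
//   alignedStep = (4 - 5 % 4) & 3 = 3
//
// so if column 0 becomes aligned at inner index 1, column 1 is aligned at
// inner index (1 + 3) % 4 = 0, column 2 at index 3, and so on. Each column
// gets its own scalar prologue/epilogue around its aligned packet region,
// which is why this traversal only pays off for large enough slices.
// ---------------------------------------------------------------------------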

} // end namespace internal

/***************************************************************************
* Part 4 : implementation of DenseBase methods
***************************************************************************/

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
  ::lazyAssign(const DenseBase<OtherDerived>& other)
{
  enum{
    SameType = internal::is_same<typename Derived::Scalar,typename OtherDerived::Scalar>::value
  };

  EIGEN_STATIC_ASSERT_LVALUE(Derived)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
  EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

#ifdef EIGEN_DEBUG_ASSIGN
  internal::assign_traits<Derived, OtherDerived>::debug();
#endif
  eigen_assert(rows() == other.rows() && cols() == other.cols());
  internal::assign_impl<Derived, OtherDerived, int(SameType) ? int(internal::assign_traits<Derived, OtherDerived>::Traversal)
                                                             : int(InvalidTraversal)>::run(derived(),other.derived());
#ifndef EIGEN_NO_DEBUG
  checkTransposeAliasing(other.derived());
#endif
  return derived();
}
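
// --- Illustrative note (not part of the original Eigen sources) -----------
// Minimal sketch of how lazyAssign() differs from operator=, assuming the
// destination does not alias the right-hand side. operator= (defined below
// via assign_selector) may first evaluate the expression into a temporary
// when it carries EvalBeforeAssigningBit; lazyAssign() always writes the
// coefficients (or packets) straight into the destination:
//
//   #include <Eigen/Core>
//   Eigen::MatrixXf a(4,4), b(4,4);
//   a = 2.f * b;               // safe default path through assign_selector
//   a.lazyAssign(2.f * b);     // direct copy, no temporary; the caller
//                              // guarantees 'a' is not used on the right
// ---------------------------------------------------------------------------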

namespace internal {

template<typename Derived, typename OtherDerived,
         bool EvalBeforeAssigning = (int(OtherDerived::Flags) & EvalBeforeAssigningBit) != 0,
         bool NeedToTranspose = Derived::IsVectorAtCompileTime
                && OtherDerived::IsVectorAtCompileTime
                && ((int(Derived::RowsAtCompileTime) == 1 && int(OtherDerived::ColsAtCompileTime) == 1)
                    |  // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
                       // revert to || as soon as not needed anymore.
                    (int(Derived::ColsAtCompileTime) == 1 && int(OtherDerived::RowsAtCompileTime) == 1))
                && int(Derived::SizeAtCompileTime) != 1>
struct assign_selector;

template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,false,false> {
  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
};
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,true,false> {
  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); }
};
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,false,true> {
  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); }
};
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,true,true> {
  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); }
};
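
// --- Illustrative note (not part of the original Eigen sources) -----------
// Sketch of what the NeedToTranspose flag above buys, assuming fixed-size
// vectors: a row vector can be assigned from a column vector (and vice
// versa) without an explicit transpose() at the call site, since both are
// vectors of the same length.
//
//   Eigen::Vector3f    col(1.f, 2.f, 3.f);
//   Eigen::RowVector3f row;
//   row = col;   // dispatches to the NeedToTranspose specialization, i.e.
//                // effectively row.lazyAssign(col.transpose())
// ---------------------------------------------------------------------------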

} // end namespace internal

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other)
{
  return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
}

template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other)
{
  return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
}

template<typename Derived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other)
{
  return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
}

template<typename Derived>
template <typename OtherDerived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other)
{
  return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
}

template<typename Derived>
template <typename OtherDerived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other)
{
  other.derived().evalTo(derived());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
  other.evalTo(derived());
  return derived();
}

} // end namespace Eigen

#endif // EIGEN_ASSIGN_H