My Project  UNKNOWN_GIT_VERSION
svd_si.h
Go to the documentation of this file.
1 /* svd_si.h automatically generated by makeheader from svd.template */
2 
3 #define assert(A)
4 /* stuff included from libs/ap.h */
5 
6 /********************************************************************
7 AP library.
8 See www.alglib.net or alglib.sources.ru for details.
9 ********************************************************************/
10 
11 #ifndef AP_H
12 #define AP_H
13 
14 #include <stdlib.h>
15 #include <math.h>
16 #include "resources/feFopen.h"
17 #include "kernel/mod2.h"
18 
19 #ifdef HAVE_SVD
20 /********************************************************************
21 Checking of the array boundaries mode.
22 ********************************************************************/
23 //#define NO_AP_ASSERT
24 #define AP_ASSERT
25 
26 #ifndef AP_ASSERT
27 #define NO_AP_ASSERT
28 #endif
29 
30 #ifdef NO_AP_ASSERT
31 #ifdef AP_ASSERT
32 #undef NO_AP_ASSERT
33 #endif
34 #endif
35 
36 
37 /********************************************************************
38 This symbol is used for debugging. Do not define it and do not remove
39 comments.
40 ********************************************************************/
41 //#define UNSAFE_MEM_COPY
42 
43 
44 /********************************************************************
45 Namespace of a standard library AlgoPascal.
46 ********************************************************************/
47 namespace ap
48 {
49 
50 
51 /********************************************************************
52 Exception class.
53 ********************************************************************/
54 class ap_error
55 {
56 public:
57  static void make_assertion(bool bClause)
58  { if(!bClause) /*throw ap_error();*/ ::WerrorS("ap_error"); };
59 private:
60 };
61 
62 /********************************************************************
63 Class defining a complex number with double precision.
64 ********************************************************************/
65 class complex;
66 
67 class complex
68 {
69 public:
70  complex():x(0.0),y(0.0){};
71  complex(const double &_x):x(_x),y(0.0){};
72  complex(const double &_x, const double &_y):x(_x),y(_y){};
73  complex(const complex &z):x(z.x),y(z.y){};
74 
75  complex& operator= (const double& v){ x = v; y = 0.0; return *this; };
76  complex& operator+=(const double& v){ x += v; return *this; };
77  complex& operator-=(const double& v){ x -= v; return *this; };
78  complex& operator*=(const double& v){ x *= v; y *= v; return *this; };
79  complex& operator/=(const double& v){ x /= v; y /= v; return *this; };
80 
81  complex& operator= (const complex& z){ x = z.x; y = z.y; return *this; };
82  complex& operator+=(const complex& z){ x += z.x; y += z.y; return *this; };
83  complex& operator-=(const complex& z){ x -= z.x; y -= z.y; return *this; };
84  complex& operator*=(const complex& z){ double t = x*z.x-y*z.y; y = x*z.y+y*z.x; x = t; return *this; };
86  {
88  double e;
89  double f;
90  if( fabs(z.y)<fabs(z.x) )
91  {
92  e = z.y/z.x;
93  f = z.x+z.y*e;
94  result.x = (z.x+z.y*e)/f;
95  result.y = (z.y-z.x*e)/f;
96  }
97  else
98  {
99  e = z.x/z.y;
100  f = z.y+z.x*e;
101  result.x = (z.y+z.x*e)/f;
102  result.y = (-z.x+z.y*e)/f;
103  }
104  *this = result;
105  return *this;
106  };
107 
108  double x, y;
109 };
110 
111 const complex operator/(const complex& lhs, const complex& rhs);
112 bool operator==(const complex& lhs, const complex& rhs);
113 bool operator!=(const complex& lhs, const complex& rhs);
114 const complex operator+(const complex& lhs);
115 const complex operator-(const complex& lhs);
116 const complex operator+(const complex& lhs, const complex& rhs);
117 const complex operator+(const complex& lhs, const double& rhs);
118 const complex operator+(const double& lhs, const complex& rhs);
119 const complex operator-(const complex& lhs, const complex& rhs);
120 const complex operator-(const complex& lhs, const double& rhs);
121 const complex operator-(const double& lhs, const complex& rhs);
122 const complex operator*(const complex& lhs, const complex& rhs);
123 const complex operator*(const complex& lhs, const double& rhs);
124 const complex operator*(const double& lhs, const complex& rhs);
125 const complex operator/(const complex& lhs, const complex& rhs);
126 const complex operator/(const double& lhs, const complex& rhs);
127 const complex operator/(const complex& lhs, const double& rhs);
128 double abscomplex(const complex &z);
129 const complex conj(const complex &z);
130 const complex csqr(const complex &z);
131 
132 
133 /********************************************************************
134 Template defining vector in memory. It is used by the basic
135 subroutines of linear algebra.
136 
137 Vector consists of Length elements of type T, starting from an element,
138 which Data is pointed to. Interval between adjacent elements equals
139 the value of Step.
140 
141 The class provides an access for reading only.
142 ********************************************************************/
143 template<class T>
144 class const_raw_vector
145 {
146 public:
147  const_raw_vector(const T *Data, int Length, int Step):
148  pData(const_cast<T*>(Data)),iLength(Length),iStep(Step){};
149 
150  const T* GetData() const
151  { return pData; };
152 
153  int GetLength() const
154  { return iLength; };
155 
156  int GetStep() const
157  { return iStep; };
158 protected:
159  T *pData;
160  int iLength, iStep;
161 };
162 
163 
164 /********************************************************************
165 Template defining vector in memory, derived from const_raw_vector.
166 It is used by the basic subroutines of linear algebra.
167 
168 Vector consists of Length elements of type T, starting from an element,
169 which Data is pointed to. Interval between adjacent elements equals
170 the value of Step.
171 
172 The class provides an access both for reading and writing.
173 ********************************************************************/
174 template<class T>
175 class raw_vector : public const_raw_vector<T>
176 {
177 public:
178  raw_vector(T *Data, int Length, int Step):const_raw_vector<T>(Data, Length, Step){};
179 
181  { return const_raw_vector<T>::pData; };
182 };
183 
184 
185 /********************************************************************
186 Scalar product
187 ********************************************************************/
188 template<class T>
189 T vdotproduct(const_raw_vector<T> v1, const_raw_vector<T> v2)
190 {
191  ap_error::make_assertion(v1.GetLength()==v2.GetLength());
192  if( v1.GetStep()==1 && v2.GetStep()==1 )
193  {
194  //
195  // fast
196  //
197  T r = 0;
198  const T *p1 = v1.GetData();
199  const T *p2 = v2.GetData();
200  int imax = v1.GetLength()/4;
201  int i;
202  for(i=imax; i!=0; i--)
203  {
204  r += (*p1)*(*p2) + p1[1]*p2[1] + p1[2]*p2[2] + p1[3]*p2[3];
205  p1+=4;
206  p2+=4;
207  }
208  for(i=0; i<v1.GetLength()%4; i++)
209  r += (*(p1++))*(*(p2++));
210  return r;
211  }
212  else
213  {
214  //
215  // general
216  //
217  int offset11 = v1.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
218  int offset21 = v2.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
219  T r = 0;
220  const T *p1 = v1.GetData();
221  const T *p2 = v2.GetData();
222  int imax = v1.GetLength()/4;
223  int i;
224  for(i=0; i<imax; i++)
225  {
226  r += (*p1)*(*p2) + p1[offset11]*p2[offset21] + p1[offset12]*p2[offset22] + p1[offset13]*p2[offset23];
227  p1+=offset14;
228  p2+=offset24;
229  }
230  for(i=0; i<v1.GetLength()%4; i++)
231  {
232  r += (*p1)*(*p2);
233  p1+=offset11;
234  p2+=offset21;
235  }
236  return r;
237  }
238 }
239 
240 
241 /********************************************************************
242 Copy one vector into another
243 ********************************************************************/
244 template<class T>
245 void vmove(raw_vector<T> vdst, const_raw_vector<T> vsrc)
246 {
247  ap_error::make_assertion(vdst.GetLength()==vsrc.GetLength());
248  if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
249  {
250  //
251  // fast
252  //
253  T *p1 = vdst.GetData();
254  const T *p2 = vsrc.GetData();
255  int imax = vdst.GetLength()/2;
256  int i;
257  for(i=imax; i!=0; i--)
258  {
259  *p1 = *p2;
260  p1[1] = p2[1];
261  p1 += 2;
262  p2 += 2;
263  }
264  if(vdst.GetLength()%2 != 0)
265  *p1 = *p2;
266  return;
267  }
268  else
269  {
270  //
271  // general
272  //
273  int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
274  int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
275  T *p1 = vdst.GetData();
276  const T *p2 = vsrc.GetData();
277  int imax = vdst.GetLength()/4;
278  int i;
279  for(i=0; i<imax; i++)
280  {
281  *p1 = *p2;
282  p1[offset11] = p2[offset21];
283  p1[offset12] = p2[offset22];
284  p1[offset13] = p2[offset23];
285  p1 += offset14;
286  p2 += offset24;
287  }
288  for(i=0; i<vdst.GetLength()%4; i++)
289  {
290  *p1 = *p2;
291  p1 += vdst.GetStep();
292  p2 += vsrc.GetStep();
293  }
294  return;
295  }
296 }
297 
298 
299 /********************************************************************
300 Copy one vector multiplied by -1 into another.
301 ********************************************************************/
302 template<class T>
303 void vmoveneg(raw_vector<T> vdst, const_raw_vector<T> vsrc)
304 {
305  ap_error::make_assertion(vdst.GetLength()==vsrc.GetLength());
306  if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
307  {
308  //
309  // fast
310  //
311  T *p1 = vdst.GetData();
312  const T *p2 = vsrc.GetData();
313  int imax = vdst.GetLength()/2;
314  int i;
315  for(i=0; i<imax; i++)
316  {
317  *p1 = -*p2;
318  p1[1] = -p2[1];
319  p1 += 2;
320  p2 += 2;
321  }
322  if(vdst.GetLength()%2 != 0)
323  *p1 = -*p2;
324  return;
325  }
326  else
327  {
328  //
329  // general
330  //
331  int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
332  int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
333  T *p1 = vdst.GetData();
334  const T *p2 = vsrc.GetData();
335  int imax = vdst.GetLength()/4;
336  int i;
337  for(i=imax; i!=0; i--)
338  {
339  *p1 = -*p2;
340  p1[offset11] = -p2[offset21];
341  p1[offset12] = -p2[offset22];
342  p1[offset13] = -p2[offset23];
343  p1 += offset14;
344  p2 += offset24;
345  }
346  for(i=0; i<vdst.GetLength()%4; i++)
347  {
348  *p1 = -*p2;
349  p1 += vdst.GetStep();
350  p2 += vsrc.GetStep();
351  }
352  return;
353  }
354 }
355 
356 
357 /********************************************************************
358 Copy one vector multiplied by a number into another vector.
359 ********************************************************************/
360 template<class T, class T2>
361 void vmove(raw_vector<T> vdst, const_raw_vector<T> vsrc, T2 alpha)
362 {
363  ap_error::make_assertion(vdst.GetLength()==vsrc.GetLength());
364  if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
365  {
366  //
367  // fast
368  //
369  T *p1 = vdst.GetData();
370  const T *p2 = vsrc.GetData();
371  int imax = vdst.GetLength()/4;
372  int i;
373  for(i=imax; i!=0; i--)
374  {
375  *p1 = alpha*(*p2);
376  p1[1] = alpha*p2[1];
377  p1[2] = alpha*p2[2];
378  p1[3] = alpha*p2[3];
379  p1 += 4;
380  p2 += 4;
381  }
382  for(i=0; i<vdst.GetLength()%4; i++)
383  *(p1++) = alpha*(*(p2++));
384  return;
385  }
386  else
387  {
388  //
389  // general
390  //
391  int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
392  int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
393  T *p1 = vdst.GetData();
394  const T *p2 = vsrc.GetData();
395  int imax = vdst.GetLength()/4;
396  int i;
397  for(i=0; i<imax; i++)
398  {
399  *p1 = alpha*(*p2);
400  p1[offset11] = alpha*p2[offset21];
401  p1[offset12] = alpha*p2[offset22];
402  p1[offset13] = alpha*p2[offset23];
403  p1 += offset14;
404  p2 += offset24;
405  }
406  for(i=0; i<vdst.GetLength()%4; i++)
407  {
408  *p1 = alpha*(*p2);
409  p1 += vdst.GetStep();
410  p2 += vsrc.GetStep();
411  }
412  return;
413  }
414 }
415 
416 
417 /********************************************************************
418 Vector addition
419 ********************************************************************/
420 template<class T>
421 void vadd(raw_vector<T> vdst, const_raw_vector<T> vsrc)
422 {
423  ap_error::make_assertion(vdst.GetLength()==vsrc.GetLength());
424  if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
425  {
426  //
427  // fast
428  //
429  T *p1 = vdst.GetData();
430  const T *p2 = vsrc.GetData();
431  int imax = vdst.GetLength()/4;
432  int i;
433  for(i=imax; i!=0; i--)
434  {
435  *p1 += *p2;
436  p1[1] += p2[1];
437  p1[2] += p2[2];
438  p1[3] += p2[3];
439  p1 += 4;
440  p2 += 4;
441  }
442  for(i=0; i<vdst.GetLength()%4; i++)
443  *(p1++) += *(p2++);
444  return;
445  }
446  else
447  {
448  //
449  // general
450  //
451  int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
452  int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
453  T *p1 = vdst.GetData();
454  const T *p2 = vsrc.GetData();
455  int imax = vdst.GetLength()/4;
456  int i;
457  for(i=0; i<imax; i++)
458  {
459  *p1 += *p2;
460  p1[offset11] += p2[offset21];
461  p1[offset12] += p2[offset22];
462  p1[offset13] += p2[offset23];
463  p1 += offset14;
464  p2 += offset24;
465  }
466  for(i=0; i<vdst.GetLength()%4; i++)
467  {
468  *p1 += *p2;
469  p1 += vdst.GetStep();
470  p2 += vsrc.GetStep();
471  }
472  return;
473  }
474 }
475 
476 
477 /********************************************************************
478 Add one vector multiplied by a number to another vector.
479 ********************************************************************/
480 template<class T, class T2>
481 void vadd(raw_vector<T> vdst, const_raw_vector<T> vsrc, T2 alpha)
482 {
483  ap_error::make_assertion(vdst.GetLength()==vsrc.GetLength());
484  if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
485  {
486  //
487  // fast
488  //
489  T *p1 = vdst.GetData();
490  const T *p2 = vsrc.GetData();
491  int imax = vdst.GetLength()/4;
492  int i;
493  for(i=imax; i!=0; i--)
494  {
495  *p1 += alpha*(*p2);
496  p1[1] += alpha*p2[1];
497  p1[2] += alpha*p2[2];
498  p1[3] += alpha*p2[3];
499  p1 += 4;
500  p2 += 4;
501  }
502  for(i=0; i<vdst.GetLength()%4; i++)
503  *(p1++) += alpha*(*(p2++));
504  return;
505  }
506  else
507  {
508  //
509  // general
510  //
511  int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
512  int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
513  T *p1 = vdst.GetData();
514  const T *p2 = vsrc.GetData();
515  int imax = vdst.GetLength()/4;
516  int i;
517  for(i=0; i<imax; i++)
518  {
519  *p1 += alpha*(*p2);
520  p1[offset11] += alpha*p2[offset21];
521  p1[offset12] += alpha*p2[offset22];
522  p1[offset13] += alpha*p2[offset23];
523  p1 += offset14;
524  p2 += offset24;
525  }
526  for(i=0; i<vdst.GetLength()%4; i++)
527  {
528  *p1 += alpha*(*p2);
529  p1 += vdst.GetStep();
530  p2 += vsrc.GetStep();
531  }
532  return;
533  }
534 }
535 
536 
537 /********************************************************************
538 Vector subtraction
539 ********************************************************************/
540 template<class T>
541 void vsub(raw_vector<T> vdst, const_raw_vector<T> vsrc)
542 {
543  ap_error::make_assertion(vdst.GetLength()==vsrc.GetLength());
544  if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
545  {
546  //
547  // fast
548  //
549  T *p1 = vdst.GetData();
550  const T *p2 = vsrc.GetData();
551  int imax = vdst.GetLength()/4;
552  int i;
553  for(i=imax; i!=0; i--)
554  {
555  *p1 -= *p2;
556  p1[1] -= p2[1];
557  p1[2] -= p2[2];
558  p1[3] -= p2[3];
559  p1 += 4;
560  p2 += 4;
561  }
562  for(i=0; i<vdst.GetLength()%4; i++)
563  *(p1++) -= *(p2++);
564  return;
565  }
566  else
567  {
568  //
569  // general
570  //
571  int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
572  int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
573  T *p1 = vdst.GetData();
574  const T *p2 = vsrc.GetData();
575  int imax = vdst.GetLength()/4;
576  int i;
577  for(i=0; i<imax; i++)
578  {
579  *p1 -= *p2;
580  p1[offset11] -= p2[offset21];
581  p1[offset12] -= p2[offset22];
582  p1[offset13] -= p2[offset23];
583  p1 += offset14;
584  p2 += offset24;
585  }
586  for(i=0; i<vdst.GetLength()%4; i++)
587  {
588  *p1 -= *p2;
589  p1 += vdst.GetStep();
590  p2 += vsrc.GetStep();
591  }
592  return;
593  }
594 }
595 
596 
597 /********************************************************************
598 Subtract one vector multiplied by a number from another vector.
599 ********************************************************************/
600 template<class T, class T2>
601 void vsub(raw_vector<T> vdst, const_raw_vector<T> vsrc, T2 alpha)
602 {
603  vadd(vdst, vsrc, -alpha);
604 }
605 
606 
607 /********************************************************************
608 In-place vector multiplication
609 ********************************************************************/
610 template<class T, class T2>
611 void vmul(raw_vector<T> vdst, T2 alpha)
612 {
613  if( vdst.GetStep()==1 )
614  {
615  //
616  // fast
617  //
618  T *p1 = vdst.GetData();
619  int imax = vdst.GetLength()/4;
620  int i;
621  for(i=imax; i!=0; i--)
622  {
623  *p1 *= alpha;
624  p1[1] *= alpha;
625  p1[2] *= alpha;
626  p1[3] *= alpha;
627  p1 += 4;
628  }
629  for(i=0; i<vdst.GetLength()%4; i++)
630  *(p1++) *= alpha;
631  return;
632  }
633  else
634  {
635  //
636  // general
637  //
638  int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
639  T *p1 = vdst.GetData();
640  int imax = vdst.GetLength()/4;
641  int i;
642  for(i=0; i<imax; i++)
643  {
644  *p1 *= alpha;
645  p1[offset11] *= alpha;
646  p1[offset12] *= alpha;
647  p1[offset13] *= alpha;
648  p1 += offset14;
649  }
650  for(i=0; i<vdst.GetLength()%4; i++)
651  {
652  *p1 *= alpha;
653  p1 += vdst.GetStep();
654  }
655  return;
656  }
657 }
658 
659 
660 /********************************************************************
661 Template of a dynamical one-dimensional array
662 ********************************************************************/
663 template<class T>
664 class template_1d_array
665 {
666 public:
668  {
669  m_Vec=0;
670  m_iVecSize = 0;
671  };
672 
674  {
675  if(m_Vec)
676  delete[] m_Vec;
677  };
678 
680  {
681  m_iVecSize = rhs.m_iVecSize;
682  m_iLow = rhs.m_iLow;
683  m_iHigh = rhs.m_iHigh;
684  if(rhs.m_Vec)
685  {
686  m_Vec = new T[m_iVecSize];
687  #ifndef UNSAFE_MEM_COPY
688  for(int i=0; i<m_iVecSize; i++)
689  m_Vec[i] = rhs.m_Vec[i];
690  #else
691  memcpy(m_Vec, rhs.m_Vec, m_iVecSize*sizeof(T));
692  #endif
693  }
694  else
695  m_Vec=0;
696  };
697 
698 
700  {
701  if( this==&rhs )
702  return *this;
703 
704  m_iLow = rhs.m_iLow;
705  m_iHigh = rhs.m_iHigh;
706  m_iVecSize = rhs.m_iVecSize;
707  if(m_Vec)
708  delete[] m_Vec;
709  if(rhs.m_Vec)
710  {
711  m_Vec = new T[m_iVecSize];
712  #ifndef UNSAFE_MEM_COPY
713  for(int i=0; i<m_iVecSize; i++)
714  m_Vec[i] = rhs.m_Vec[i];
715  #else
716  memcpy(m_Vec, rhs.m_Vec, m_iVecSize*sizeof(T));
717  #endif
718  }
719  else
720  m_Vec=0;
721  return *this;
722  };
723 
724 
725  const T& operator()(int i) const
726  {
727  #ifndef NO_AP_ASSERT
729  #endif
730  return m_Vec[ i-m_iLow ];
731  };
732 
733 
734  T& operator()(int i)
735  {
736  #ifndef NO_AP_ASSERT
738  #endif
739  return m_Vec[ i-m_iLow ];
740  };
741 
742 
743  void setbounds( int iLow, int iHigh )
744  {
745  if(m_Vec)
746  delete[] m_Vec;
747  m_iLow = iLow;
748  m_iHigh = iHigh;
749  m_iVecSize = iHigh-iLow+1;
750  m_Vec = new T[m_iVecSize];
751  };
752 
753 
754  void setcontent( int iLow, int iHigh, const T *pContent )
755  {
756  setbounds(iLow, iHigh);
757  for(int i=iLow; i<=iHigh; i++)
758  (*this)(i) = pContent[i-iLow];
759  };
760 
761 
763  {
764  return m_Vec;
765  };
766 
767  const T* getcontent() const
768  {
769  return m_Vec;
770  };
771 
772 
773  int getlowbound(int iBoundNum = 0) const
774  {
775  return m_iLow;
776  };
777 
778 
779  int gethighbound(int iBoundNum = 0) const
780  {
781  return m_iHigh;
782  };
783 
784  raw_vector<T> getvector(int iStart, int iEnd)
785  {
786  if( iStart>iEnd || wrongIdx(iStart) || wrongIdx(iEnd) )
787  return raw_vector<T>(0, 0, 1);
788  else
789  return raw_vector<T>(m_Vec+iStart-m_iLow, iEnd-iStart+1, 1);
790  };
791 
792 
793  const_raw_vector<T> getvector(int iStart, int iEnd) const
794  {
795  if( iStart>iEnd || wrongIdx(iStart) || wrongIdx(iEnd) )
796  return const_raw_vector<T>(0, 0, 1);
797  else
798  return const_raw_vector<T>(m_Vec+iStart-m_iLow, iEnd-iStart+1, 1);
799  };
800 private:
801  bool wrongIdx(int i) const { return i<m_iLow || i>m_iHigh; };
802 
803  T *m_Vec;
804  long m_iVecSize;
805  long m_iLow, m_iHigh;
806 };
807 
808 
809 
810 /********************************************************************
811 Template of a dynamical two-dimensional array
812 ********************************************************************/
813 template<class T>
814 class template_2d_array
815 {
816 public:
818  {
819  m_Vec=0;
820  m_iVecSize=0;
821  };
822 
824  {
825  if(m_Vec)
826  delete[] m_Vec;
827  };
828 
830  {
831  m_iVecSize = rhs.m_iVecSize;
832  m_iLow1 = rhs.m_iLow1;
833  m_iLow2 = rhs.m_iLow2;
834  m_iHigh1 = rhs.m_iHigh1;
835  m_iHigh2 = rhs.m_iHigh2;
836  m_iConstOffset = rhs.m_iConstOffset;
837  m_iLinearMember = rhs.m_iLinearMember;
838  if(rhs.m_Vec)
839  {
840  m_Vec = new T[m_iVecSize];
841  #ifndef UNSAFE_MEM_COPY
842  for(int i=0; i<m_iVecSize; i++)
843  m_Vec[i] = rhs.m_Vec[i];
844  #else
845  memcpy(m_Vec, rhs.m_Vec, m_iVecSize*sizeof(T));
846  #endif
847  }
848  else
849  m_Vec=0;
850  };
852  {
853  if( this==&rhs )
854  return *this;
855 
856  m_iLow1 = rhs.m_iLow1;
857  m_iLow2 = rhs.m_iLow2;
858  m_iHigh1 = rhs.m_iHigh1;
859  m_iHigh2 = rhs.m_iHigh2;
860  m_iConstOffset = rhs.m_iConstOffset;
861  m_iLinearMember = rhs.m_iLinearMember;
862  m_iVecSize = rhs.m_iVecSize;
863  if(m_Vec)
864  delete[] m_Vec;
865  if(rhs.m_Vec)
866  {
867  m_Vec = new T[m_iVecSize];
868  #ifndef UNSAFE_MEM_COPY
869  for(int i=0; i<m_iVecSize; i++)
870  m_Vec[i] = rhs.m_Vec[i];
871  #else
872  memcpy(m_Vec, rhs.m_Vec, m_iVecSize*sizeof(T));
873  #endif
874  }
875  else
876  m_Vec=0;
877  return *this;
878  };
879 
880  const T& operator()(int i1, int i2) const
881  {
882  #ifndef NO_AP_ASSERT
885  #endif
886  return m_Vec[ m_iConstOffset + i2 +i1*m_iLinearMember];
887  };
888 
889  T& operator()(int i1, int i2)
890  {
891  #ifndef NO_AP_ASSERT
894  #endif
895  return m_Vec[ m_iConstOffset + i2 +i1*m_iLinearMember];
896  };
897 
898  void setbounds( int iLow1, int iHigh1, int iLow2, int iHigh2 )
899  {
900  if(m_Vec)
901  delete[] m_Vec;
902  m_iVecSize = (iHigh1-iLow1+1)*(iHigh2-iLow2+1);
903  m_Vec = new T[m_iVecSize];
904  m_iLow1 = iLow1;
905  m_iHigh1 = iHigh1;
906  m_iLow2 = iLow2;
907  m_iHigh2 = iHigh2;
910  };
911 
912  void setcontent( int iLow1, int iHigh1, int iLow2, int iHigh2, const T *pContent )
913  {
914  setbounds(iLow1, iHigh1, iLow2, iHigh2);
915  for(int i=0; i<m_iVecSize; i++)
916  m_Vec[i]=pContent[i];
917  };
918 
920  {
921  return m_Vec;
922  };
923 
924  const T* getcontent() const
925  {
926  return m_Vec;
927  };
928 
929  int getlowbound(int iBoundNum) const
930  {
931  return iBoundNum==1 ? m_iLow1 : m_iLow2;
932  };
933 
934  int gethighbound(int iBoundNum) const
935  {
936  return iBoundNum==1 ? m_iHigh1 : m_iHigh2;
937  };
938 
939  raw_vector<T> getcolumn(int iColumn, int iRowStart, int iRowEnd)
940  {
941  if( (iRowStart>iRowEnd) || wrongColumn(iColumn) || wrongRow(iRowStart) ||wrongRow(iRowEnd) )
942  return raw_vector<T>(0, 0, 1);
943  else
944  return raw_vector<T>(&((*this)(iRowStart, iColumn)), iRowEnd-iRowStart+1, m_iLinearMember);
945  };
946 
947  raw_vector<T> getrow(int iRow, int iColumnStart, int iColumnEnd)
948  {
949  if( (iColumnStart>iColumnEnd) || wrongRow(iRow) || wrongColumn(iColumnStart) || wrongColumn(iColumnEnd))
950  return raw_vector<T>(0, 0, 1);
951  else
952  return raw_vector<T>(&((*this)(iRow, iColumnStart)), iColumnEnd-iColumnStart+1, 1);
953  };
954 
955  const_raw_vector<T> getcolumn(int iColumn, int iRowStart, int iRowEnd) const
956  {
957  if( (iRowStart>iRowEnd) || wrongColumn(iColumn) || wrongRow(iRowStart) ||wrongRow(iRowEnd) )
958  return const_raw_vector<T>(0, 0, 1);
959  else
960  return const_raw_vector<T>(&((*this)(iRowStart, iColumn)), iRowEnd-iRowStart+1, m_iLinearMember);
961  };
962 
963  const_raw_vector<T> getrow(int iRow, int iColumnStart, int iColumnEnd) const
964  {
965  if( (iColumnStart>iColumnEnd) || wrongRow(iRow) || wrongColumn(iColumnStart) || wrongColumn(iColumnEnd))
966  return const_raw_vector<T>(0, 0, 1);
967  else
968  return const_raw_vector<T>(&((*this)(iRow, iColumnStart)), iColumnEnd-iColumnStart+1, 1);
969  };
970 private:
971  bool wrongRow(int i) const { return i<m_iLow1 || i>m_iHigh1; };
972  bool wrongColumn(int j) const { return j<m_iLow2 || j>m_iHigh2; };
973 
974  T *m_Vec;
975  long m_iVecSize;
978 };
979 
980 
981 typedef template_1d_array<int> integer_1d_array;
982 typedef template_1d_array<double> real_1d_array;
983 typedef template_1d_array<complex> complex_1d_array;
984 typedef template_1d_array<bool> boolean_1d_array;
985 typedef template_2d_array<int> integer_2d_array;
986 typedef template_2d_array<double> real_2d_array;
987 typedef template_2d_array<complex> complex_2d_array;
988 typedef template_2d_array<bool> boolean_2d_array;
989 
990 
991 /********************************************************************
992 Constants and functions introduced for compatibility with AlgoPascal
993 ********************************************************************/
994 extern const double machineepsilon;
995 extern const double maxrealnumber;
996 extern const double minrealnumber;
997 
998 int sign(double x);
999 double randomreal();
1000 int randominteger(int maxv);
1001 int round(double x);
1002 int trunc(double x);
1003 int ifloor(double x);
1004 int iceil(double x);
1005 double pi();
1006 double sqr(double x);
1007 int maxint(int m1, int m2);
1008 int minint(int m1, int m2);
1009 double maxreal(double m1, double m2);
1010 double minreal(double m1, double m2);
1011 
1012 };//namespace ap
1013 
1014 
1015 /* stuff included from libs/amp.h */
1016 
1017 #include <omalloc/omalloc.h>
1018 #include <gmp.h>
1019 #include <mpfr.h>
1020 #include <stdexcept>
1021 #include <math.h>
1022 #include <string>
1023 #include <stdio.h>
1024 #include <time.h>
1025 #include <memory.h>
1026 #include <vector>
1027 #include <list>
1028 
1029 //#define _AMP_NO_TEMPLATE_CONSTRUCTORS
1030 
1031 namespace amp
1032 {
1033  class exception {};
1034  class incorrectPrecision : public exception {};
1035  class overflow : public exception {};
1036  class divisionByZero : public exception {};
1037  class sqrtOfNegativeNumber : public exception {};
1038  class invalidConversion : public exception {};
1039  class invalidString : public exception {};
1040  class internalError : public exception {};
1041  class domainError : public exception {};
1042 
1043  typedef unsigned long unsigned32;
1044  typedef signed long signed32;
1045 
1046  struct mpfr_record
1047  {
1048  unsigned int refCount;
1049  unsigned int Precision;
1050  mpfr_t value;
1051  mpfr_record *next;
1052  };
1053 
1054  typedef mpfr_record* mpfr_record_ptr;
1055 
1056  //
1057  // storage for mpfr_t instances
1058  //
1059  class mpfr_storage
1060  {
1061  public:
1062  static mpfr_record* newMpfr(unsigned int Precision);
1063  static void deleteMpfr(mpfr_record* ref);
1064  /*static void clearStorage();*/
1065  static gmp_randstate_t* getRandState();
1066  private:
1067  static mpfr_record_ptr& getList(unsigned int Precision);
1068  };
1069 
1070  //
1071  // mpfr_t reference
1072  //
1073  class mpfr_reference
1074  {
1075  public:
1080 
1081  void initialize(int Precision);
1082  void free();
1083 
1084  mpfr_srcptr getReadPtr() const;
1085  mpfr_ptr getWritePtr();
1086  private:
1087  mpfr_record *ref;
1088  };
1089 
1090  //
1091  // ampf template
1092  //
1093  template<unsigned int Precision>
1094  class ampf
1095  {
1096  public:
1097  //
1098  // Destructor
1099  //
1101  {
1102  rval->refCount--;
1103  if( rval->refCount==0 )
1105  }
1106 
1107  //
1108  // Initializing
1109  //
1112 
1113  ampf (long double v) { InitializeAsDouble(v); }
1114  ampf (double v) { InitializeAsDouble(v); }
1115  ampf (float v) { InitializeAsDouble(v); }
1116  ampf (signed long v) { InitializeAsSLong(v); }
1117  ampf (unsigned long v) { InitializeAsULong(v); }
1118  ampf (signed int v) { InitializeAsSLong(v); }
1119  ampf (unsigned int v) { InitializeAsULong(v); }
1120  ampf (signed short v) { InitializeAsSLong(v); }
1121  ampf (unsigned short v) { InitializeAsULong(v); }
1122  ampf (signed char v) { InitializeAsSLong(v); }
1123  ampf (unsigned char v) { InitializeAsULong(v); }
1124 
1125  //
1126  // initializing from string
1127  // string s must have format "X0.hhhhhhhh@eee" or "X-0.hhhhhhhh@eee"
1128  //
1129  ampf (const std::string &s) { InitializeAsString(s.c_str()); }
1130  ampf (const char *s) { InitializeAsString(s); }
1131 
1132  //
1133  // copy constructors
1134  //
1135  ampf(const ampf& r)
1136  {
1137  rval = r.rval;
1138  rval->refCount++;
1139  }
1140 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
1141  template<unsigned int Precision2>
1143  {
1144  CheckPrecision();
1145  rval = mpfr_storage::newMpfr(Precision);
1146  mpfr_set(getWritePtr(), r.getReadPtr(), GMP_RNDN);
1147  }
1148 #endif
1149 
1150  //
1151  // Assignment constructors
1152  //
1153  ampf& operator= (long double v) { mpfr_set_ld(getWritePtr(), v, GMP_RNDN); return *this; }
1154  ampf& operator= (double v) { mpfr_set_ld(getWritePtr(), v, GMP_RNDN); return *this; }
1155  ampf& operator= (float v) { mpfr_set_ld(getWritePtr(), v, GMP_RNDN); return *this; }
1156  ampf& operator= (signed long v) { mpfr_set_si(getWritePtr(), v, GMP_RNDN); return *this; }
1157  ampf& operator= (unsigned long v) { mpfr_set_ui(getWritePtr(), v, GMP_RNDN); return *this; }
1158  ampf& operator= (signed int v) { mpfr_set_si(getWritePtr(), v, GMP_RNDN); return *this; }
1159  ampf& operator= (unsigned int v) { mpfr_set_ui(getWritePtr(), v, GMP_RNDN); return *this; }
1160  ampf& operator= (signed short v) { mpfr_set_si(getWritePtr(), v, GMP_RNDN); return *this; }
1161  ampf& operator= (unsigned short v) { mpfr_set_ui(getWritePtr(), v, GMP_RNDN); return *this; }
1162  ampf& operator= (signed char v) { mpfr_set_si(getWritePtr(), v, GMP_RNDN); return *this; }
1163  ampf& operator= (unsigned char v) { mpfr_set_ui(getWritePtr(), v, GMP_RNDN); return *this; }
1164  ampf& operator= (const char *s) { mpfr_strtofr(getWritePtr(), s, NULL, 0, GMP_RNDN); return *this; }
1165  ampf& operator= (const std::string &s) { mpfr_strtofr(getWritePtr(), s.c_str(), NULL, 0, GMP_RNDN); return *this; }
1166  ampf& operator= (const ampf& r)
1167  {
1168  // TODO: may be copy ref
1169  if( this==&r )
1170  return *this;
1171  if( rval==r.rval )
1172  return *this;
1173  rval->refCount--;
1174  if( rval->refCount==0 )
1176  rval = r.rval;
1177  rval->refCount++;
1178  //mpfr_set(getWritePtr(), r.getReadPtr(), GMP_RNDN);
1179  return *this;
1180  }
1181 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
1182  template<unsigned int Precision2>
1184  {
1185  if( (void*)this==(void*)(&r) )
1186  return *this;
1187  mpfr_set(getWritePtr(), r.getReadPtr(), GMP_RNDN);
1188  return *this;
1189  }
1190 #endif
1191 
1192  //
1193  // in-place operators
1194  // TODO: optimize
1195  //
1196  template<class T> ampf& operator+=(const T& v){ *this = *this + v; return *this; };
1197  template<class T> ampf& operator-=(const T& v){ *this = *this - v; return *this; };
1198  template<class T> ampf& operator*=(const T& v){ *this = *this * v; return *this; };
1199  template<class T> ampf& operator/=(const T& v){ *this = *this / v; return *this; };
1200 
1201  //
1202  // MPFR access
1203  //
1204  mpfr_srcptr getReadPtr() const;
1205  mpfr_ptr getWritePtr();
1206 
1207  //
1208  // properties and information
1209  //
1210  bool isFiniteNumber() const;
1211  bool isPositiveNumber() const;
1212  bool isZero() const;
1213  bool isNegativeNumber() const;
1214  const ampf getUlpOf();
1215 
1216  //
1217  // conversions
1218  //
1219  double toDouble() const;
1222  char * toString() const;
1223 
1224 
1225  //
1226  // static methods
1227  //
1228  static const ampf getUlpOf(const ampf &x);
1229  static const ampf getUlp();
1230  static const ampf getUlp256();
1231  static const ampf getUlp512();
1232  static const ampf getMaxNumber();
1233  static const ampf getMinNumber();
1234  static const ampf getAlgoPascalEpsilon();
1237  static const ampf getRandom();
1238  private:
1241  void InitializeAsSLong(signed long v);
1242  void InitializeAsULong(unsigned long v);
1243  void InitializeAsDouble(long double v);
1244  void InitializeAsString(const char *s);
1245 
1246  //mpfr_reference ref;
1247  mpfr_record *rval;
1248  };
1249 
1250  /*void ampf<Precision>::CheckPrecision()
1251  {
1252  if( Precision<32 )
1253  throw incorrectPrecision();
1254  }***/
1255 
1256  template<unsigned int Precision>
1258  {
1259  if( Precision<32 )
1260  //throw incorrectPrecision();
1261  WerrorS("incorrectPrecision");
1262  }
1263 
1264  template<unsigned int Precision>
1266  {
1267  CheckPrecision();
1268  rval = mpfr_storage::newMpfr(Precision);
1269  mpfr_set_ui(getWritePtr(), 0, GMP_RNDN);
1270  }
1271 
1272  template<unsigned int Precision>
1273  void ampf<Precision>::InitializeAsSLong(signed long sv)
1274  {
1275  CheckPrecision();
1276  rval = mpfr_storage::newMpfr(Precision);
1277  mpfr_set_si(getWritePtr(), sv, GMP_RNDN);
1278  }
1279 
1280  template<unsigned int Precision>
1281  void ampf<Precision>::InitializeAsULong(unsigned long v)
1282  {
1283  CheckPrecision();
1284  rval = mpfr_storage::newMpfr(Precision);
1285  mpfr_set_ui(getWritePtr(), v, GMP_RNDN);
1286  }
1287 
1288  template<unsigned int Precision>
1289  void ampf<Precision>::InitializeAsDouble(long double v)
1290  {
1291  CheckPrecision();
1292  rval = mpfr_storage::newMpfr(Precision);
1293  mpfr_set_ld(getWritePtr(), v, GMP_RNDN);
1294  }
1295 
1296  template<unsigned int Precision>
1297  void ampf<Precision>::InitializeAsString(const char *s)
1298  {
1299  CheckPrecision();
1300  rval = mpfr_storage::newMpfr(Precision);
1301  mpfr_strtofr(getWritePtr(), s, NULL, 0, GMP_RNDN);
1302  }
1303 
1304  template<unsigned int Precision>
1305  mpfr_srcptr ampf<Precision>::getReadPtr() const
1306  {
1307  return rval->value;
1308  }
1309 
1310  template<unsigned int Precision>
1311  mpfr_ptr ampf<Precision>::getWritePtr()
1312  {
1313  if( rval->refCount==1 )
1314  return rval->value;
1315  mpfr_record *newrval = mpfr_storage::newMpfr(Precision);
1316  mpfr_set(newrval->value, rval->value, GMP_RNDN);
1317  rval->refCount--;
1318  rval = newrval;
1319  return rval->value;
1320  }
1321 
1322  template<unsigned int Precision>
1323  bool ampf<Precision>::isFiniteNumber() const
1324  {
1325  return mpfr_number_p(getReadPtr())!=0;
1326  }
1327 
1328  template<unsigned int Precision>
1330  {
1331  if( !isFiniteNumber() )
1332  return false;
1333  return mpfr_sgn(getReadPtr())>0;
1334  }
1335 
1336  template<unsigned int Precision>
1337  bool ampf<Precision>::isZero() const
1338  {
1339  return mpfr_zero_p(getReadPtr())!=0;
1340  }
1341 
1342  template<unsigned int Precision>
1344  {
1345  if( !isFiniteNumber() )
1346  return false;
1347  return mpfr_sgn(getReadPtr())<0;
1348  }
1349 
1350  template<unsigned int Precision>
1351  const ampf<Precision> ampf<Precision>::getUlpOf()
1352  {
1353  return getUlpOf(*this);
1354  }
1355 
1356  template<unsigned int Precision>
1357  double ampf<Precision>::toDouble() const
1358  {
1359  return mpfr_get_d(getReadPtr(), GMP_RNDN);
1360  }
1361 
1362  template<unsigned int Precision>
1364  {
1365  //
1366  // some special cases
1367  //
1368  if( !isFiniteNumber() )
1369  {
1370  std::string r;
1371  mp_exp_t _e;
1372  char *ptr;
1373  ptr = mpfr_get_str(NULL, &_e, 16, 0, getReadPtr(), GMP_RNDN);
1374  r = ptr;
1375  mpfr_free_str(ptr);
1376  return r;
1377  }
1378 
1379  //
1380  // general case
1381  //
1382  std::string r;
1383  char buf_e[128];
1384  signed long iexpval;
1385  mp_exp_t expval;
1386  char *ptr;
1387  char *ptr2;
1388  ptr = mpfr_get_str(NULL, &expval, 16, 0, getReadPtr(), GMP_RNDN);
1389  ptr2 = ptr;
1390  iexpval = expval;
1391  if( iexpval!=expval )
1392  // throw internalError();
1393  WerrorS("internalError");
1394  sprintf(buf_e, "%ld", long(iexpval));
1395  if( *ptr=='-' )
1396  {
1397  r = "-";
1398  ptr++;
1399  }
1400  r += "0x0.";
1401  r += ptr;
1402  r += "@";
1403  r += buf_e;
1404  mpfr_free_str(ptr2);
1405  return r;
1406  }
1407 
1408  template<unsigned int Precision>
1410  {
1411  // TODO: advanced output formatting (zero, integers)
1412 
1413  //
1414  // some special cases
1415  //
1416  if( !isFiniteNumber() )
1417  {
1418  std::string r;
1419  mp_exp_t _e;
1420  char *ptr;
1421  ptr = mpfr_get_str(NULL, &_e, 10, 0, getReadPtr(), GMP_RNDN);
1422  r = ptr;
1423  mpfr_free_str(ptr);
1424  return r;
1425  }
1426 
1427  //
1428  // general case
1429  //
1430  std::string r;
1431  char buf_e[128];
1432  signed long iexpval;
1433  mp_exp_t expval;
1434  char *ptr;
1435  char *ptr2;
1436  ptr = mpfr_get_str(NULL, &expval, 10, 0, getReadPtr(), GMP_RNDN);
1437  ptr2 = ptr;
1438  iexpval = expval;
1439  if( iexpval!=expval )
1440  // throw internalError();
1441  WerrorS("internalError");
1442  sprintf(buf_e, "%ld", long(iexpval));
1443  if( *ptr=='-' )
1444  {
1445  r = "-";
1446  ptr++;
1447  }
1448  r += "0.";
1449  r += ptr;
1450  r += "E";
1451  r += buf_e;
1452  mpfr_free_str(ptr2);
1453  return r;
1454  }
1455  template<unsigned int Precision>
1456  char * ampf<Precision>::toString() const
1457  {
1458  char *toString_Block=(char *)omAlloc(256);
1459  //
1460  // some special cases
1461  //
1462  if( !isFiniteNumber() )
1463  {
1464  mp_exp_t _e;
1465  char *ptr;
1466  ptr = mpfr_get_str(NULL, &_e, 10, 0, getReadPtr(), GMP_RNDN);
1467  strcpy(toString_Block, ptr);
1468  mpfr_free_str(ptr);
1469  return toString_Block;
1470  }
1471 
1472  //
1473  // general case
1474  //
1475 
1476  char buf_e[128];
1477  signed long iexpval;
1478  mp_exp_t expval;
1479  char *ptr;
1480  char *ptr2;
1481  ptr = mpfr_get_str(NULL, &expval, 10, 0, getReadPtr(), GMP_RNDN);
1482  ptr2 = ptr;
1483  iexpval = expval;
1484  if( iexpval!=expval )
1485  //throw internalError();
1486  WerrorS("internalError");
1487  sprintf(buf_e, "%ld", long(iexpval));
1488  if( *ptr=='-' )
1489  {
1490  ptr++;
1491  sprintf(toString_Block,"-0.%sE%s",ptr,buf_e);
1492  }
1493  else
1494  sprintf(toString_Block,"0.%sE%s",ptr,buf_e);
1495  mpfr_free_str(ptr2);
1496  return toString_Block;
1497  }
1498 
1499  template<unsigned int Precision>
1500  const ampf<Precision> ampf<Precision>::getUlpOf(const ampf<Precision> &x)
1501  {
1502  if( !x.isFiniteNumber() )
1503  return x;
1504  if( x.isZero() )
1505  return x;
1506  ampf<Precision> r(1);
1507  mpfr_nextabove(r.getWritePtr());
1508  mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1509  mpfr_mul_2si(
1510  r.getWritePtr(),
1511  r.getWritePtr(),
1512  mpfr_get_exp(x.getReadPtr()),
1513  GMP_RNDN);
1514  mpfr_div_2si(
1515  r.getWritePtr(),
1516  r.getWritePtr(),
1517  1,
1518  GMP_RNDN);
1519  return r;
1520  }
1521 
1522  template<unsigned int Precision>
1523  const ampf<Precision> ampf<Precision>::getUlp()
1524  {
1525  ampf<Precision> r(1);
1526  mpfr_nextabove(r.getWritePtr());
1527  mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1528  return r;
1529  }
1530 
1531  template<unsigned int Precision>
1532  const ampf<Precision> ampf<Precision>::getUlp256()
1533  {
1534  ampf<Precision> r(1);
1535  mpfr_nextabove(r.getWritePtr());
1536  mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1537  mpfr_mul_2si(
1538  r.getWritePtr(),
1539  r.getWritePtr(),
1540  8,
1541  GMP_RNDN);
1542  return r;
1543  }
1544 
1545  template<unsigned int Precision>
1546  const ampf<Precision> ampf<Precision>::getUlp512()
1547  {
1548  ampf<Precision> r(1);
1549  mpfr_nextabove(r.getWritePtr());
1550  mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1551  mpfr_mul_2si(
1552  r.getWritePtr(),
1553  r.getWritePtr(),
1554  9,
1555  GMP_RNDN);
1556  return r;
1557  }
1558 
1559  template<unsigned int Precision>
1560  const ampf<Precision> ampf<Precision>::getMaxNumber()
1561  {
1562  ampf<Precision> r(1);
1563  mpfr_nextbelow(r.getWritePtr());
1564  mpfr_set_exp(r.getWritePtr(),mpfr_get_emax());
1565  return r;
1566  }
1567 
1568  template<unsigned int Precision>
1569  const ampf<Precision> ampf<Precision>::getMinNumber()
1570  {
1571  ampf<Precision> r(1);
1572  mpfr_set_exp(r.getWritePtr(),mpfr_get_emin());
1573  return r;
1574  }
1575 
1576  template<unsigned int Precision>
1577  const ampf<Precision> ampf<Precision>::getAlgoPascalEpsilon()
1578  {
1579  return getUlp256();
1580  }
1581 
1582  template<unsigned int Precision>
1583  const ampf<Precision> ampf<Precision>::getAlgoPascalMaxNumber()
1584  {
1585  ampf<Precision> r(1);
1586  mp_exp_t e1 = mpfr_get_emax();
1587  mp_exp_t e2 = -mpfr_get_emin();
1588  mp_exp_t e = e1>e2 ? e1 : e2;
1589  mpfr_set_exp(r.getWritePtr(), e-5);
1590  return r;
1591  }
1592 
1593  template<unsigned int Precision>
1594  const ampf<Precision> ampf<Precision>::getAlgoPascalMinNumber()
1595  {
1596  ampf<Precision> r(1);
1597  mp_exp_t e1 = mpfr_get_emax();
1598  mp_exp_t e2 = -mpfr_get_emin();
1599  mp_exp_t e = e1>e2 ? e1 : e2;
1600  mpfr_set_exp(r.getWritePtr(), 2-(e-5));
1601  return r;
1602  }
1603 
1604  template<unsigned int Precision>
1605  const ampf<Precision> ampf<Precision>::getRandom()
1606  {
1607  ampf<Precision> r;
1608  while(mpfr_urandomb(r.getWritePtr(), *amp::mpfr_storage::getRandState()));
1609  return r;
1610  }
1611 
1612  //
1613  // comparison operators
1614  //
1615  template<unsigned int Precision>
1616  bool operator==(const ampf<Precision>& op1, const ampf<Precision>& op2)
1617  {
1618  return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())==0;
1619  }
1620 
1621  template<unsigned int Precision>
1622  bool operator!=(const ampf<Precision>& op1, const ampf<Precision>& op2)
1623  {
1624  return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())!=0;
1625  }
1626 
1627  template<unsigned int Precision>
1628  bool operator<(const ampf<Precision>& op1, const ampf<Precision>& op2)
1629  {
1630  return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())<0;
1631  }
1632 
1633  template<unsigned int Precision>
1634  bool operator>(const ampf<Precision>& op1, const ampf<Precision>& op2)
1635  {
1636  return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())>0;
1637  }
1638 
1639  template<unsigned int Precision>
1640  bool operator<=(const ampf<Precision>& op1, const ampf<Precision>& op2)
1641  {
1642  return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())<=0;
1643  }
1644 
1645  template<unsigned int Precision>
1646  bool operator>=(const ampf<Precision>& op1, const ampf<Precision>& op2)
1647  {
1648  return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())>=0;
1649  }
1650 
1651  //
1652  // arithmetic operators
1653  //
1654  template<unsigned int Precision>
1655  const ampf<Precision> operator+(const ampf<Precision>& op1)
1656  {
1657  return op1;
1658  }
1659 
1660  template<unsigned int Precision>
1661  const ampf<Precision> operator-(const ampf<Precision>& op1)
1662  {
1663  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1664  mpfr_neg(v->value, op1.getReadPtr(), GMP_RNDN);
1665  return v;
1666  }
1667 
1668  template<unsigned int Precision>
1669  const ampf<Precision> operator+(const ampf<Precision>& op1, const ampf<Precision>& op2)
1670  {
1671  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1672  mpfr_add(v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1673  return v;
1674  }
1675 
1676  template<unsigned int Precision>
1677  const ampf<Precision> operator-(const ampf<Precision>& op1, const ampf<Precision>& op2)
1678  {
1679  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1680  mpfr_sub(v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1681  return v;
1682  }
1683 
1684 
1685  template<unsigned int Precision>
1686  const ampf<Precision> operator*(const ampf<Precision>& op1, const ampf<Precision>& op2)
1687  {
1688  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1689  mpfr_mul(v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1690  return v;
1691  }
1692 
1693  template<unsigned int Precision>
1694  const ampf<Precision> operator/(const ampf<Precision>& op1, const ampf<Precision>& op2)
1695  {
1696  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1697  mpfr_div(v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1698  return v;
1699  }
1700 
1701  //
1702  // basic functions
1703  //
1704  template<unsigned int Precision>
1705  const ampf<Precision> sqr(const ampf<Precision> &x)
1706  {
1707  // TODO: optimize temporary for return value
1708  ampf<Precision> res;
1709  mpfr_sqr(res.getWritePtr(), x.getReadPtr(), GMP_RNDN);
1710  return res;
1711  }
1712 
1713  template<unsigned int Precision>
1714  int sign(const ampf<Precision> &x)
1715  {
1716  int s = mpfr_sgn(x.getReadPtr());
1717  if( s>0 )
1718  return +1;
1719  if( s<0 )
1720  return -1;
1721  return 0;
1722  }
1723 
1724  template<unsigned int Precision>
1725  const ampf<Precision> abs(const ampf<Precision> &x)
1726  {
1727  // TODO: optimize temporary for return value
1728  ampf<Precision> res;
1729  mpfr_abs(res.getWritePtr(), x.getReadPtr(), GMP_RNDN);
1730  return res;
1731  }
1732 
1733  template<unsigned int Precision>
1734  const ampf<Precision> maximum(const ampf<Precision> &x, const ampf<Precision> &y)
1735  {
1736  // TODO: optimize temporary for return value
1737  ampf<Precision> res;
1738  mpfr_max(res.getWritePtr(), x.getReadPtr(), y.getReadPtr(), GMP_RNDN);
1739  return res;
1740  }
1741 
1742  template<unsigned int Precision>
1743  const ampf<Precision> minimum(const ampf<Precision> &x, const ampf<Precision> &y)
1744  {
1745  // TODO: optimize temporary for return value
1746  ampf<Precision> res;
1747  mpfr_min(res.getWritePtr(), x.getReadPtr(), y.getReadPtr(), GMP_RNDN);
1748  return res;
1749  }
1750 
1751  template<unsigned int Precision>
1752  const ampf<Precision> sqrt(const ampf<Precision> &x)
1753  {
1754  // TODO: optimize temporary for return value
1755  ampf<Precision> res;
1756  mpfr_sqrt(res.getWritePtr(), x.getReadPtr(), GMP_RNDN);
1757  return res;
1758  }
1759 
1760  template<unsigned int Precision>
1761  signed long trunc(const ampf<Precision> &x)
1762  {
1763  ampf<Precision> tmp;
1764  signed long r;
1765  mpfr_trunc(tmp.getWritePtr(), x.getReadPtr());
1766  if( mpfr_integer_p(tmp.getReadPtr())==0 )
1767  //throw invalidConversion();
1768  WerrorS("internalError");
1769  mpfr_clear_erangeflag();
1770  r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1771  if( mpfr_erangeflag_p()!=0 )
1772  //throw invalidConversion();
1773  WerrorS("internalError");
1774  return r;
1775  }
1776 
1777  template<unsigned int Precision>
1778  const ampf<Precision> frac(const ampf<Precision> &x)
1779  {
1780  // TODO: optimize temporary for return value
1781  ampf<Precision> r;
1782  mpfr_frac(r.getWritePtr(), x.getReadPtr(), GMP_RNDN);
1783  return r;
1784  }
1785 
1786  template<unsigned int Precision>
1787  signed long floor(const ampf<Precision> &x)
1788  {
1789  ampf<Precision> tmp;
1790  signed long r;
1791  mpfr_floor(tmp.getWritePtr(), x.getReadPtr());
1792  if( mpfr_integer_p(tmp.getReadPtr())==0 )
1793  //throw invalidConversion();
1794  WerrorS("internalError");
1795  mpfr_clear_erangeflag();
1796  r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1797  if( mpfr_erangeflag_p()!=0 )
1798  //throw invalidConversion();
1799  WerrorS("internalError");
1800  return r;
1801  }
1802 
1803  template<unsigned int Precision>
1804  signed long ceil(const ampf<Precision> &x)
1805  {
1806  ampf<Precision> tmp;
1807  signed long r;
1808  mpfr_ceil(tmp.getWritePtr(), x.getReadPtr());
1809  if( mpfr_integer_p(tmp.getReadPtr())==0 )
1810  //throw invalidConversion();
1811  WerrorS("internalError");
1812  mpfr_clear_erangeflag();
1813  r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1814  if( mpfr_erangeflag_p()!=0 )
1815  //throw invalidConversion();
1816  WerrorS("internalError");
1817  return r;
1818  }
1819 
1820  template<unsigned int Precision>
1821  signed long round(const ampf<Precision> &x)
1822  {
1823  ampf<Precision> tmp;
1824  signed long r;
1825  mpfr_round(tmp.getWritePtr(), x.getReadPtr());
1826  if( mpfr_integer_p(tmp.getReadPtr())==0 )
1827  //throw invalidConversion();
1828  WerrorS("internalError");
1829  mpfr_clear_erangeflag();
1830  r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1831  if( mpfr_erangeflag_p()!=0 )
1832  //throw invalidConversion();
1833  WerrorS("internalError");
1834  return r;
1835  }
1836 
1837  template<unsigned int Precision>
1838  const ampf<Precision> frexp2(const ampf<Precision> &x, mp_exp_t *exponent)
1839  {
1840  // TODO: optimize temporary for return value
1841  ampf<Precision> r;
1842  if( !x.isFiniteNumber() )
1843  //throw invalidConversion();
1844  WerrorS("internalError");
1845  if( x.isZero() )
1846  {
1847  *exponent = 0;
1848  r = 0;
1849  return r;
1850  }
1851  r = x;
1852  *exponent = mpfr_get_exp(r.getReadPtr());
1853  mpfr_set_exp(r.getWritePtr(),0);
1854  return r;
1855  }
1856 
1857  template<unsigned int Precision>
1858  const ampf<Precision> ldexp2(const ampf<Precision> &x, mp_exp_t exponent)
1859  {
1860  // TODO: optimize temporary for return value
1861  ampf<Precision> r;
1862  mpfr_mul_2si(r.getWritePtr(), x.getReadPtr(), exponent, GMP_RNDN);
1863  return r;
1864  }
1865 
1866  //
1867  // different types of arguments
1868  //
1869  #define __AMP_BINARY_OPI(type) \
1870  template<unsigned int Precision> const ampf<Precision> operator+(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)+op2; } \
1871  template<unsigned int Precision> const ampf<Precision> operator+(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)+op2; } \
1872  template<unsigned int Precision> const ampf<Precision> operator+(const ampf<Precision>& op1, const signed type& op2) { return op1+ampf<Precision>(op2); } \
1873  template<unsigned int Precision> const ampf<Precision> operator+(const ampf<Precision>& op1, const unsigned type& op2) { return op1+ampf<Precision>(op2); } \
1874  template<unsigned int Precision> const ampf<Precision> operator-(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)-op2; } \
1875  template<unsigned int Precision> const ampf<Precision> operator-(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)-op2; } \
1876  template<unsigned int Precision> const ampf<Precision> operator-(const ampf<Precision>& op1, const signed type& op2) { return op1-ampf<Precision>(op2); } \
1877  template<unsigned int Precision> const ampf<Precision> operator-(const ampf<Precision>& op1, const unsigned type& op2) { return op1-ampf<Precision>(op2); } \
1878  template<unsigned int Precision> const ampf<Precision> operator*(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)*op2; } \
1879  template<unsigned int Precision> const ampf<Precision> operator*(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)*op2; } \
1880  template<unsigned int Precision> const ampf<Precision> operator*(const ampf<Precision>& op1, const signed type& op2) { return op1*ampf<Precision>(op2); } \
1881  template<unsigned int Precision> const ampf<Precision> operator*(const ampf<Precision>& op1, const unsigned type& op2) { return op1*ampf<Precision>(op2); } \
1882  template<unsigned int Precision> const ampf<Precision> operator/(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)/op2; } \
1883  template<unsigned int Precision> const ampf<Precision> operator/(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)/op2; } \
1884  template<unsigned int Precision> const ampf<Precision> operator/(const ampf<Precision>& op1, const signed type& op2) { return op1/ampf<Precision>(op2); } \
1885  template<unsigned int Precision> const ampf<Precision> operator/(const ampf<Precision>& op1, const unsigned type& op2) { return op1/ampf<Precision>(op2); } \
1886  template<unsigned int Precision> bool operator==(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)==op2; } \
1887  template<unsigned int Precision> bool operator==(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)==op2; } \
1888  template<unsigned int Precision> bool operator==(const ampf<Precision>& op1, const signed type& op2) { return op1==ampf<Precision>(op2); } \
1889  template<unsigned int Precision> bool operator==(const ampf<Precision>& op1, const unsigned type& op2) { return op1==ampf<Precision>(op2); } \
1890  template<unsigned int Precision> bool operator!=(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)!=op2; } \
1891  template<unsigned int Precision> bool operator!=(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)!=op2; } \
1892  template<unsigned int Precision> bool operator!=(const ampf<Precision>& op1, const signed type& op2) { return op1!=ampf<Precision>(op2); } \
1893  template<unsigned int Precision> bool operator!=(const ampf<Precision>& op1, const unsigned type& op2) { return op1!=ampf<Precision>(op2); } \
1894  template<unsigned int Precision> bool operator<=(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<=op2; } \
1895  template<unsigned int Precision> bool operator<=(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<=op2; } \
1896  template<unsigned int Precision> bool operator<=(const ampf<Precision>& op1, const signed type& op2) { return op1<=ampf<Precision>(op2); } \
1897  template<unsigned int Precision> bool operator<=(const ampf<Precision>& op1, const unsigned type& op2) { return op1<=ampf<Precision>(op2); } \
1898  template<unsigned int Precision> bool operator>=(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>=op2; } \
1899  template<unsigned int Precision> bool operator>=(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>=op2; } \
1900  template<unsigned int Precision> bool operator>=(const ampf<Precision>& op1, const signed type& op2) { return op1>=ampf<Precision>(op2); } \
1901  template<unsigned int Precision> bool operator>=(const ampf<Precision>& op1, const unsigned type& op2) { return op1>=ampf<Precision>(op2); } \
1902  template<unsigned int Precision> bool operator<(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<op2; } \
1903  template<unsigned int Precision> bool operator<(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<op2; } \
1904  template<unsigned int Precision> bool operator<(const ampf<Precision>& op1, const signed type& op2) { return op1<ampf<Precision>(op2); } \
1905  template<unsigned int Precision> bool operator<(const ampf<Precision>& op1, const unsigned type& op2) { return op1<ampf<Precision>(op2); } \
1906  template<unsigned int Precision> bool operator>(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>op2; } \
1907  template<unsigned int Precision> bool operator>(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>op2; } \
1908  template<unsigned int Precision> bool operator>(const ampf<Precision>& op1, const signed type& op2) { return op1>ampf<Precision>(op2); } \
1909  template<unsigned int Precision> bool operator>(const ampf<Precision>& op1, const unsigned type& op2) { return op1>ampf<Precision>(op2); }
1910  __AMP_BINARY_OPI(char)
1911  __AMP_BINARY_OPI(short)
1912  __AMP_BINARY_OPI(long)
1913  __AMP_BINARY_OPI(int)
1914  #undef __AMP_BINARY_OPI
1915  #define __AMP_BINARY_OPF(type) \
1916  template<unsigned int Precision> const ampf<Precision> operator+(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)+op2; } \
1917  template<unsigned int Precision> const ampf<Precision> operator+(const ampf<Precision>& op1, const type& op2) { return op1+ampf<Precision>(op2); } \
1918  template<unsigned int Precision> const ampf<Precision> operator-(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)-op2; } \
1919  template<unsigned int Precision> const ampf<Precision> operator-(const ampf<Precision>& op1, const type& op2) { return op1-ampf<Precision>(op2); } \
1920  template<unsigned int Precision> const ampf<Precision> operator*(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)*op2; } \
1921  template<unsigned int Precision> const ampf<Precision> operator*(const ampf<Precision>& op1, const type& op2) { return op1*ampf<Precision>(op2); } \
1922  template<unsigned int Precision> const ampf<Precision> operator/(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)/op2; } \
1923  template<unsigned int Precision> const ampf<Precision> operator/(const ampf<Precision>& op1, const type& op2) { return op1/ampf<Precision>(op2); } \
1924  template<unsigned int Precision> bool operator==(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)==op2; } \
1925  template<unsigned int Precision> bool operator==(const ampf<Precision>& op1, const type& op2) { return op1==ampf<Precision>(op2); } \
1926  template<unsigned int Precision> bool operator!=(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)!=op2; } \
1927  template<unsigned int Precision> bool operator!=(const ampf<Precision>& op1, const type& op2) { return op1!=ampf<Precision>(op2); } \
1928  template<unsigned int Precision> bool operator<=(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<=op2; } \
1929  template<unsigned int Precision> bool operator<=(const ampf<Precision>& op1, const type& op2) { return op1<=ampf<Precision>(op2); } \
1930  template<unsigned int Precision> bool operator>=(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>=op2; } \
1931  template<unsigned int Precision> bool operator>=(const ampf<Precision>& op1, const type& op2) { return op1>=ampf<Precision>(op2); } \
1932  template<unsigned int Precision> bool operator<(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<op2; } \
1933  template<unsigned int Precision> bool operator<(const ampf<Precision>& op1, const type& op2) { return op1<ampf<Precision>(op2); } \
1934  template<unsigned int Precision> bool operator>(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>op2; } \
1935  template<unsigned int Precision> bool operator>(const ampf<Precision>& op1, const type& op2) { return op1>ampf<Precision>(op2); }
1936  __AMP_BINARY_OPF(float)
1937  __AMP_BINARY_OPF(double)
1938  __AMP_BINARY_OPF(long double)
1939  #undef __AMP_BINARY_OPF
1940 
1941  //
1942  // transcendent functions
1943  //
1944  template<unsigned int Precision>
1945  const ampf<Precision> pi()
1946  {
1947  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1948  mpfr_const_pi(v->value, GMP_RNDN);
1949  return v;
1950  }
1951 
1952  template<unsigned int Precision>
1953  const ampf<Precision> halfpi()
1954  {
1955  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1956  mpfr_const_pi(v->value, GMP_RNDN);
1957  mpfr_mul_2si(v->value, v->value, -1, GMP_RNDN);
1958  return v;
1959  }
1960 
1961  template<unsigned int Precision>
1962  const ampf<Precision> twopi()
1963  {
1964  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1965  mpfr_const_pi(v->value, GMP_RNDN);
1966  mpfr_mul_2si(v->value, v->value, +1, GMP_RNDN);
1967  return v;
1968  }
1969 
1970  template<unsigned int Precision>
1971  const ampf<Precision> sin(const ampf<Precision> &x)
1972  {
1973  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1974  mpfr_sin(v->value, x.getReadPtr(), GMP_RNDN);
1975  return v;
1976  }
1977 
1978  template<unsigned int Precision>
1979  const ampf<Precision> cos(const ampf<Precision> &x)
1980  {
1981  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1982  mpfr_cos(v->value, x.getReadPtr(), GMP_RNDN);
1983  return v;
1984  }
1985 
1986  template<unsigned int Precision>
1987  const ampf<Precision> tan(const ampf<Precision> &x)
1988  {
1989  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1990  mpfr_tan(v->value, x.getReadPtr(), GMP_RNDN);
1991  return v;
1992  }
1993 
1994  template<unsigned int Precision>
1995  const ampf<Precision> asin(const ampf<Precision> &x)
1996  {
1997  mpfr_record *v = mpfr_storage::newMpfr(Precision);
1998  mpfr_asin(v->value, x.getReadPtr(), GMP_RNDN);
1999  return v;
2000  }
2001 
2002  template<unsigned int Precision>
2003  const ampf<Precision> acos(const ampf<Precision> &x)
2004  {
2005  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2006  mpfr_acos(v->value, x.getReadPtr(), GMP_RNDN);
2007  return v;
2008  }
2009 
2010  template<unsigned int Precision>
2011  const ampf<Precision> atan(const ampf<Precision> &x)
2012  {
2013  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2014  mpfr_atan(v->value, x.getReadPtr(), GMP_RNDN);
2015  return v;
2016  }
2017 
2018  template<unsigned int Precision>
2019  const ampf<Precision> atan2(const ampf<Precision> &y, const ampf<Precision> &x)
2020  {
2021  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2022  mpfr_atan2(v->value, y.getReadPtr(), x.getReadPtr(), GMP_RNDN);
2023  return v;
2024  }
2025 
2026  template<unsigned int Precision>
2027  const ampf<Precision> log(const ampf<Precision> &x)
2028  {
2029  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2030  mpfr_log(v->value, x.getReadPtr(), GMP_RNDN);
2031  return v;
2032  }
2033 
2034  template<unsigned int Precision>
2035  const ampf<Precision> log2(const ampf<Precision> &x)
2036  {
2037  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2038  mpfr_log2(v->value, x.getReadPtr(), GMP_RNDN);
2039  return v;
2040  }
2041 
2042  template<unsigned int Precision>
2043  const ampf<Precision> log10(const ampf<Precision> &x)
2044  {
2045  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2046  mpfr_log10(v->value, x.getReadPtr(), GMP_RNDN);
2047  return v;
2048  }
2049 
2050  template<unsigned int Precision>
2051  const ampf<Precision> exp(const ampf<Precision> &x)
2052  {
2053  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2054  mpfr_exp(v->value, x.getReadPtr(), GMP_RNDN);
2055  return v;
2056  }
2057 
2058  template<unsigned int Precision>
2059  const ampf<Precision> sinh(const ampf<Precision> &x)
2060  {
2061  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2062  mpfr_sinh(v->value, x.getReadPtr(), GMP_RNDN);
2063  return v;
2064  }
2065 
2066  template<unsigned int Precision>
2067  const ampf<Precision> cosh(const ampf<Precision> &x)
2068  {
2069  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2070  mpfr_cosh(v->value, x.getReadPtr(), GMP_RNDN);
2071  return v;
2072  }
2073 
2074  template<unsigned int Precision>
2075  const ampf<Precision> tanh(const ampf<Precision> &x)
2076  {
2077  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2078  mpfr_tanh(v->value, x.getReadPtr(), GMP_RNDN);
2079  return v;
2080  }
2081 
2082  template<unsigned int Precision>
2083  const ampf<Precision> pow(const ampf<Precision> &x, const ampf<Precision> &y)
2084  {
2085  mpfr_record *v = mpfr_storage::newMpfr(Precision);
2086  mpfr_pow(v->value, x.getReadPtr(), y.getReadPtr(), GMP_RNDN);
2087  return v;
2088  }
2089 
2090  //
2091  // complex ampf
2092  //
2093  template<unsigned int Precision>
2094  class campf
2095  {
2096  public:
2097  campf():x(0),y(0){};
2098  campf(long double v) { x=v; y=0; }
2099  campf(double v) { x=v; y=0; }
2100  campf(float v) { x=v; y=0; }
2101  campf(signed long v) { x=v; y=0; }
2102  campf(unsigned long v) { x=v; y=0; }
2103  campf(signed int v) { x=v; y=0; }
2104  campf(unsigned int v) { x=v; y=0; }
2105  campf(signed short v) { x=v; y=0; }
2106  campf(unsigned short v) { x=v; y=0; }
2107  campf(signed char v) { x=v; y=0; }
2108  campf(unsigned char v) { x=v; y=0; }
2109  campf(const ampf<Precision> &_x):x(_x),y(0){};
2110  campf(const ampf<Precision> &_x, const ampf<Precision> &_y):x(_x),y(_y){};
2111  campf(const campf &z):x(z.x),y(z.y){};
2112 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
2113  template<unsigned int Prec2>
2114  campf(const campf<Prec2> &z):x(z.x),y(z.y){};
2115 #endif
2116 
2117  campf& operator= (long double v) { x=v; y=0; return *this; }
2118  campf& operator= (double v) { x=v; y=0; return *this; }
2119  campf& operator= (float v) { x=v; y=0; return *this; }
2120  campf& operator= (signed long v) { x=v; y=0; return *this; }
2121  campf& operator= (unsigned long v) { x=v; y=0; return *this; }
2122  campf& operator= (signed int v) { x=v; y=0; return *this; }
2123  campf& operator= (unsigned int v) { x=v; y=0; return *this; }
2124  campf& operator= (signed short v) { x=v; y=0; return *this; }
2125  campf& operator= (unsigned short v) { x=v; y=0; return *this; }
2126  campf& operator= (signed char v) { x=v; y=0; return *this; }
2127  campf& operator= (unsigned char v) { x=v; y=0; return *this; }
2128  campf& operator= (const char *s) { x=s; y=0; return *this; }
2129  campf& operator= (const std::string &s) { x=s; y=0; return *this; }
2131  {
2132  x = r.x;
2133  y = r.y;
2134  return *this;
2135  }
2136 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
2137  template<unsigned int Precision2>
2139  {
2140  x = r.x;
2141  y = r.y;
2142  return *this;
2143  }
2144 #endif
2145 
2146  ampf<Precision> x, y;
2147  };
2148 
2149  //
2150  // complex operations
2151  //
2152  template<unsigned int Precision>
2153  bool operator==(const campf<Precision>& lhs, const campf<Precision>& rhs)
2154  { return lhs.x==rhs.x && lhs.y==rhs.y; }
2155 
2156  template<unsigned int Precision>
2157  bool operator!=(const campf<Precision>& lhs, const campf<Precision>& rhs)
2158  { return lhs.x!=rhs.x || lhs.y!=rhs.y; }
2159 
2160  template<unsigned int Precision>
2161  const campf<Precision> operator+(const campf<Precision>& lhs)
2162  { return lhs; }
2163 
2164  template<unsigned int Precision>
2165  campf<Precision>& operator+=(campf<Precision>& lhs, const campf<Precision>& rhs)
2166  { lhs.x += rhs.x; lhs.y += rhs.y; return lhs; }
2167 
2168  template<unsigned int Precision>
2169  const campf<Precision> operator+(const campf<Precision>& lhs, const campf<Precision>& rhs)
2170  { campf<Precision> r = lhs; r += rhs; return r; }
2171 
2172  template<unsigned int Precision>
2173  const campf<Precision> operator-(const campf<Precision>& lhs)
2174  { return campf<Precision>(-lhs.x, -lhs.y); }
2175 
2176  template<unsigned int Precision>
2177  campf<Precision>& operator-=(campf<Precision>& lhs, const campf<Precision>& rhs)
2178  { lhs.x -= rhs.x; lhs.y -= rhs.y; return lhs; }
2179 
2180  template<unsigned int Precision>
2181  const campf<Precision> operator-(const campf<Precision>& lhs, const campf<Precision>& rhs)
2182  { campf<Precision> r = lhs; r -= rhs; return r; }
2183 
2184  template<unsigned int Precision>
2185  campf<Precision>& operator*=(campf<Precision>& lhs, const campf<Precision>& rhs)
2186  {
2187  ampf<Precision> xx(lhs.x*rhs.x), yy(lhs.y*rhs.y), mm((lhs.x+lhs.y)*(rhs.x+rhs.y));
2188  lhs.x = xx-yy;
2189  lhs.y = mm-xx-yy;
2190  return lhs;
2191  }
2192 
2193  template<unsigned int Precision>
2194  const campf<Precision> operator*(const campf<Precision>& lhs, const campf<Precision>& rhs)
2195  { campf<Precision> r = lhs; r *= rhs; return r; }
2196 
2197  template<unsigned int Precision>
2198  const campf<Precision> operator/(const campf<Precision>& lhs, const campf<Precision>& rhs)
2199  {
2200  campf<Precision> result;
2201  ampf<Precision> e;
2202  ampf<Precision> f;
2203  if( abs(rhs.y)<abs(rhs.x) )
2204  {
2205  e = rhs.y/rhs.x;
2206  f = rhs.x+rhs.y*e;
2207  result.x = (lhs.x+lhs.y*e)/f;
2208  result.y = (lhs.y-lhs.x*e)/f;
2209  }
2210  else
2211  {
2212  e = rhs.x/rhs.y;
2213  f = rhs.y+rhs.x*e;
2214  result.x = (lhs.y+lhs.x*e)/f;
2215  result.y = (-lhs.x+lhs.y*e)/f;
2216  }
2217  return result;
2218  }
2219 
2220  template<unsigned int Precision>
2221  campf<Precision>& operator/=(campf<Precision>& lhs, const campf<Precision>& rhs)
2222  {
2223  lhs = lhs/rhs;
2224  return lhs;
2225  }
2226 
2227  template<unsigned int Precision>
2228  const ampf<Precision> abscomplex(const campf<Precision> &z)
2229  {
2230  ampf<Precision> w, xabs, yabs, v;
2231 
2232  xabs = abs(z.x);
2233  yabs = abs(z.y);
2234  w = xabs>yabs ? xabs : yabs;
2235  v = xabs<yabs ? xabs : yabs;
2236  if( v==0 )
2237  return w;
2238  else
2239  {
2240  ampf<Precision> t = v/w;
2241  return w*sqrt(1+sqr(t));
2242  }
2243  }
2244 
2245  template<unsigned int Precision>
2246  const campf<Precision> conj(const campf<Precision> &z)
2247  {
2248  return campf<Precision>(z.x, -z.y);
2249  }
2250 
2251  template<unsigned int Precision>
2252  const campf<Precision> csqr(const campf<Precision> &z)
2253  {
2254  ampf<Precision> t = z.x*z.y;
2255  return campf<Precision>(sqr(z.x)-sqr(z.y), t+t);
2256  }
2257 
2258  //
2259  // different types of arguments
2260  //
2261  #define __AMP_BINARY_OPI(type) \
2262  template<unsigned int Precision> const campf<Precision> operator+ (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(op1+op2.x, op2.y); } \
2263  template<unsigned int Precision> const campf<Precision> operator+ (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(op1+op2.x, op2.y); } \
2264  template<unsigned int Precision> const campf<Precision> operator+ (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op1.x+op2, op1.y); } \
2265  template<unsigned int Precision> const campf<Precision> operator+ (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op1.x+op2, op1.y); } \
2266  template<unsigned int Precision> const campf<Precision> operator- (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(op1-op2.x, -op2.y); } \
2267  template<unsigned int Precision> const campf<Precision> operator- (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(op1-op2.x, -op2.y); } \
2268  template<unsigned int Precision> const campf<Precision> operator- (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op1.x-op2, op1.y); } \
2269  template<unsigned int Precision> const campf<Precision> operator- (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op1.x-op2, op1.y); } \
2270  template<unsigned int Precision> const campf<Precision> operator* (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(op1*op2.x, op1*op2.y); } \
2271  template<unsigned int Precision> const campf<Precision> operator* (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(op1*op2.x, op1*op2.y); } \
2272  template<unsigned int Precision> const campf<Precision> operator* (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op2*op1.x, op2*op1.y); } \
2273  template<unsigned int Precision> const campf<Precision> operator* (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op2*op1.x, op2*op1.y); } \
2274  template<unsigned int Precision> const campf<Precision> operator/ (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(ampf<Precision>(op1),ampf<Precision>(0))/op2; } \
2275  template<unsigned int Precision> const campf<Precision> operator/ (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(ampf<Precision>(op1),ampf<Precision>(0))/op2; } \
2276  template<unsigned int Precision> const campf<Precision> operator/ (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op1.x/op2, op1.y/op2); } \
2277  template<unsigned int Precision> const campf<Precision> operator/ (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op1.x/op2, op1.y/op2); } \
2278  template<unsigned int Precision> bool operator==(const signed type& op1, const campf<Precision>& op2) { return op1==op2.x && op2.y==0; } \
2279  template<unsigned int Precision> bool operator==(const unsigned type& op1, const campf<Precision>& op2) { return op1==op2.x && op2.y==0; } \
2280  template<unsigned int Precision> bool operator==(const campf<Precision>& op1, const signed type& op2) { return op1.x==op2 && op1.y==0; } \
2281  template<unsigned int Precision> bool operator==(const campf<Precision>& op1, const unsigned type& op2) { return op1.x==op2 && op1.y==0; } \
2282  template<unsigned int Precision> bool operator!=(const campf<Precision>& op1, const signed type& op2) { return op1.x!=op2 || op1.y!=0; } \
2283  template<unsigned int Precision> bool operator!=(const campf<Precision>& op1, const unsigned type& op2) { return op1.x!=op2 || op1.y!=0; } \
2284  template<unsigned int Precision> bool operator!=(const signed type& op1, const campf<Precision>& op2) { return op1!=op2.x || op2.y!=0; } \
2285  template<unsigned int Precision> bool operator!=(const unsigned type& op1, const campf<Precision>& op2) { return op1!=op2.x || op2.y!=0; }
2286  __AMP_BINARY_OPI(char)
2287  __AMP_BINARY_OPI(short)
2288  __AMP_BINARY_OPI(long)
2289  __AMP_BINARY_OPI(int)
2290  #undef __AMP_BINARY_OPI
2291  #define __AMP_BINARY_OPF(type) \
2292  template<unsigned int Precision> const campf<Precision> operator+ (const type& op1, const campf<Precision>& op2) { return campf<Precision>(op1+op2.x, op2.y); } \
2293  template<unsigned int Precision> const campf<Precision> operator+ (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op1.x+op2, op1.y); } \
2294  template<unsigned int Precision> const campf<Precision> operator- (const type& op1, const campf<Precision>& op2) { return campf<Precision>(op1-op2.x, -op2.y); } \
2295  template<unsigned int Precision> const campf<Precision> operator- (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op1.x-op2, op1.y); } \
2296  template<unsigned int Precision> const campf<Precision> operator* (const type& op1, const campf<Precision>& op2) { return campf<Precision>(op1*op2.x, op1*op2.y); } \
2297  template<unsigned int Precision> const campf<Precision> operator* (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op2*op1.x, op2*op1.y); } \
2298  template<unsigned int Precision> const campf<Precision> operator/ (const type& op1, const campf<Precision>& op2) { return campf<Precision>(ampf<Precision>(op1),ampf<Precision>(0))/op2; } \
2299  template<unsigned int Precision> const campf<Precision> operator/ (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op1.x/op2, op1.y/op2); } \
2300  template<unsigned int Precision> bool operator==(const type& op1, const campf<Precision>& op2) { return op1==op2.x && op2.y==0; } \
2301  template<unsigned int Precision> bool operator==(const campf<Precision>& op1, const type& op2) { return op1.x==op2 && op1.y==0; } \
2302  template<unsigned int Precision> bool operator!=(const type& op1, const campf<Precision>& op2) { return op1!=op2.x || op2.y!=0; } \
2303  template<unsigned int Precision> bool operator!=(const campf<Precision>& op1, const type& op2) { return op1.x!=op2 || op1.y!=0; }
2304  __AMP_BINARY_OPF(float)
2305  __AMP_BINARY_OPF(double)
2306  __AMP_BINARY_OPF(long double)
2307  __AMP_BINARY_OPF(ampf<Precision>)
2308  #undef __AMP_BINARY_OPF
2309 
2310  //
2311  // Real linear algebra
2312  //
2313  template<unsigned int Precision>
2314  ampf<Precision> vDotProduct(ap::const_raw_vector< ampf<Precision> > v1, ap::const_raw_vector< ampf<Precision> > v2)
2315  {
2316  ap::ap_error::make_assertion(v1.GetLength()==v2.GetLength());
2317  int i, cnt = v1.GetLength();
2318  const ampf<Precision> *p1 = v1.GetData();
2319  const ampf<Precision> *p2 = v2.GetData();
2320  mpfr_record *r = NULL;
2321  mpfr_record *t = NULL;
2322  //try
2323  {
2324  r = mpfr_storage::newMpfr(Precision);
2325  t = mpfr_storage::newMpfr(Precision);
2326  mpfr_set_ui(r->value, 0, GMP_RNDN);
2327  for(i=0; i<cnt; i++)
2328  {
2329  mpfr_mul(t->value, p1->getReadPtr(), p2->getReadPtr(), GMP_RNDN);
2330  mpfr_add(r->value, r->value, t->value, GMP_RNDN);
2331  p1 += v1.GetStep();
2332  p2 += v2.GetStep();
2333  }
2335  return r;
2336  }
2337  //catch(...)
2338  //{
2339  // if( r!=NULL )
2340  // mpfr_storage::deleteMpfr(r);
2341  // if( t!=NULL )
2342  // mpfr_storage::deleteMpfr(t);
2343  // throw;
2344  //}
2345  }
2346 
2347  template<unsigned int Precision>
2348  void vMove(ap::raw_vector< ampf<Precision> > vDst, ap::const_raw_vector< ampf<Precision> > vSrc)
2349  {
2350  ap::ap_error::make_assertion(vDst.GetLength()==vSrc.GetLength());
2351  int i, cnt = vDst.GetLength();
2352  ampf<Precision> *pDst = vDst.GetData();
2353  const ampf<Precision> *pSrc = vSrc.GetData();
2354  if( pDst==pSrc )
2355  return;
2356  for(i=0; i<cnt; i++)
2357  {
2358  *pDst = *pSrc;
2359  pDst += vDst.GetStep();
2360  pSrc += vSrc.GetStep();
2361  }
2362  }
2363 
2364  template<unsigned int Precision>
2365  void vMoveNeg(ap::raw_vector< ampf<Precision> > vDst, ap::const_raw_vector< ampf<Precision> > vSrc)
2366  {
2367  ap::ap_error::make_assertion(vDst.GetLength()==vSrc.GetLength());
2368  int i, cnt = vDst.GetLength();
2369  ampf<Precision> *pDst = vDst.GetData();
2370  const ampf<Precision> *pSrc = vSrc.GetData();
2371  for(i=0; i<cnt; i++)
2372  {
2373  *pDst = *pSrc;
2374  mpfr_ptr v = pDst->getWritePtr();
2375  mpfr_neg(v, v, GMP_RNDN);
2376  pDst += vDst.GetStep();
2377  pSrc += vSrc.GetStep();
2378  }
2379  }
2380 
2381  template<unsigned int Precision, class T2>
2382  void vMove(ap::raw_vector< ampf<Precision> > vDst, ap::const_raw_vector< ampf<Precision> > vSrc, T2 alpha)
2383  {
2384  ap::ap_error::make_assertion(vDst.GetLength()==vSrc.GetLength());
2385  int i, cnt = vDst.GetLength();
2386  ampf<Precision> *pDst = vDst.GetData();
2387  const ampf<Precision> *pSrc = vSrc.GetData();
2388  ampf<Precision> a(alpha);
2389  for(i=0; i<cnt; i++)
2390  {
2391  *pDst = *pSrc;
2392  mpfr_ptr v = pDst->getWritePtr();
2393  mpfr_mul(v, v, a.getReadPtr(), GMP_RNDN);
2394  pDst += vDst.GetStep();
2395  pSrc += vSrc.GetStep();
2396  }
2397  }
2398 
2399  template<unsigned int Precision>
2400  void vAdd(ap::raw_vector< ampf<Precision> > vDst, ap::const_raw_vector< ampf<Precision> > vSrc)
2401  {
2402  ap::ap_error::make_assertion(vDst.GetLength()==vSrc.GetLength());
2403  int i, cnt = vDst.GetLength();
2404  ampf<Precision> *pDst = vDst.GetData();
2405  const ampf<Precision> *pSrc = vSrc.GetData();
2406  for(i=0; i<cnt; i++)
2407  {
2408  mpfr_ptr v = pDst->getWritePtr();
2409  mpfr_srcptr vs = pSrc->getReadPtr();
2410  mpfr_add(v, v, vs, GMP_RNDN);
2411  pDst += vDst.GetStep();
2412  pSrc += vSrc.GetStep();
2413  }
2414  }
2415 
2416  template<unsigned int Precision, class T2>
2417  void vAdd(ap::raw_vector< ampf<Precision> > vDst, ap::const_raw_vector< ampf<Precision> > vSrc, T2 alpha)
2418  {
2419  ap::ap_error::make_assertion(vDst.GetLength()==vSrc.GetLength());
2420  int i, cnt = vDst.GetLength();
2421  ampf<Precision> *pDst = vDst.GetData();
2422  const ampf<Precision> *pSrc = vSrc.GetData();
2423  ampf<Precision> a(alpha), tmp;
2424  for(i=0; i<cnt; i++)
2425  {
2426  mpfr_ptr v = pDst->getWritePtr();
2427  mpfr_srcptr vs = pSrc->getReadPtr();
2428  mpfr_mul(tmp.getWritePtr(), a.getReadPtr(), vs, GMP_RNDN);
2429  mpfr_add(v, v, tmp.getWritePtr(), GMP_RNDN);
2430  pDst += vDst.GetStep();
2431  pSrc += vSrc.GetStep();
2432  }
2433  }
2434 
2435  template<unsigned int Precision>
2436  void vSub(ap::raw_vector< ampf<Precision> > vDst, ap::const_raw_vector< ampf<Precision> > vSrc)
2437  {
2438  ap::ap_error::make_assertion(vDst.GetLength()==vSrc.GetLength());
2439  int i, cnt = vDst.GetLength();
2440  ampf<Precision> *pDst = vDst.GetData();
2441  const ampf<Precision> *pSrc = vSrc.GetData();
2442  for(i=0; i<cnt; i++)
2443  {
2444  mpfr_ptr v = pDst->getWritePtr();
2445  mpfr_srcptr vs = pSrc->getReadPtr();
2446  mpfr_sub(v, v, vs, GMP_RNDN);
2447  pDst += vDst.GetStep();
2448  pSrc += vSrc.GetStep();
2449  }
2450  }
2451 
2452  template<unsigned int Precision, class T2>
2453  void vSub(ap::raw_vector< ampf<Precision> > vDst, ap::const_raw_vector< ampf<Precision> > vSrc, T2 alpha)
2454  {
2455  vAdd(vDst, vSrc, -alpha);
2456  }
2457 
2458  template<unsigned int Precision, class T2>
2459  void vMul(ap::raw_vector< ampf<Precision> > vDst, T2 alpha)
2460  {
2461  int i, cnt = vDst.GetLength();
2462  ampf<Precision> *pDst = vDst.GetData();
2463  ampf<Precision> a(alpha);
2464  for(i=0; i<cnt; i++)
2465  {
2466  mpfr_ptr v = pDst->getWritePtr();
2467  mpfr_mul(v, a.getReadPtr(), v, GMP_RNDN);
2468  pDst += vDst.GetStep();
2469  }
2470  }
2471 }
2472 
2473 /* stuff included from ./reflections.h */
2474 
2475 /*************************************************************************
2476 Copyright (c) 1992-2007 The University of Tennessee. All rights reserved.
2477 
2478 Contributors:
2479  * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
2480  pseudocode.
2481 
2482 See subroutines comments for additional copyrights.
2483 
2484 Redistribution and use in source and binary forms, with or without
2485 modification, are permitted provided that the following conditions are
2486 met:
2487 
2488 - Redistributions of source code must retain the above copyright
2489  notice, this list of conditions and the following disclaimer.
2490 
2491 - Redistributions in binary form must reproduce the above copyright
2492  notice, this list of conditions and the following disclaimer listed
2493  in this license in the documentation and/or other materials
2494  provided with the distribution.
2495 
2496 - Neither the name of the copyright holders nor the names of its
2497  contributors may be used to endorse or promote products derived from
2498  this software without specific prior written permission.
2499 
2500 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2501 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2502 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2503 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2504 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2505 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2506 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2507 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2508 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2509 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
2510 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2511 *************************************************************************/
2512 
2513 namespace reflections
2514 {
2515  template<unsigned int Precision>
2517  int n,
2519  template<unsigned int Precision>
2523  int m1,
2524  int m2,
2525  int n1,
2526  int n2,
2528  template<unsigned int Precision>
2532  int m1,
2533  int m2,
2534  int n1,
2535  int n2,
2537 
2538 
2539  /*************************************************************************
2540  Generation of an elementary reflection transformation
2541 
2542  The subroutine generates elementary reflection H of order N, so that, for
2543  a given X, the following equality holds true:
2544 
2545  ( X(1) ) ( Beta )
2546  H * ( .. ) = ( 0 )
2547  ( X(n) ) ( 0 )
2548 
2549  where
2550  ( V(1) )
2551  H = 1 - Tau * ( .. ) * ( V(1), ..., V(n) )
2552  ( V(n) )
2553 
2554  where the first component of vector V equals 1.
2555 
2556  Input parameters:
2557  X - vector. Array whose index ranges within [1..N].
2558  N - reflection order.
2559 
2560  Output parameters:
2561  X - components from 2 to N are replaced with vector V.
2562  The first component is replaced with parameter Beta.
2563  Tau - scalar value Tau. If X is a null vector, Tau equals 0,
2564  otherwise 1 <= Tau <= 2.
2565 
2566  This subroutine is the modification of the DLARFG subroutines from
2567  the LAPACK library. It has a similar functionality except for the
2568  cause an overflow.
2569 
2570 
2571  MODIFICATIONS:
2572  24.12.2005 sign(Alpha) was replaced with an analogous to the Fortran SIGN code.
2573 
2574  -- LAPACK auxiliary routine (version 3.0) --
2575  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
2576  Courant Institute, Argonne National Lab, and Rice University
2577  September 30, 1994
2578  *************************************************************************/
2579  template<unsigned int Precision>
2581  int n,
2583  {
2584  int j;
2586  amp::ampf<Precision> xnorm;
2590 
2591 
2592 
2593  //
2594  // Executable Statements ..
2595  //
2596  if( n<=1 )
2597  {
2598  tau = 0;
2599  return;
2600  }
2601 
2602  //
2603  // XNORM = DNRM2( N-1, X, INCX )
2604  //
2605  alpha = x(1);
2606  mx = 0;
2607  for(j=2; j<=n; j++)
2608  {
2609  mx = amp::maximum<Precision>(amp::abs<Precision>(x(j)), mx);
2610  }
2611  xnorm = 0;
2612  if( mx!=0 )
2613  {
2614  for(j=2; j<=n; j++)
2615  {
2616  xnorm = xnorm+amp::sqr<Precision>(x(j)/mx);
2617  }
2618  xnorm = amp::sqrt<Precision>(xnorm)*mx;
2619  }
2620  if( xnorm==0 )
2621  {
2622 
2623  //
2624  // H = I
2625  //
2626  tau = 0;
2627  return;
2628  }
2629 
2630  //
2631  // general case
2632  //
2633  mx = amp::maximum<Precision>(amp::abs<Precision>(alpha), amp::abs<Precision>(xnorm));
2634  beta = -mx*amp::sqrt<Precision>(amp::sqr<Precision>(alpha/mx)+amp::sqr<Precision>(xnorm/mx));
2635  if( alpha<0 )
2636  {
2637  beta = -beta;
2638  }
2639  tau = (beta-alpha)/beta;
2640  v = 1/(alpha-beta);
2641  ap::vmul(x.getvector(2, n), v);
2642  x(1) = beta;
2643  }
2644 
2645 
2646  /*************************************************************************
2647  Application of an elementary reflection to a rectangular matrix of size MxN
2648 
2649  The algorithm pre-multiplies the matrix by an elementary reflection transformation
2650  which is given by column V and scalar Tau (see the description of the
2651  GenerateReflection procedure). Not the whole matrix but only a part of it
2652  is transformed (rows from M1 to M2, columns from N1 to N2). Only the elements
2653  of this submatrix are changed.
2654 
2655  Input parameters:
2656  C - matrix to be transformed.
2657  Tau - scalar defining the transformation.
2658  V - column defining the transformation.
2659  Array whose index ranges within [1..M2-M1+1].
2660  M1, M2 - range of rows to be transformed.
2661  N1, N2 - range of columns to be transformed.
2662  WORK - working array whose indexes goes from N1 to N2.
2663 
2664  Output parameters:
2665  C - the result of multiplying the input matrix C by the
2666  transformation matrix which is given by Tau and V.
2667  If N1>N2 or M1>M2, C is not modified.
2668 
2669  -- LAPACK auxiliary routine (version 3.0) --
2670  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
2671  Courant Institute, Argonne National Lab, and Rice University
2672  September 30, 1994
2673  *************************************************************************/
2674  template<unsigned int Precision>
2678  int m1,
2679  int m2,
2680  int n1,
2681  int n2,
2683  {
2685  int i;
2686 
2687  if( tau==0 || n1>n2 || m1>m2 )
2688  {
2689  return;
2690  }
2691 
2692  //
2693  // w := C' * v
2694  //
2695  for(i=n1; i<=n2; i++)
2696  {
2697  work(i) = 0;
2698  }
2699  for(i=m1; i<=m2; i++)
2700  {
2701  t = v(i+1-m1);
2702  ap::vadd(work.getvector(n1, n2), c.getrow(i, n1, n2), t);
2703  }
2704 
2705  //
2706  // C := C - tau * v * w'
2707  //
2708  for(i=m1; i<=m2; i++)
2709  {
2710  t = v(i-m1+1)*tau;
2711  ap::vsub(c.getrow(i, n1, n2), work.getvector(n1, n2), t);
2712  }
2713  }
2714 
2715 
2716  /*************************************************************************
2717  Application of an elementary reflection to a rectangular matrix of size MxN
2718 
2719  The algorithm post-multiplies the matrix by an elementary reflection transformation
2720  which is given by column V and scalar Tau (see the description of the
2721  GenerateReflection procedure). Not the whole matrix but only a part of it
2722  is transformed (rows from M1 to M2, columns from N1 to N2). Only the
2723  elements of this submatrix are changed.
2724 
2725  Input parameters:
2726  C - matrix to be transformed.
2727  Tau - scalar defining the transformation.
2728  V - column defining the transformation.
2729  Array whose index ranges within [1..N2-N1+1].
2730  M1, M2 - range of rows to be transformed.
2731  N1, N2 - range of columns to be transformed.
2732  WORK - working array whose indexes goes from M1 to M2.
2733 
2734  Output parameters:
2735  C - the result of multiplying the input matrix C by the
2736  transformation matrix which is given by Tau and V.
2737  If N1>N2 or M1>M2, C is not modified.
2738 
2739  -- LAPACK auxiliary routine (version 3.0) --
2740  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
2741  Courant Institute, Argonne National Lab, and Rice University
2742  September 30, 1994
2743  *************************************************************************/
2744  template<unsigned int Precision>
2748  int m1,
2749  int m2,
2750  int n1,
2751  int n2,
2753  {
2755  int i;
2756  int vm;
2757 
2758 
2759  if( tau==0 || n1>n2 || m1>m2 )
2760  {
2761  return;
2762  }
2763 
2764  //
2765  // w := C * v
2766  //
2767  vm = n2-n1+1;
2768  for(i=m1; i<=m2; i++)
2769  {
2770  t = ap::vdotproduct(c.getrow(i, n1, n2), v.getvector(1, vm));
2771  work(i) = t;
2772  }
2773 
2774  //
2775  // C := C - w * v'
2776  //
2777  for(i=m1; i<=m2; i++)
2778  {
2779  t = work(i)*tau;
2780  ap::vsub(c.getrow(i, n1, n2), v.getvector(1, vm), t);
2781  }
2782  }
2783 } // namespace
2784 
2785 /* stuff included from ./bidiagonal.h */
2786 
2787 /*************************************************************************
2788 Copyright (c) 1992-2007 The University of Tennessee. All rights reserved.
2789 
2790 Contributors:
2791  * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
2792  pseudocode.
2793 
2794 See subroutines comments for additional copyrights.
2795 
2796 Redistribution and use in source and binary forms, with or without
2797 modification, are permitted provided that the following conditions are
2798 met:
2799 
2800 - Redistributions of source code must retain the above copyright
2801  notice, this list of conditions and the following disclaimer.
2802 
2803 - Redistributions in binary form must reproduce the above copyright
2804  notice, this list of conditions and the following disclaimer listed
2805  in this license in the documentation and/or other materials
2806  provided with the distribution.
2807 
2808 - Neither the name of the copyright holders nor the names of its
2809  contributors may be used to endorse or promote products derived from
2810  this software without specific prior written permission.
2811 
2812 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2813 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2814 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2815 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2816 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2817 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2818 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2819 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2820 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2821 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
2822 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2823 *************************************************************************/
2824 
2825 namespace bidiagonal
2826 {
2827  template<unsigned int Precision>
2829  int m,
2830  int n,
2833  template<unsigned int Precision>
2835  int m,
2836  int n,
2838  int qcolumns,
2840  template<unsigned int Precision>
2842  int m,
2843  int n,
2846  int zrows,
2847  int zcolumns,
2848  bool fromtheright,
2849  bool dotranspose);
2850  template<unsigned int Precision>
2852  int m,
2853  int n,
2855  int ptrows,
2857  template<unsigned int Precision>
2859  int m,
2860  int n,
2863  int zrows,
2864  int zcolumns,
2865  bool fromtheright,
2866  bool dotranspose);
2867  template<unsigned int Precision>
2869  int m,
2870  int n,
2871  bool& isupper,
2874  template<unsigned int Precision>
2876  int m,
2877  int n,
2880  template<unsigned int Precision>
2882  int m,
2883  int n,
2885  int qcolumns,
2887  template<unsigned int Precision>
2889  int m,
2890  int n,
2893  int zrows,
2894  int zcolumns,
2895  bool fromtheright,
2896  bool dotranspose);
2897  template<unsigned int Precision>
2899  int m,
2900  int n,
2902  int ptrows,
2904  template<unsigned int Precision>
2906  int m,
2907  int n,
2910  int zrows,
2911  int zcolumns,
2912  bool fromtheright,
2913  bool dotranspose);
2914  template<unsigned int Precision>
2916  int m,
2917  int n,
2918  bool& isupper,
2921 
2922 
2923  /*************************************************************************
2924  Reduction of a rectangular matrix to bidiagonal form
2925 
2926  The algorithm reduces the rectangular matrix A to bidiagonal form by
2927  orthogonal transformations P and Q: A = Q*B*P.
2928 
2929  Input parameters:
2930  A - source matrix. array[0..M-1, 0..N-1]
2931  M - number of rows in matrix A.
2932  N - number of columns in matrix A.
2933 
2934  Output parameters:
2935  A - matrices Q, B, P in compact form (see below).
2936  TauQ - scalar factors which are used to form matrix Q.
2937  TauP - scalar factors which are used to form matrix P.
2938 
2939  The main diagonal and one of the secondary diagonals of matrix A are
2940  replaced with bidiagonal matrix B. Other elements contain elementary
2941  reflections which form MxM matrix Q and NxN matrix P, respectively.
2942 
2943  If M>=N, B is the upper bidiagonal MxN matrix and is stored in the
2944  corresponding elements of matrix A. Matrix Q is represented as a
2945  product of elementary reflections Q = H(0)*H(1)*...*H(n-1), where
2946  H(i) = 1-tau*v*v'. Here tau is a scalar which is stored in TauQ[i], and
2947  vector v has the following structure: v(0:i-1)=0, v(i)=1, v(i+1:m-1) is
2948  stored in elements A(i+1:m-1,i). Matrix P is as follows: P =
2949  G(0)*G(1)*...*G(n-2), where G(i) = 1 - tau*u*u'. Tau is stored in TauP[i],
2950  u(0:i)=0, u(i+1)=1, u(i+2:n-1) is stored in elements A(i,i+2:n-1).
2951 
2952  If M<N, B is the lower bidiagonal MxN matrix and is stored in the
2953  corresponding elements of matrix A. Q = H(0)*H(1)*...*H(m-2), where
2954  H(i) = 1 - tau*v*v', tau is stored in TauQ, v(0:i)=0, v(i+1)=1, v(i+2:m-1)
2955  is stored in elements A(i+2:m-1,i). P = G(0)*G(1)*...*G(m-1),
2956  G(i) = 1-tau*u*u', tau is stored in TauP, u(0:i-1)=0, u(i)=1, u(i+1:n-1)
2957  is stored in A(i,i+1:n-1).
2958 
2959  EXAMPLE:
2960 
2961  m=6, n=5 (m > n): m=5, n=6 (m < n):
2962 
2963  ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
2964  ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
2965  ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
2966  ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
2967  ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
2968  ( v1 v2 v3 v4 v5 )
2969 
2970  Here vi and ui are vectors which form H(i) and G(i), and d and e -
2971  are the diagonal and off-diagonal elements of matrix B.
2972  *************************************************************************/
2973  template<unsigned int Precision>
2975  int m,
2976  int n,
2979  {
2982  int minmn;
2983  int maxmn;
2984  int i;
2985  int j;
2986  amp::ampf<Precision> ltau;
2987 
2988 
2989 
2990  //
2991  // Prepare
2992  //
2993  if( n<=0 || m<=0 )
2994  {
2995  return;
2996  }
2997  minmn = ap::minint(m, n);
2998  maxmn = ap::maxint(m, n);
2999  work.setbounds(0, maxmn);
3000  t.setbounds(0, maxmn);
3001  if( m>=n )
3002  {
3003  tauq.setbounds(0, n-1);
3004  taup.setbounds(0, n-1);
3005  }
3006  else
3007  {
3008  tauq.setbounds(0, m-1);
3009  taup.setbounds(0, m-1);
3010  }
3011  if( m>=n )
3012  {
3013 
3014  //
3015  // Reduce to upper bidiagonal form
3016  //
3017  for(i=0; i<=n-1; i++)
3018  {
3019 
3020  //
3021  // Generate elementary reflector H(i) to annihilate A(i+1:m-1,i)
3022  //
3023  ap::vmove(t.getvector(1, m-i), a.getcolumn(i, i, m-1));
3024  reflections::generatereflection<Precision>(t, m-i, ltau);
3025  tauq(i) = ltau;
3026  ap::vmove(a.getcolumn(i, i, m-1), t.getvector(1, m-i));
3027  t(1) = 1;
3028 
3029  //
3030  // Apply H(i) to A(i:m-1,i+1:n-1) from the left
3031  //
3032  reflections::applyreflectionfromtheleft<Precision>(a, ltau, t, i, m-1, i+1, n-1, work);
3033  if( i<n-1 )
3034  {
3035 
3036  //
3037  // Generate elementary reflector G(i) to annihilate
3038  // A(i,i+2:n-1)
3039  //
3040  ap::vmove(t.getvector(1, n-i-1), a.getrow(i, i+1, n-1));
3041  reflections::generatereflection<Precision>(t, n-1-i, ltau);
3042  taup(i) = ltau;
3043  ap::vmove(a.getrow(i, i+1, n-1), t.getvector(1, n-1-i));
3044  t(1) = 1;
3045 
3046  //
3047  // Apply G(i) to A(i+1:m-1,i+1:n-1) from the right
3048  //
3049  reflections::applyreflectionfromtheright<Precision>(a, ltau, t, i+1, m-1, i+1, n-1, work);
3050  }
3051  else
3052  {
3053  taup(i) = 0;
3054  }
3055  }
3056  }
3057  else
3058  {
3059 
3060  //
3061  // Reduce to lower bidiagonal form
3062  //
3063  for(i=0; i<=m-1; i++)
3064  {
3065 
3066  //
3067  // Generate elementary reflector G(i) to annihilate A(i,i+1:n-1)
3068  //
3069  ap::vmove(t.getvector(1, n-i), a.getrow(i, i, n-1));
3070  reflections::generatereflection<Precision>(t, n-i, ltau);
3071  taup(i) = ltau;
3072  ap::vmove(a.getrow(i, i, n-1), t.getvector(1, n-i));
3073  t(1) = 1;
3074 
3075  //
3076  // Apply G(i) to A(i+1:m-1,i:n-1) from the right
3077  //
3078  reflections::applyreflectionfromtheright<Precision>(a, ltau, t, i+1, m-1, i, n-1, work);
3079  if( i<m-1 )
3080  {
3081 
3082  //
3083  // Generate elementary reflector H(i) to annihilate
3084  // A(i+2:m-1,i)
3085  //
3086  ap::vmove(t.getvector(1, m-1-i), a.getcolumn(i, i+1, m-1));
3087  reflections::generatereflection<Precision>(t, m-1-i, ltau);
3088  tauq(i) = ltau;
3089  ap::vmove(a.getcolumn(i, i+1, m-1), t.getvector(1, m-1-i));
3090  t(1) = 1;
3091 
3092  //
3093  // Apply H(i) to A(i+1:m-1,i+1:n-1) from the left
3094  //
3095  reflections::applyreflectionfromtheleft<Precision>(a, ltau, t, i+1, m-1, i+1, n-1, work);
3096  }
3097  else
3098  {
3099  tauq(i) = 0;
3100  }
3101  }
3102  }
3103  }
3104 
3105 
3106  /*************************************************************************
3107  Unpacking matrix Q which reduces a matrix to bidiagonal form.
3108 
3109  Input parameters:
3110  QP - matrices Q and P in compact form.
3111  Output of ToBidiagonal subroutine.
3112  M - number of rows in matrix A.
3113  N - number of columns in matrix A.
3114  TAUQ - scalar factors which are used to form Q.
3115  Output of ToBidiagonal subroutine.
3116  QColumns - required number of columns in matrix Q.
3117  M>=QColumns>=0.
3118 
3119  Output parameters:
3120  Q - first QColumns columns of matrix Q.
3121  Array[0..M-1, 0..QColumns-1]
3122  If QColumns=0, the array is not modified.
3123 
3124  -- ALGLIB --
3125  Copyright 2005 by Bochkanov Sergey
3126  *************************************************************************/
3127  template<unsigned int Precision>
3129  int m,
3130  int n,
3132  int qcolumns,
3134  {
3135  int i;
3136  int j;
3137 
3138 
3139  ap::ap_error::make_assertion(qcolumns<=m);
3140  ap::ap_error::make_assertion(qcolumns>=0);
3141  if( m==0 || n==0 || qcolumns==0 )
3142  {
3143  return;
3144  }
3145 
3146  //
3147  // prepare Q
3148  //
3149  q.setbounds(0, m-1, 0, qcolumns-1);
3150  for(i=0; i<=m-1; i++)
3151  {
3152  for(j=0; j<=qcolumns-1; j++)
3153  {
3154  if( i==j )
3155  {
3156  q(i,j) = 1;
3157  }
3158  else
3159  {
3160  q(i,j) = 0;
3161  }
3162  }
3163  }
3164 
3165  //
3166  // Calculate
3167  //
3168  rmatrixbdmultiplybyq<Precision>(qp, m, n, tauq, q, m, qcolumns, false, false);
3169  }
3170 
3171 
3172  /*************************************************************************
3173  Multiplication by matrix Q which reduces matrix A to bidiagonal form.
3174 
3175  The algorithm allows pre- or post-multiply by Q or Q'.
3176 
3177  Input parameters:
3178  QP - matrices Q and P in compact form.
3179  Output of ToBidiagonal subroutine.
3180  M - number of rows in matrix A.
3181  N - number of columns in matrix A.
3182  TAUQ - scalar factors which are used to form Q.
3183  Output of ToBidiagonal subroutine.
3184  Z - multiplied matrix.
3185  array[0..ZRows-1,0..ZColumns-1]
3186  ZRows - number of rows in matrix Z. If FromTheRight=False,
3187  ZRows=M, otherwise ZRows can be arbitrary.
3188  ZColumns - number of columns in matrix Z. If FromTheRight=True,
3189  ZColumns=M, otherwise ZColumns can be arbitrary.
3190  FromTheRight - pre- or post-multiply.
3191  DoTranspose - multiply by Q or Q'.
3192 
3193  Output parameters:
3194  Z - product of Z and Q.
3195  Array[0..ZRows-1,0..ZColumns-1]
3196  If ZRows=0 or ZColumns=0, the array is not modified.
3197 
3198  -- ALGLIB --
3199  Copyright 2005 by Bochkanov Sergey
3200  *************************************************************************/
3201  template<unsigned int Precision>
3203  int m,
3204  int n,
3207  int zrows,
3208  int zcolumns,
3209  bool fromtheright,
3210  bool dotranspose)
3211  {
3212  int i;
3213  int i1;
3214  int i2;
3215  int istep;
3218  int mx;
3219 
3220 
3221  if( m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
3222  {
3223  return;
3224  }
3225  ap::ap_error::make_assertion(fromtheright && zcolumns==m || !fromtheright && zrows==m);
3226 
3227  //
3228  // init
3229  //
3230  mx = ap::maxint(m, n);
3231  mx = ap::maxint(mx, zrows);
3232  mx = ap::maxint(mx, zcolumns);
3233  v.setbounds(0, mx);
3234  work.setbounds(0, mx);
3235  if( m>=n )
3236  {
3237 
3238  //
3239  // setup
3240  //
3241  if( fromtheright )
3242  {
3243  i1 = 0;
3244  i2 = n-1;
3245  istep = +1;
3246  }
3247  else
3248  {
3249  i1 = n-1;
3250  i2 = 0;
3251  istep = -1;
3252  }
3253  if( dotranspose )
3254  {
3255  i = i1;
3256  i1 = i2;
3257  i2 = i;
3258  istep = -istep;
3259  }
3260 
3261  //
3262  // Process
3263  //
3264  i = i1;
3265  do
3266  {
3267  ap::vmove(v.getvector(1, m-i), qp.getcolumn(i, i, m-1));
3268  v(1) = 1;
3269  if( fromtheright )
3270  {
3271  reflections::applyreflectionfromtheright<Precision>(z, tauq(i), v, 0, zrows-1, i, m-1, work);
3272  }
3273  else
3274  {
3275  reflections::applyreflectionfromtheleft<Precision>(z, tauq(i), v, i, m-1, 0, zcolumns-1, work);
3276  }
3277  i = i+istep;
3278  }
3279  while( i!=i2+istep );
3280  }
3281  else
3282  {
3283 
3284  //
3285  // setup
3286  //
3287  if( fromtheright )
3288  {
3289  i1 = 0;
3290  i2 = m-2;
3291  istep = +1;
3292  }
3293  else
3294  {
3295  i1 = m-2;
3296  i2 = 0;
3297  istep = -1;
3298  }
3299  if( dotranspose )
3300  {
3301  i = i1;
3302  i1 = i2;
3303  i2 = i;
3304  istep = -istep;
3305  }
3306 
3307  //
3308  // Process
3309  //
3310  if( m-1>0 )
3311  {
3312  i = i1;
3313  do
3314  {
3315  ap::vmove(v.getvector(1, m-i-1), qp.getcolumn(i, i+1, m-1));
3316  v(1) = 1;
3317  if( fromtheright )
3318  {
3319  reflections::applyreflectionfromtheright<Precision>(z, tauq(i), v, 0, zrows-1, i+1, m-1, work);
3320  }
3321  else
3322  {
3323  reflections::applyreflectionfromtheleft<Precision>(z, tauq(i), v, i+1, m-1, 0, zcolumns-1, work);
3324  }
3325  i = i+istep;
3326  }
3327  while( i!=i2+istep );
3328  }
3329  }
3330  }
3331 
3332 
3333  /*************************************************************************
3334  Unpacking matrix P which reduces matrix A to bidiagonal form.
3335  The subroutine returns transposed matrix P.
3336 
3337  Input parameters:
3338  QP - matrices Q and P in compact form.
3339  Output of ToBidiagonal subroutine.
3340  M - number of rows in matrix A.
3341  N - number of columns in matrix A.
3342  TAUP - scalar factors which are used to form P.
3343  Output of ToBidiagonal subroutine.
3344  PTRows - required number of rows of matrix P^T. N >= PTRows >= 0.
3345 
3346  Output parameters:
3347  PT - first PTRows columns of matrix P^T
3348  Array[0..PTRows-1, 0..N-1]
3349  If PTRows=0, the array is not modified.
3350 
3351  -- ALGLIB --
3352  Copyright 2005-2007 by Bochkanov Sergey
3353  *************************************************************************/
3354  template<unsigned int Precision>
3356  int m,
3357  int n,
3359  int ptrows,
3361  {
3362  int i;
3363  int j;
3364 
3365 
3366  ap::ap_error::make_assertion(ptrows<=n);
3367  ap::ap_error::make_assertion(ptrows>=0);
3368  if( m==0 || n==0 || ptrows==0 )
3369  {
3370  return;
3371  }
3372 
3373  //
3374  // prepare PT
3375  //
3376  pt.setbounds(0, ptrows-1, 0, n-1);
3377  for(i=0; i<=ptrows-1; i++)
3378  {
3379  for(j=0; j<=n-1; j++)
3380  {
3381  if( i==j )
3382  {
3383  pt(i,j) = 1;
3384  }
3385  else
3386  {
3387  pt(i,j) = 0;
3388  }
3389  }
3390  }
3391 
3392  //
3393  // Calculate
3394  //
3395  rmatrixbdmultiplybyp<Precision>(qp, m, n, taup, pt, ptrows, n, true, true);
3396  }
3397 
3398 
3399  /*************************************************************************
3400  Multiplication by matrix P which reduces matrix A to bidiagonal form.
3401 
3402  The algorithm allows pre- or post-multiply by P or P'.
3403 
3404  Input parameters:
3405  QP - matrices Q and P in compact form.
3406  Output of RMatrixBD subroutine.
3407  M - number of rows in matrix A.
3408  N - number of columns in matrix A.
3409  TAUP - scalar factors which are used to form P.
3410  Output of RMatrixBD subroutine.
3411  Z - multiplied matrix.
3412  Array whose indexes range within [0..ZRows-1,0..ZColumns-1].
3413  ZRows - number of rows in matrix Z. If FromTheRight=False,
3414  ZRows=N, otherwise ZRows can be arbitrary.
3415  ZColumns - number of columns in matrix Z. If FromTheRight=True,
3416  ZColumns=N, otherwise ZColumns can be arbitrary.
3417  FromTheRight - pre- or post-multiply.
3418  DoTranspose - multiply by P or P'.
3419 
3420  Output parameters:
3421  Z - product of Z and P.
3422  Array whose indexes range within [0..ZRows-1,0..ZColumns-1].
3423  If ZRows=0 or ZColumns=0, the array is not modified.
3424 
3425  -- ALGLIB --
3426  Copyright 2005-2007 by Bochkanov Sergey
3427  *************************************************************************/
3428  template<unsigned int Precision>
3430  int m,
3431  int n,
3434  int zrows,
3435  int zcolumns,
3436  bool fromtheright,
3437  bool dotranspose)
3438  {
3439  int i;
3442  int mx;
3443  int i1;
3444  int i2;
3445  int istep;
3446 
3447 
3448  if( m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
3449  {
3450  return;
3451  }
3452  ap::ap_error::make_assertion(fromtheright && zcolumns==n || !fromtheright && zrows==n);
3453 
3454  //
3455  // init
3456  //
3457  mx = ap::maxint(m, n);
3458  mx = ap::maxint(mx, zrows);
3459  mx = ap::maxint(mx, zcolumns);
3460  v.setbounds(0, mx);
3461  work.setbounds(0, mx);
3462  v.setbounds(0, mx);
3463  work.setbounds(0, mx);
3464  if( m>=n )
3465  {
3466 
3467  //
3468  // setup
3469  //
3470  if( fromtheright )
3471  {
3472  i1 = n-2;
3473  i2 = 0;
3474  istep = -1;
3475  }
3476  else
3477  {
3478  i1 = 0;
3479  i2 = n-2;
3480  istep = +1;
3481  }
3482  if( !dotranspose )
3483  {
3484  i = i1;
3485  i1 = i2;
3486  i2 = i;
3487  istep = -istep;
3488  }
3489 
3490  //
3491  // Process
3492  //
3493  if( n-1>0 )
3494  {
3495  i = i1;
3496  do
3497  {
3498  ap::vmove(v.getvector(1, n-1-i), qp.getrow(i, i+1, n-1));
3499  v(1) = 1;
3500  if( fromtheright )
3501  {
3502  reflections::applyreflectionfromtheright<Precision>(z, taup(i), v, 0, zrows-1, i+1, n-1, work);
3503  }
3504  else
3505  {
3506  reflections::applyreflectionfromtheleft<Precision>(z, taup(i), v, i+1, n-1, 0, zcolumns-1, work);
3507  }
3508  i = i+istep;
3509  }
3510  while( i!=i2+istep );
3511  }
3512  }
3513  else
3514  {
3515 
3516  //
3517  // setup
3518  //
3519  if( fromtheright )
3520  {
3521  i1 = m-1;
3522  i2 = 0;
3523  istep = -1;
3524  }
3525  else
3526  {
3527  i1 = 0;
3528  i2 = m-1;
3529  istep = +1;
3530  }
3531  if( !dotranspose )
3532  {
3533  i = i1;
3534  i1 = i2;
3535  i2 = i;
3536  istep = -istep;
3537  }
3538 
3539  //
3540  // Process
3541  //
3542  i = i1;
3543  do
3544  {
3545  ap::vmove(v.getvector(1, n-i), qp.getrow(i, i, n-1));
3546  v(1) = 1;
3547  if( fromtheright )
3548  {
3549  reflections::applyreflectionfromtheright<Precision>(z, taup(i), v, 0, zrows-1, i, n-1, work);
3550  }
3551  else
3552  {
3553  reflections::applyreflectionfromtheleft<Precision>(z, taup(i), v, i, n-1, 0, zcolumns-1, work);
3554  }
3555  i = i+istep;
3556  }
3557  while( i!=i2+istep );
3558  }
3559  }
3560 
3561 
3562  /*************************************************************************
3563  Unpacking of the main and secondary diagonals of bidiagonal decomposition
3564  of matrix A.
3565 
3566  Input parameters:
3567  B - output of RMatrixBD subroutine.
3568  M - number of rows in matrix B.
3569  N - number of columns in matrix B.
3570 
3571  Output parameters:
3572  IsUpper - True, if the matrix is upper bidiagonal.
3573  otherwise IsUpper is False.
3574  D - the main diagonal.
3575  Array whose index ranges within [0..Min(M,N)-1].
3576  E - the secondary diagonal (upper or lower, depending on
3577  the value of IsUpper).
3578  Array index ranges within [0..Min(M,N)-1], the last
3579  element is not used.
3580 
3581  -- ALGLIB --
3582  Copyright 2005-2007 by Bochkanov Sergey
3583  *************************************************************************/
3584  template<unsigned int Precision>
3586  int m,
3587  int n,
3588  bool& isupper,
3591  {
3592  int i;
3593 
3594 
3595  isupper = m>=n;
3596  if( m<=0 || n<=0 )
3597  {
3598  return;
3599  }
3600  if( isupper )
3601  {
3602  d.setbounds(0, n-1);
3603  e.setbounds(0, n-1);
3604  for(i=0; i<=n-2; i++)
3605  {
3606  d(i) = b(i,i);
3607  e(i) = b(i,i+1);
3608  }
3609  d(n-1) = b(n-1,n-1);
3610  }
3611  else
3612  {
3613  d.setbounds(0, m-1);
3614  e.setbounds(0, m-1);
3615  for(i=0; i<=m-2; i++)
3616  {
3617  d(i) = b(i,i);
3618  e(i) = b(i+1,i);
3619  }
3620  d(m-1) = b(m-1,m-1);
3621  }
3622  }
3623 
3624 
3625  /*************************************************************************
3626  Obsolete 1-based subroutine.
3627  See RMatrixBD for 0-based replacement.
3628  *************************************************************************/
3629  template<unsigned int Precision>
3631  int m,
3632  int n,
3635  {
3638  int minmn;
3639  int maxmn;
3640  int i;
3641  amp::ampf<Precision> ltau;
3642  int mmip1;
3643  int nmi;
3644  int ip1;
3645  int nmip1;
3646  int mmi;
3647 
3648 
3649  minmn = ap::minint(m, n);
3650  maxmn = ap::maxint(m, n);
3651  work.setbounds(1, maxmn);
3652  t.setbounds(1, maxmn);
3653  taup.setbounds(1, minmn);
3654  tauq.setbounds(1, minmn);
3655  if( m>=n )
3656  {
3657 
3658  //
3659  // Reduce to upper bidiagonal form
3660  //
3661  for(i=1; i<=n; i++)
3662  {
3663 
3664  //
3665  // Generate elementary reflector H(i) to annihilate A(i+1:m,i)
3666  //
3667  mmip1 = m-i+1;
3668  ap::vmove(t.getvector(1, mmip1), a.getcolumn(i, i, m));
3669  reflections::generatereflection<Precision>(t, mmip1, ltau);
3670  tauq(i) = ltau;
3671  ap::vmove(a.getcolumn(i, i, m), t.getvector(1, mmip1));
3672  t(1) = 1;
3673 
3674  //
3675  // Apply H(i) to A(i:m,i+1:n) from the left
3676  //
3677  reflections::applyreflectionfromtheleft<Precision>(a, ltau, t, i, m, i+1, n, work);
3678  if( i<n )
3679  {
3680 
3681  //
3682  // Generate elementary reflector G(i) to annihilate
3683  // A(i,i+2:n)
3684  //
3685  nmi = n-i;
3686  ip1 = i+1;
3687  ap::vmove(t.getvector(1, nmi), a.getrow(i, ip1, n));
3688  reflections::generatereflection<Precision>(t, nmi, ltau);
3689  taup(i) = ltau;
3690  ap::vmove(a.getrow(i, ip1, n), t.getvector(1, nmi));
3691  t(1) = 1;
3692 
3693  //
3694  // Apply G(i) to A(i+1:m,i+1:n) from the right
3695  //
3696  reflections::applyreflectionfromtheright<Precision>(a, ltau, t, i+1, m, i+1, n, work);
3697  }
3698  else
3699  {
3700  taup(i) = 0;
3701  }
3702  }
3703  }
3704  else
3705  {
3706 
3707  //
3708  // Reduce to lower bidiagonal form
3709  //
3710  for(i=1; i<=m; i++)
3711  {
3712 
3713  //
3714  // Generate elementary reflector G(i) to annihilate A(i,i+1:n)
3715  //
3716  nmip1 = n-i+1;
3717  ap::vmove(t.getvector(1, nmip1), a.getrow(i, i, n));
3718  reflections::generatereflection<Precision>(t, nmip1, ltau);
3719  taup(i) = ltau;
3720  ap::vmove(a.getrow(i, i, n), t.getvector(1, nmip1));
3721  t(1) = 1;
3722 
3723  //
3724  // Apply G(i) to A(i+1:m,i:n) from the right
3725  //
3726  reflections::applyreflectionfromtheright<Precision>(a, ltau, t, i+1, m, i, n, work);
3727  if( i<m )
3728  {
3729 
3730  //
3731  // Generate elementary reflector H(i) to annihilate
3732  // A(i+2:m,i)
3733  //
3734  mmi = m-i;
3735  ip1 = i+1;
3736  ap::vmove(t.getvector(1, mmi), a.getcolumn(i, ip1, m));
3737  reflections::generatereflection<Precision>(t, mmi, ltau);
3738  tauq(i) = ltau;
3739  ap::vmove(a.getcolumn(i, ip1, m), t.getvector(1, mmi));
3740  t(1) = 1;
3741 
3742  //
3743  // Apply H(i) to A(i+1:m,i+1:n) from the left
3744  //
3745  reflections::applyreflectionfromtheleft<Precision>(a, ltau, t, i+1, m, i+1, n, work);
3746  }
3747  else
3748  {
3749  tauq(i) = 0;
3750  }
3751  }
3752  }
3753  }
3754 
3755 
3756  /*************************************************************************
3757  Obsolete 1-based subroutine.
3758  See RMatrixBDUnpackQ for 0-based replacement.
3759  *************************************************************************/
3760  template<unsigned int Precision>
3762  int m,
3763  int n,
3765  int qcolumns,
3767  {
3768  int i;
3769  int j;
3770  int ip1;
3773  int vm;
3774 
3775 
3776  ap::ap_error::make_assertion(qcolumns<=m);
3777  if( m==0 || n==0 || qcolumns==0 )
3778  {
3779  return;
3780  }
3781 
3782  //
3783  // init
3784  //
3785  q.setbounds(1, m, 1, qcolumns);
3786  v.setbounds(1, m);
3787  work.setbounds(1, qcolumns);
3788 
3789  //
3790  // prepare Q
3791  //
3792  for(i=1; i<=m; i++)
3793  {
3794  for(j=1; j<=qcolumns; j++)
3795  {
3796  if( i==j )
3797  {
3798  q(i,j) = 1;
3799  }
3800  else
3801  {
3802  q(i,j) = 0;
3803  }
3804  }
3805  }
3806  if( m>=n )
3807  {
3808  for(i=ap::minint(n, qcolumns); i>=1; i--)
3809  {
3810  vm = m-i+1;
3811  ap::vmove(v.getvector(1, vm), qp.getcolumn(i, i, m));
3812  v(1) = 1;
3813  reflections::applyreflectionfromtheleft<Precision>(q, tauq(i), v, i, m, 1, qcolumns, work);
3814  }
3815  }
3816  else
3817  {
3818  for(i=ap::minint(m-1, qcolumns-1); i>=1; i--)
3819  {
3820  vm = m-i;
3821  ip1 = i+1;
3822  ap::vmove(v.getvector(1, vm), qp.getcolumn(i, ip1, m));
3823  v(1) = 1;
3824  reflections::applyreflectionfromtheleft<Precision>(q, tauq(i), v, i+1, m, 1, qcolumns, work);
3825  }
3826  }
3827  }
3828 
3829 
3830  /*************************************************************************
3831  Obsolete 1-based subroutine.
3832  See RMatrixBDMultiplyByQ for 0-based replacement.
3833  *************************************************************************/
3834  template<unsigned int Precision>
3836  int m,
3837  int n,
3840  int zrows,
3841  int zcolumns,
3842  bool fromtheright,
3843  bool dotranspose)
3844  {
3845  int i;
3846  int ip1;
3847  int i1;
3848  int i2;
3849  int istep;
3852  int vm;
3853  int mx;
3854 
3855 
3856  if( m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
3857  {
3858  return;
3859  }
3860  ap::ap_error::make_assertion(fromtheright && zcolumns==m || !fromtheright && zrows==m);
3861 
3862  //
3863  // init
3864  //
3865  mx = ap::maxint(m, n);
3866  mx = ap::maxint(mx, zrows);
3867  mx = ap::maxint(mx, zcolumns);
3868  v.setbounds(1, mx);
3869  work.setbounds(1, mx);
3870  if( m>=n )
3871  {
3872 
3873  //
3874  // setup
3875  //
3876  if( fromtheright )
3877  {
3878  i1 = 1;
3879  i2 = n;
3880  istep = +1;
3881  }
3882  else
3883  {
3884  i1 = n;
3885  i2 = 1;
3886  istep = -1;
3887  }
3888  if( dotranspose )
3889  {
3890  i = i1;
3891  i1 = i2;
3892  i2 = i;
3893  istep = -istep;
3894  }
3895 
3896  //
3897  // Process
3898  //
3899  i = i1;
3900  do
3901  {
3902  vm = m-i+1;
3903  ap::vmove(v.getvector(1, vm), qp.getcolumn(i, i, m));
3904  v(1) = 1;
3905  if( fromtheright )
3906  {
3907  reflections::applyreflectionfromtheright<Precision>(z, tauq(i), v, 1, zrows, i, m, work);
3908  }
3909  else
3910  {
3911  reflections::applyreflectionfromtheleft<Precision>(z, tauq(i), v, i, m, 1, zcolumns, work);
3912  }
3913  i = i+istep;
3914  }
3915  while( i!=i2+istep );
3916  }
3917  else
3918  {
3919 
3920  //
3921  // setup
3922  //
3923  if( fromtheright )
3924  {
3925  i1 = 1;
3926  i2 = m-1;
3927  istep = +1;
3928  }
3929  else
3930  {
3931  i1 = m-1;
3932  i2 = 1;
3933  istep = -1;
3934  }
3935  if( dotranspose )
3936  {
3937  i = i1;
3938  i1 = i2;
3939  i2 = i;
3940  istep = -istep;
3941  }
3942 
3943  //
3944  // Process
3945  //
3946  if( m-1>0 )
3947  {
3948  i = i1;
3949  do
3950  {
3951  vm = m-i;
3952  ip1 = i+1;
3953  ap::vmove(v.getvector(1, vm), qp.getcolumn(i, ip1, m));
3954  v(1) = 1;
3955  if( fromtheright )
3956  {
3957  reflections::applyreflectionfromtheright<Precision>(z, tauq(i), v, 1, zrows, i+1, m, work);
3958  }
3959  else
3960  {
3961  reflections::applyreflectionfromtheleft<Precision>(z, tauq(i), v, i+1, m, 1, zcolumns, work);
3962  }
3963  i = i+istep;
3964  }
3965  while( i!=i2+istep );
3966  }
3967  }
3968  }
3969 
3970 
3971  /*************************************************************************
3972  Obsolete 1-based subroutine.
3973  See RMatrixBDUnpackPT for 0-based replacement.
3974  *************************************************************************/
3975  template<unsigned int Precision>
3977  int m,
3978  int n,
3980  int ptrows,
3982  {
3983  int i;
3984  int j;
3985  int ip1;
3988  int vm;
3989 
3990 
3991  ap::ap_error::make_assertion(ptrows<=n);
3992  if( m==0 || n==0 || ptrows==0 )
3993  {
3994  return;
3995  }
3996 
3997  //
3998  // init
3999  //
4000  pt.setbounds(1, ptrows, 1, n);
4001  v.setbounds(1, n);
4002  work.setbounds(1, ptrows);
4003 
4004  //
4005  // prepare PT
4006  //
4007  for(i=1; i<=ptrows; i++)
4008  {
4009  for(j=1; j<=n; j++)
4010  {
4011  if( i==j )
4012  {
4013  pt(i,j) = 1;
4014  }
4015  else
4016  {
4017  pt(i,j) = 0;
4018  }
4019  }
4020  }
4021  if( m>=n )
4022  {
4023  for(i=ap::minint(n-1, ptrows-1); i>=1; i--)
4024  {
4025  vm = n-i;
4026  ip1 = i+1;
4027  ap::vmove(v.getvector(1, vm), qp.getrow(i, ip1, n));
4028  v(1) = 1;
4029  reflections::applyreflectionfromtheright<Precision>(pt, taup(i), v, 1, ptrows, i+1, n, work);
4030  }
4031  }
4032  else
4033  {
4034  for(i=ap::minint(m, ptrows); i>=1; i--)
4035  {
4036  vm = n-i+1;
4037  ap::vmove(v.getvector(1, vm), qp.getrow(i, i, n));
4038  v(1) = 1;
4039  reflections::applyreflectionfromtheright<Precision>(pt, taup(i), v, 1, ptrows, i, n, work);
4040  }
4041  }
4042  }
4043 
4044 
4045  /*************************************************************************
4046  Obsolete 1-based subroutine.
4047  See RMatrixBDMultiplyByP for 0-based replacement.
4048  *************************************************************************/
4049  template<unsigned int Precision>
4051  int m,
4052  int n,
4055  int zrows,
4056  int zcolumns,
4057  bool fromtheright,
4058  bool dotranspose)
4059  {
4060  int i;
4061  int ip1;
4064  int vm;
4065  int mx;
4066  int i1;
4067  int i2;
4068  int istep;
4069 
4070 
4071  if( m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
4072  {
4073  return;
4074  }
4075  ap::ap_error::make_assertion(fromtheright && zcolumns==n || !fromtheright && zrows==n);
4076 
4077  //
4078  // init
4079  //
4080  mx = ap::maxint(m, n);
4081  mx = ap::maxint(mx, zrows);
4082  mx = ap::maxint(mx, zcolumns);
4083  v.setbounds(1, mx);
4084  work.setbounds(1, mx);
4085  v.setbounds(1, mx);
4086  work.setbounds(1, mx);
4087  if( m>=n )
4088  {
4089 
4090  //
4091  // setup
4092  //
4093  if( fromtheright )
4094  {
4095  i1 = n-1;
4096  i2 = 1;
4097  istep = -1;
4098  }
4099  else
4100  {
4101  i1 = 1;
4102  i2 = n-1;
4103  istep = +1;
4104  }
4105  if( !dotranspose )
4106  {
4107  i = i1;
4108  i1 = i2;
4109  i2 = i;
4110  istep = -istep;
4111  }
4112 
4113  //
4114  // Process
4115  //
4116  if( n-1>0 )
4117  {
4118  i = i1;
4119  do
4120  {
4121  vm = n-i;
4122  ip1 = i+1;
4123  ap::vmove(v.getvector(1, vm), qp.getrow(i, ip1, n));
4124  v(1) = 1;
4125  if( fromtheright )
4126  {
4127  reflections::applyreflectionfromtheright<Precision>(z, taup(i), v, 1, zrows, i+1, n, work);
4128  }
4129  else
4130  {
4131  reflections::applyreflectionfromtheleft<Precision>(z, taup(i), v, i+1, n, 1, zcolumns, work);
4132  }
4133  i = i+istep;
4134  }
4135  while( i!=i2+istep );
4136  }
4137  }
4138  else
4139  {
4140 
4141  //
4142  // setup
4143  //
4144  if( fromtheright )
4145  {
4146  i1 = m;
4147  i2 = 1;
4148  istep = -1;
4149  }
4150  else
4151  {
4152  i1 = 1;
4153  i2 = m;
4154  istep = +1;
4155  }
4156  if( !dotranspose )
4157  {
4158  i = i1;
4159  i1 = i2;
4160  i2 = i;
4161  istep = -istep;
4162  }
4163 
4164  //
4165  // Process
4166  //
4167  i = i1;
4168  do
4169  {
4170  vm = n-i+1;
4171  ap::vmove(v.getvector(1, vm), qp.getrow(i, i, n));
4172  v(1) = 1;
4173  if( fromtheright )
4174  {
4175  reflections::applyreflectionfromtheright<Precision>(z, taup(i), v, 1, zrows, i, n, work);
4176  }
4177  else
4178  {
4179  reflections::applyreflectionfromtheleft<Precision>(z, taup(i), v, i, n, 1, zcolumns, work);
4180  }
4181  i = i+istep;
4182  }
4183  while( i!=i2+istep );
4184  }
4185  }
4186 
4187 
4188  /*************************************************************************
4189  Obsolete 1-based subroutine.
4190  See RMatrixBDUnpackDiagonals for 0-based replacement.
4191  *************************************************************************/
4192  template<unsigned int Precision>
4194  int m,
4195  int n,
4196  bool& isupper,
4199  {
4200  int i;
4201 
4202 
4203  isupper = m>=n;
4204  if( m==0 || n==0 )
4205  {
4206  return;
4207  }
4208  if( isupper )
4209  {
4210  d.setbounds(1, n);
4211  e.setbounds(1, n);
4212  for(i=1; i<=n-1; i++)
4213  {
4214  d(i) = b(i,i);
4215  e(i) = b(i,i+1);
4216  }
4217  d(n) = b(n,n);
4218  }
4219  else
4220  {
4221  d.setbounds(1, m);
4222  e.setbounds(1, m);
4223  for(i=1; i<=m-1; i++)
4224  {
4225  d(i) = b(i,i);
4226  e(i) = b(i+1,i);
4227  }
4228  d(m) = b(m,m);
4229  }
4230  }
4231 } // namespace
4232 
4233 /* stuff included from ./qr.h */
4234 
4235 /*************************************************************************
4236 Copyright (c) 1992-2007 The University of Tennessee. All rights reserved.
4237 
4238 Contributors:
4239  * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
4240  pseudocode.
4241 
4242 See subroutines comments for additional copyrights.
4243 
4244 Redistribution and use in source and binary forms, with or without
4245 modification, are permitted provided that the following conditions are
4246 met:
4247 
4248 - Redistributions of source code must retain the above copyright
4249  notice, this list of conditions and the following disclaimer.
4250 
4251 - Redistributions in binary form must reproduce the above copyright
4252  notice, this list of conditions and the following disclaimer listed
4253  in this license in the documentation and/or other materials
4254  provided with the distribution.
4255 
4256 - Neither the name of the copyright holders nor the names of its
4257  contributors may be used to endorse or promote products derived from
4258  this software without specific prior written permission.
4259 
4260 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4261 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
4262 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
4263 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
4264 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
4265 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
4266 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
4267 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
4268 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4269 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
4270 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4271 *************************************************************************/
4272 
4273 namespace qr
4274 {
4275  template<unsigned int Precision>
4277  int m,
4278  int n,
4280  template<unsigned int Precision>
4282  int m,
4283  int n,
4285  int qcolumns,
4287  template<unsigned int Precision>
4289  int m,
4290  int n,
4292  template<unsigned int Precision>
4294  int m,
4295  int n,
4297  template<unsigned int Precision>
4299  int m,
4300  int n,
4302  int qcolumns,
4304  template<unsigned int Precision>
4306  int m,
4307  int n,
4310 
4311 
4312  /*************************************************************************
4313  QR decomposition of a rectangular matrix of size MxN
4314 
4315  Input parameters:
4316  A - matrix A whose indexes range within [0..M-1, 0..N-1].
4317  M - number of rows in matrix A.
4318  N - number of columns in matrix A.
4319 
4320  Output parameters:
4321  A - matrices Q and R in compact form (see below).
4322  Tau - array of scalar factors which are used to form
4323  matrix Q. Array whose index ranges within [0.. Min(M-1,N-1)].
4324 
4325  Matrix A is represented as A = QR, where Q is an orthogonal matrix of size
4326  MxM, R - upper triangular (or upper trapezoid) matrix of size M x N.
4327 
4328  The elements of matrix R are located on and above the main diagonal of
4329  matrix A. The elements which are located in Tau array and below the main
4330  diagonal of matrix A are used to form matrix Q as follows:
4331 
4332  Matrix Q is represented as a product of elementary reflections
4333 
4334  Q = H(0)*H(2)*...*H(k-1),
4335 
4336  where k = min(m,n), and each H(i) is in the form
4337 
4338  H(i) = 1 - tau * v * (v^T)
4339 
4340  where tau is a scalar stored in Tau[I]; v - real vector,
4341  so that v(0:i-1) = 0, v(i) = 1, v(i+1:m-1) stored in A(i+1:m-1,i).
4342 
4343  -- LAPACK routine (version 3.0) --
4344  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
4345  Courant Institute, Argonne National Lab, and Rice University
4346  February 29, 1992.
4347  Translation from FORTRAN to pseudocode (AlgoPascal)
4348  by Sergey Bochkanov, ALGLIB project, 2005-2007.
4349  *************************************************************************/
4350  template<unsigned int Precision>
4352  int m,
4353  int n,
4355  {
4358  int i;
4359  int k;
4360  int minmn;
4362 
4363 
4364  if( m<=0 || n<=0 )
4365  {
4366  return;
4367  }
4368  minmn = ap::minint(m, n);
4369  work.setbounds(0, n-1);
4370  t.setbounds(1, m);
4371  tau.setbounds(0, minmn-1);
4372 
4373  //
4374  // Test the input arguments
4375  //
4376  k = minmn;
4377  for(i=0; i<=k-1; i++)
4378  {
4379 
4380  //
4381  // Generate elementary reflector H(i) to annihilate A(i+1:m,i)
4382  //
4383  ap::vmove(t.getvector(1, m-i), a.getcolumn(i, i, m-1));
4384  reflections::generatereflection<Precision>(t, m-i, tmp);
4385  tau(i) = tmp;
4386  ap::vmove(a.getcolumn(i, i, m-1), t.getvector(1, m-i));
4387  t(1) = 1;
4388  if( i<n )
4389  {
4390 
4391  //
4392  // Apply H(i) to A(i:m-1,i+1:n-1) from the left
4393  //
4394  reflections::applyreflectionfromtheleft<Precision>(a, tau(i), t, i, m-1, i+1, n-1, work);
4395  }
4396  }
4397  }
4398 
4399 
4400  /*************************************************************************
4401  Partial unpacking of matrix Q from the QR decomposition of a matrix A
4402 
4403  Input parameters:
4404  A - matrices Q and R in compact form.
4405  Output of RMatrixQR subroutine.
4406  M - number of rows in given matrix A. M>=0.
4407  N - number of columns in given matrix A. N>=0.
4408  Tau - scalar factors which are used to form Q.
4409  Output of the RMatrixQR subroutine.
4410  QColumns - required number of columns of matrix Q. M>=QColumns>=0.
4411 
4412  Output parameters:
4413  Q - first QColumns columns of matrix Q.
4414  Array whose indexes range within [0..M-1, 0..QColumns-1].
4415  If QColumns=0, the array remains unchanged.
4416 
4417  -- ALGLIB --
4418  Copyright 2005 by Bochkanov Sergey
4419  *************************************************************************/
4420  template<unsigned int Precision>
4422  int m,
4423  int n,
4425  int qcolumns,
4427  {
4428  int i;
4429  int j;
4430  int k;
4431  int minmn;
4434 
4435 
4436  ap::ap_error::make_assertion(qcolumns<=m);
4437  if( m<=0 || n<=0 || qcolumns<=0 )
4438  {
4439  return;
4440  }
4441 
4442  //
4443  // init
4444  //
4445  minmn = ap::minint(m, n);
4446  k = ap::minint(minmn, qcolumns);
4447  q.setbounds(0, m-1, 0, qcolumns-1);
4448  v.setbounds(1, m);
4449  work.setbounds(0, qcolumns-1);
4450  for(i=0; i<=m-1; i++)
4451  {
4452  for(j=0; j<=qcolumns-1; j++)
4453  {
4454  if( i==j )
4455  {
4456  q(i,j) = 1;
4457  }
4458  else
4459  {
4460  q(i,j) = 0;
4461  }
4462  }
4463  }
4464 
4465  //
4466  // unpack Q
4467  //
4468  for(i=k-1; i>=0; i--)
4469  {
4470 
4471  //
4472  // Apply H(i)
4473  //
4474  ap::vmove(v.getvector(1, m-i), a.getcolumn(i, i, m-1));
4475  v(1) = 1;
4476  reflections::applyreflectionfromtheleft<Precision>(q, tau(i), v, i, m-1, 0, qcolumns-1, work);
4477  }
4478  }
4479 
4480 
4481  /*************************************************************************
4482  Unpacking of matrix R from the QR decomposition of a matrix A
4483 
4484  Input parameters:
4485  A - matrices Q and R in compact form.
4486  Output of RMatrixQR subroutine.
4487  M - number of rows in given matrix A. M>=0.
4488  N - number of columns in given matrix A. N>=0.
4489 
4490  Output parameters:
4491  R - matrix R, array[0..M-1, 0..N-1].
4492 
4493  -- ALGLIB --
4494  Copyright 2005 by Bochkanov Sergey
4495  *************************************************************************/
4496  template<unsigned int Precision>
4498  int m,
4499  int n,
4501  {
4502  int i;
4503  int k;
4504 
4505 
4506  if( m<=0 || n<=0 )
4507  {
4508  return;
4509  }
4510  k = ap::minint(m, n);
4511  r.setbounds(0, m-1, 0, n-1);
4512  for(i=0; i<=n-1; i++)
4513  {
4514  r(0,i) = 0;
4515  }
4516  for(i=1; i<=m-1; i++)
4517  {
4518  ap::vmove(r.getrow(i, 0, n-1), r.getrow(0, 0, n-1));
4519  }
4520  for(i=0; i<=k-1; i++)
4521  {
4522  ap::vmove(r.getrow(i, i, n-1), a.getrow(i, i, n-1));
4523  }
4524  }
4525 
4526 
4527  /*************************************************************************
4528  Obsolete 1-based subroutine. See RMatrixQR for 0-based replacement.
4529  *************************************************************************/
4530  template<unsigned int Precision>
4532  int m,
4533  int n,
4535  {
4538  int i;
4539  int k;
4540  int mmip1;
4541  int minmn;
4543 
4544 
4545  minmn = ap::minint(m, n);
4546  work.setbounds(1, n);
4547  t.setbounds(1, m);
4548  tau.setbounds(1, minmn);
4549 
4550  //
4551  // Test the input arguments
4552  //
4553  k = ap::minint(m, n);
4554  for(i=1; i<=k; i++)
4555  {
4556 
4557  //
4558  // Generate elementary reflector H(i) to annihilate A(i+1:m,i)
4559  //
4560  mmip1 = m-i+1;
4561  ap::vmove(t.getvector(1, mmip1), a.getcolumn(i, i, m));
4562  reflections::generatereflection<Precision>(t, mmip1, tmp);
4563  tau(i) = tmp;
4564  ap::vmove(a.getcolumn(i, i, m), t.getvector(1, mmip1));
4565  t(1) = 1;
4566  if( i<n )
4567  {
4568 
4569  //
4570  // Apply H(i) to A(i:m,i+1:n) from the left
4571  //
4572  reflections::applyreflectionfromtheleft<Precision>(a, tau(i), t, i, m, i+1, n, work);
4573  }
4574  }
4575  }
4576 
4577 
4578  /*************************************************************************
4579  Obsolete 1-based subroutine. See RMatrixQRUnpackQ for 0-based replacement.
4580  *************************************************************************/
4581  template<unsigned int Precision>
4583  int m,
4584  int n,
4586  int qcolumns,
4588  {
4589  int i;
4590  int j;
4591  int k;
4592  int minmn;
4595  int vm;
4596 
4597 
4598  ap::ap_error::make_assertion(qcolumns<=m);
4599  if( m==0 || n==0 || qcolumns==0 )
4600  {
4601  return;
4602  }
4603 
4604  //
4605  // init
4606  //
4607  minmn = ap::minint(m, n);
4608  k = ap::minint(minmn, qcolumns);
4609  q.setbounds(1, m, 1, qcolumns);
4610  v.setbounds(1, m);
4611  work.setbounds(1, qcolumns);
4612  for(i=1; i<=m; i++)
4613  {
4614  for(j=1; j<=qcolumns; j++)
4615  {
4616  if( i==j )
4617  {
4618  q(i,j) = 1;
4619  }
4620  else
4621  {
4622  q(i,j) = 0;
4623  }
4624  }
4625  }
4626 
4627  //
4628  // unpack Q
4629  //
4630  for(i=k; i>=1; i--)
4631  {
4632 
4633  //
4634  // Apply H(i)
4635  //
4636  vm = m-i+1;
4637  ap::vmove(v.getvector(1, vm), a.getcolumn(i, i, m));
4638  v(1) = 1;
4639  reflections::applyreflectionfromtheleft<Precision>(q, tau(i), v, i, m, 1, qcolumns, work);
4640  }
4641  }
4642 
4643 
4644  /*************************************************************************
4645  Obsolete 1-based subroutine. See RMatrixQR for 0-based replacement.
4646  *************************************************************************/
4647  template<unsigned int Precision>
4649  int m,
4650  int n,
4653  {
4654  int i;
4655  int k;
4659 
4660 
4661  k = ap::minint(m, n);
4662  if( n<=0 )
4663  {
4664  return;
4665  }
4666  work.setbounds(1, m);
4667  v.setbounds(1, m);
4668  q.setbounds(1, m, 1, m);
4669  r.setbounds(1, m, 1, n);
4670 
4671  //
4672  // QRDecomposition
4673  //
4674  qrdecomposition<Precision>(a, m, n, tau);
4675 
4676  //
4677  // R
4678  //
4679  for(i=1; i<=n; i++)
4680  {
4681  r(1,i) = 0;
4682  }
4683  for(i=2; i<=m; i++)
4684  {
4685  ap::vmove(r.getrow(i, 1, n), r.getrow(1, 1, n));
4686  }
4687  for(i=1; i<=k; i++)
4688  {
4689  ap::vmove(r.getrow(i, i, n), a.getrow(i, i, n));
4690  }
4691 
4692  //
4693  // Q
4694  //
4695  unpackqfromqr<Precision>(a, m, n, tau, m, q);
4696  }
4697 } // namespace
4698 
4699 /* stuff included from ./lq.h */
4700 
4701 /*************************************************************************
4702 Copyright (c) 2005-2007, Sergey Bochkanov (ALGLIB project).
4703 
4704 Redistribution and use in source and binary forms, with or without
4705 modification, are permitted provided that the following conditions are
4706 met:
4707 
4708 - Redistributions of source code must retain the above copyright
4709  notice, this list of conditions and the following disclaimer.
4710 
4711 - Redistributions in binary form must reproduce the above copyright
4712  notice, this list of conditions and the following disclaimer listed
4713  in this license in the documentation and/or other materials
4714  provided with the distribution.
4715 
4716 - Neither the name of the copyright holders nor the names of its
4717  contributors may be used to endorse or promote products derived from
4718  this software without specific prior written permission.
4719 
4720 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4721 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
4722 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
4723 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
4724 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
4725 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
4726 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
4727 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
4728 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4729 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
4730 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4731 *************************************************************************/
4732 
4733 namespace lq
4734 {
4735  template<unsigned int Precision>
4737  int m,
4738  int n,
4740  template<unsigned int Precision>
4742  int m,
4743  int n,
4745  int qrows,
4747  template<unsigned int Precision>
4749  int m,
4750  int n,
4752  template<unsigned int Precision>
4754  int m,
4755  int n,
4757  template<unsigned int Precision>
4759  int m,
4760  int n,
4762  int qrows,
4764  template<unsigned int Precision>
4766  int m,
4767  int n,
4770 
4771 
4772  /*************************************************************************
4773  LQ decomposition of a rectangular matrix of size MxN
4774 
4775  Input parameters:
4776  A - matrix A whose indexes range within [0..M-1, 0..N-1].
4777  M - number of rows in matrix A.
4778  N - number of columns in matrix A.
4779 
4780  Output parameters:
4781  A - matrices L and Q in compact form (see below)
4782  Tau - array of scalar factors which are used to form
4783  matrix Q. Array whose index ranges within [0..Min(M,N)-1].
4784 
4785  Matrix A is represented as A = LQ, where Q is an orthogonal matrix of size
4786  MxM, L - lower triangular (or lower trapezoid) matrix of size M x N.
4787 
4788  The elements of matrix L are located on and below the main diagonal of
4789  matrix A. The elements which are located in Tau array and above the main
4790  diagonal of matrix A are used to form matrix Q as follows:
4791 
4792  Matrix Q is represented as a product of elementary reflections
4793 
4794  Q = H(k-1)*H(k-2)*...*H(1)*H(0),
4795 
4796  where k = min(m,n), and each H(i) is of the form
4797 
4798  H(i) = 1 - tau * v * (v^T)
4799 
4800  where tau is a scalar stored in Tau[I]; v - real vector, so that v(0:i-1)=0,
4801  v(i) = 1, v(i+1:n-1) stored in A(i,i+1:n-1).
4802 
4803  -- ALGLIB --
4804  Copyright 2005-2007 by Bochkanov Sergey
4805  *************************************************************************/
4806  template<unsigned int Precision>
4808  int m,
4809  int n,
4811  {
4814  int i;
4815  int k;
4816  int minmn;
4817  int maxmn;
4819 
4820 
4821  minmn = ap::minint(m, n);
4822  maxmn = ap::maxint(m, n);
4823  work.setbounds(0, m);
4824  t.setbounds(0, n);
4825  tau.setbounds(0, minmn-1);
4826  k = ap::minint(m, n);
4827  for(i=0; i<=k-1; i++)
4828  {
4829 
4830  //
4831  // Generate elementary reflector H(i) to annihilate A(i,i+1:n-1)
4832  //
4833  ap::vmove(t.getvector(1, n-i), a.getrow(i, i, n-1));
4834  reflections::generatereflection<Precision>(t, n-i, tmp);
4835  tau(i) = tmp;
4836  ap::vmove(a.getrow(i, i, n-1), t.getvector(1, n-i));
4837  t(1) = 1;
4838  if( i<n )
4839  {
4840 
4841  //
4842  // Apply H(i) to A(i+1:m,i:n) from the right
4843  //
4844  reflections::applyreflectionfromtheright<Precision>(a, tau(i), t, i+1, m-1, i, n-1, work);
4845  }
4846  }
4847  }
4848 
4849 
4850  /*************************************************************************
4851  Partial unpacking of matrix Q from the LQ decomposition of a matrix A
4852 
4853  Input parameters:
4854  A - matrices L and Q in compact form.
4855  Output of RMatrixLQ subroutine.
4856  M - number of rows in given matrix A. M>=0.
4857  N - number of columns in given matrix A. N>=0.
4858  Tau - scalar factors which are used to form Q.
4859  Output of the RMatrixLQ subroutine.
4860  QRows - required number of rows in matrix Q. N>=QRows>=0.
4861 
4862  Output parameters:
4863  Q - first QRows rows of matrix Q. Array whose indexes range
4864  within [0..QRows-1, 0..N-1]. If QRows=0, the array remains
4865  unchanged.
4866 
4867  -- ALGLIB --
4868  Copyright 2005 by Bochkanov Sergey
4869  *************************************************************************/
4870  template<unsigned int Precision>
4872  int m,
4873  int n,
4875  int qrows,
4877  {
4878  int i;
4879  int j;
4880  int k;
4881  int minmn;
4884 
4885 
4886  ap::ap_error::make_assertion(qrows<=n);
4887  if( m<=0 || n<=0 || qrows<=0 )
4888  {
4889  return;
4890  }
4891 
4892  //
4893  // init
4894  //
4895  minmn = ap::minint(m, n);
4896  k = ap::minint(minmn, qrows);
4897  q.setbounds(0, qrows-1, 0, n-1);
4898  v.setbounds(0, n);
4899  work.setbounds(0, qrows);
4900  for(i=0; i<=qrows-1; i++)
4901  {
4902  for(j=0; j<=n-1; j++)
4903  {
4904  if( i==j )
4905  {
4906  q(i,j) = 1;
4907  }
4908  else
4909  {
4910  q(i,j) = 0;
4911  }
4912  }
4913  }
4914 
4915  //
4916  // unpack Q
4917  //
4918  for(i=k-1; i>=0; i--)
4919  {
4920 
4921  //
4922  // Apply H(i)
4923  //
4924  ap::vmove(v.getvector(1, n-i), a.getrow(i, i, n-1));
4925  v(1) = 1;
4926  reflections::applyreflectionfromtheright<Precision>(q, tau(i), v, 0, qrows-1, i, n-1, work);
4927  }
4928  }
4929 
4930 
4931  /*************************************************************************
4932  Unpacking of matrix L from the LQ decomposition of a matrix A
4933 
4934  Input parameters:
4935  A - matrices Q and L in compact form.
4936  Output of RMatrixLQ subroutine.
4937  M - number of rows in given matrix A. M>=0.
4938  N - number of columns in given matrix A. N>=0.
4939 
4940  Output parameters:
4941  L - matrix L, array[0..M-1, 0..N-1].
4942 
4943  -- ALGLIB --
4944  Copyright 2005 by Bochkanov Sergey
4945  *************************************************************************/
4946  template<unsigned int Precision>
4948  int m,
4949  int n,
4951  {
4952  int i;
4953  int k;
4954 
4955 
4956  if( m<=0 || n<=0 )
4957  {
4958  return;
4959  }
4960  l.setbounds(0, m-1, 0, n-1);
4961  for(i=0; i<=n-1; i++)
4962  {
4963  l(0,i) = 0;
4964  }
4965  for(i=1; i<=m-1; i++)
4966  {
4967  ap::vmove(l.getrow(i, 0, n-1), l.getrow(0, 0, n-1));
4968  }
4969  for(i=0; i<=m-1; i++)
4970  {
4971  k = ap::minint(i, n-1);
4972  ap::vmove(l.getrow(i, 0, k), a.getrow(i, 0, k));
4973  }
4974  }
4975 
4976 
4977  /*************************************************************************
4978  Obsolete 1-based subroutine
4979  See RMatrixLQ for 0-based replacement.
4980  *************************************************************************/
4981  template<unsigned int Precision>
4983  int m,
4984  int n,
4986  {
4989  int i;
4990  int k;
4991  int nmip1;
4992  int minmn;
4993  int maxmn;
4995 
4996 
4997  minmn = ap::minint(m, n);
4998  maxmn = ap::maxint(m, n);
4999  work.setbounds(1, m);
5000  t.setbounds(1, n);
5001  tau.setbounds(1, minmn);
5002 
5003  //
5004  // Test the input arguments
5005  //
5006  k = ap::minint(m, n);
5007  for(i=1; i<=k; i++)
5008  {
5009 
5010  //
5011  // Generate elementary reflector H(i) to annihilate A(i,i+1:n)
5012  //
5013  nmip1 = n-i+1;
5014  ap::vmove(t.getvector(1, nmip1), a.getrow(i, i, n));
5015  reflections::generatereflection<Precision>(t, nmip1, tmp);
5016  tau(i) = tmp;
5017  ap::vmove(a.getrow(i, i, n), t.getvector(1, nmip1));
5018  t(1) = 1;
5019  if( i<n )
5020  {
5021 
5022  //
5023  // Apply H(i) to A(i+1:m,i:n) from the right
5024  //
5025  reflections::applyreflectionfromtheright<Precision>(a, tau(i), t, i+1, m, i, n, work);
5026  }
5027  }
5028  }
5029 
5030 
5031  /*************************************************************************
5032  Obsolete 1-based subroutine
5033  See RMatrixLQUnpackQ for 0-based replacement.
5034  *************************************************************************/
5035  template<unsigned int Precision>
5037  int m,
5038  int n,
5040  int qrows,
5042  {
5043  int i;
5044  int j;
5045  int k;
5046  int minmn;
5049  int vm;
5050 
5051 
5052  ap::ap_error::make_assertion(qrows<=n);
5053  if( m==0 || n==0 || qrows==0 )
5054  {
5055  return;
5056  }
5057 
5058  //
5059  // init
5060  //
5061  minmn = ap::minint(m, n);
5062  k = ap::minint(minmn, qrows);
5063  q.setbounds(1, qrows, 1, n);
5064  v.setbounds(1, n);
5065  work.setbounds(1, qrows);
5066  for(i=1; i<=qrows; i++)
5067  {
5068  for(j=1; j<=n; j++)
5069  {
5070  if( i==j )
5071  {
5072  q(i,j) = 1;
5073  }
5074  else
5075  {
5076  q(i,j) = 0;
5077  }
5078  }
5079  }
5080 
5081  //
5082  // unpack Q
5083  //
5084  for(i=k; i>=1; i--)
5085  {
5086 
5087  //
5088  // Apply H(i)
5089  //
5090  vm = n-i+1;
5091  ap::vmove(v.getvector(1, vm), a.getrow(i, i, n));
5092  v(1) = 1;
5093  reflections::applyreflectionfromtheright<Precision>(q, tau(i), v, 1, qrows, i, n, work);
5094  }
5095  }
5096 
5097 
5098  /*************************************************************************
5099  Obsolete 1-based subroutine
5100  *************************************************************************/
5101  template<unsigned int Precision>
5103  int m,
5104  int n,
5107  {
5108  int i;
5109  int j;
5111 
5112 
5113  if( n<=0 )
5114  {
5115  return;
5116  }
5117  q.setbounds(1, n, 1, n);
5118  l.setbounds(1, m, 1, n);
5119 
5120  //
5121  // LQDecomposition
5122  //
5123  lqdecomposition<Precision>(a, m, n, tau);
5124 
5125  //
5126  // L
5127  //
5128  for(i=1; i<=m; i++)
5129  {
5130  for(j=1; j<=n; j++)
5131  {
5132  if( j>i )
5133  {
5134  l(i,j) = 0;
5135  }
5136  else
5137  {
5138  l(i,j) = a(i,j);
5139  }
5140  }
5141  }
5142 
5143  //
5144  // Q
5145  //
5146  unpackqfromlq<Precision>(a, m, n, tau, n, q);
5147  }
5148 } // namespace
5149 
5150 /* stuff included from ./blas.h */
5151 
5152 /*************************************************************************
5153 Copyright (c) 2005-2007, Sergey Bochkanov (ALGLIB project).
5154 
5155 Redistribution and use in source and binary forms, with or without
5156 modification, are permitted provided that the following conditions are
5157 met:
5158 
5159 - Redistributions of source code must retain the above copyright
5160  notice, this list of conditions and the following disclaimer.
5161 
5162 - Redistributions in binary form must reproduce the above copyright
5163  notice, this list of conditions and the following disclaimer listed
5164  in this license in the documentation and/or other materials
5165  provided with the distribution.
5166 
5167 - Neither the name of the copyright holders nor the names of its
5168  contributors may be used to endorse or promote products derived from
5169  this software without specific prior written permission.
5170 
5171 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
5172 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
5173 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
5174 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
5175 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
5176 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
5177 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5178 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5179 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5180 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
5181 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5182 *************************************************************************/
5183 
5184 namespace blas
5185 {
5186  template<unsigned int Precision>
5188  int i1,
5189  int i2);
5190  template<unsigned int Precision>
5192  int i1,
5193  int i2);
5194  template<unsigned int Precision>
5196  int i1,
5197  int i2,
5198  int j);
5199  template<unsigned int Precision>
5201  int j1,
5202  int j2,
5203  int i);
5204  template<unsigned int Precision>
5206  int i1,
5207  int i2,
5208  int j1,
5209  int j2,
5211  template<unsigned int Precision>
5213  int is1,
5214  int is2,
5215  int js1,
5216  int js2,
5218  int id1,
5219  int id2,
5220  int jd1,
5221  int jd2);
5222  template<unsigned int Precision>
5224  int i1,
5225  int i2,
5226  int j1,
5227  int j2,
5229  template<unsigned int Precision>
5231  int is1,
5232  int is2,
5233  int js1,
5234  int js2,
5236  int id1,
5237  int id2,
5238  int jd1,
5239  int jd2);
5240  template<unsigned int Precision>
5242  int i1,
5243  int i2,
5244  int j1,
5245  int j2,
5246  bool trans,
5248  int ix1,
5249  int ix2,
5252  int iy1,
5253  int iy2,
5255  template<unsigned int Precision>
5258  template<unsigned int Precision>
5260  int ai1,
5261  int ai2,
5262  int aj1,
5263  int aj2,
5264  bool transa,
5266  int bi1,
5267  int bi2,
5268  int bj1,
5269  int bj2,
5270  bool transb,
5273  int ci1,
5274  int ci2,
5275  int cj1,
5276  int cj2,
5279 
5280 
5281  template<unsigned int Precision>
5283  int i1,
5284  int i2)
5285  {
5287  int n;
5288  int ix;
5289  amp::ampf<Precision> absxi;
5292 
5293 
5294  n = i2-i1+1;
5295  if( n<1 )
5296  {
5297  result = 0;
5298  return result;
5299  }
5300  if( n==1 )
5301  {
5302  result = amp::abs<Precision>(x(i1));
5303  return result;
5304  }
5305  scl = 0;
5306  ssq = 1;
5307  for(ix=i1; ix<=i2; ix++)
5308  {
5309  if( x(ix)!=0 )
5310  {
5311  absxi = amp::abs<Precision>(x(ix));
5312  if( scl<absxi )
5313  {
5314  ssq = 1+ssq*amp::sqr<Precision>(scl/absxi);
5315  scl = absxi;
5316  }
5317  else
5318  {
5319  ssq = ssq+amp::sqr<Precision>(absxi/scl);
5320  }
5321  }
5322  }
5323  result = scl*amp::sqrt<Precision>(ssq);
5324  return result;
5325  }
5326 
5327 
5328  template<unsigned int Precision>
5330  int i1,
5331  int i2)
5332  {
5333  int result;
5334  int i;
5336 
5337 
5338  result = i1;
5339  a = amp::abs<Precision>(x(result));
5340  for(i=i1+1; i<=i2; i++)
5341  {
5342  if( amp::abs<Precision>(x(i))>amp::abs<Precision>(x(result)) )
5343  {
5344  result = i;
5345  }
5346  }
5347  return result;
5348  }
5349 
5350 
5351  template<unsigned int Precision>
5353  int i1,
5354  int i2,
5355  int j)
5356  {
5357  int result;
5358  int i;
5360 
5361 
5362  result = i1;
5363  a = amp::abs<Precision>(x(result,j));
5364  for(i=i1+1; i<=i2; i++)
5365  {
5366  if( amp::abs<Precision>(x(i,j))>amp::abs<Precision>(x(result,j)) )
5367  {
5368  result = i;
5369  }
5370  }
5371  return result;
5372  }
5373 
5374 
5375  template<unsigned int Precision>
5377  int j1,
5378  int j2,
5379  int i)
5380  {
5381  int result;
5382  int j;
5384 
5385 
5386  result = j1;
5387  a = amp::abs<Precision>(x(i,result));
5388  for(j=j1+1; j<=j2; j++)
5389  {
5390  if( amp::abs<Precision>(x(i,j))>amp::abs<Precision>(x(i,result)) )
5391  {
5392  result = j;
5393  }
5394  }
5395  return result;
5396  }
5397 
5398 
5399  template<unsigned int Precision>
5401  int i1,
5402  int i2,
5403  int j1,
5404  int j2,
5406  {
5408  int i;
5409  int j;
5410 
5411 
5412  ap::ap_error::make_assertion(i2-i1==j2-j1);
5413  for(j=j1; j<=j2; j++)
5414  {
5415  work(j) = 0;
5416  }
5417  for(i=i1; i<=i2; i++)
5418  {
5419  for(j=ap::maxint(j1, j1+i-i1-1); j<=j2; j++)
5420  {
5421  work(j) = work(j)+amp::abs<Precision>(a(i,j));
5422  }
5423  }
5424  result = 0;
5425  for(j=j1; j<=j2; j++)
5426  {
5427  result = amp::maximum<Precision>(result, work(j));
5428  }
5429  return result;
5430  }
5431 
5432 
5433  template<unsigned int Precision>
5435  int is1,
5436  int is2,
5437  int js1,
5438  int js2,
5440  int id1,
5441  int id2,
5442  int jd1,
5443  int jd2)
5444  {
5445  int isrc;
5446  int idst;
5447 
5448 
5449  if( is1>is2 || js1>js2 )
5450  {
5451  return;
5452  }
5453  ap::ap_error::make_assertion(is2-is1==id2-id1);
5454  ap::ap_error::make_assertion(js2-js1==jd2-jd1);
5455  for(isrc=is1; isrc<=is2; isrc++)
5456  {
5457  idst = isrc-is1+id1;
5458  ap::vmove(b.getrow(idst, jd1, jd2), a.getrow(isrc, js1, js2));
5459  }
5460  }
5461 
5462 
5463  template<unsigned int Precision>
5465  int i1,
5466  int i2,
5467  int j1,
5468  int j2,
5470  {
5471  int i;
5472  int j;
5473  int ips;
5474  int jps;
5475  int l;
5476 
5477 
5478  if( i1>i2 || j1>j2 )
5479  {
5480  return;
5481  }
5482  ap::ap_error::make_assertion(i1-i2==j1-j2);
5483  for(i=i1; i<=i2-1; i++)
5484  {
5485  j = j1+i-i1;
5486  ips = i+1;
5487  jps = j1+ips-i1;
5488  l = i2-i;
5489  ap::vmove(work.getvector(1, l), a.getcolumn(j, ips, i2));
5490  ap::vmove(a.getcolumn(j, ips, i2), a.getrow(i, jps, j2));
5491  ap::vmove(a.getrow(i, jps, j2), work.getvector(1, l));
5492  }
5493  }
5494 
5495 
5496  template<unsigned int Precision>
5498  int is1,
5499  int is2,
5500  int js1,
5501  int js2,
5503  int id1,
5504  int id2,
5505  int jd1,
5506  int jd2)
5507  {
5508  int isrc;
5509  int jdst;
5510 
5511 
5512  if( is1>is2 || js1>js2 )
5513  {
5514  return;
5515  }
5516  ap::ap_error::make_assertion(is2-is1==jd2-jd1);
5517  ap::ap_error::make_assertion(js2-js1==id2-id1);
5518  for(isrc=is1; isrc<=is2; isrc++)
5519  {
5520  jdst = isrc-is1+jd1;
5521  ap::vmove(b.getcolumn(jdst, id1, id2), a.getrow(isrc, js1, js2));
5522  }
5523  }
5524 
5525 
5526  template<unsigned int Precision>
5528  int i1,
5529  int i2,
5530  int j1,
5531  int j2,
5532  bool trans,
5534  int ix1,
5535  int ix2,
5538  int iy1,
5539  int iy2,
5541  {
5542  int i;
5544 
5545 
5546  if( !trans )
5547  {
5548 
5549  //
5550  // y := alpha*A*x + beta*y;
5551  //
5552  if( i1>i2 || j1>j2 )
5553  {
5554  return;
5555  }
5556  ap::ap_error::make_assertion(j2-j1==ix2-ix1);
5557  ap::ap_error::make_assertion(i2-i1==iy2-iy1);
5558 
5559  //
5560  // beta*y
5561  //
5562  if( beta==0 )
5563  {
5564  for(i=iy1; i<=iy2; i++)
5565  {
5566  y(i) = 0;
5567  }
5568  }
5569  else
5570  {
5571  ap::vmul(y.getvector(iy1, iy2), beta);
5572  }
5573 
5574  //
5575  // alpha*A*x
5576  //
5577  for(i=i1; i<=i2; i++)
5578  {
5579  v = ap::vdotproduct(a.getrow(i, j1, j2), x.getvector(ix1, ix2));
5580  y(iy1+i-i1) = y(iy1+i-i1)+alpha*v;
5581  }
5582  }
5583  else
5584  {
5585 
5586  //
5587  // y := alpha*A'*x + beta*y;
5588  //
5589  if( i1>i2 || j1>j2 )
5590  {
5591  return;
5592  }
5593  ap::ap_error::make_assertion(i2-i1==ix2-ix1);
5594  ap::ap_error::make_assertion(j2-j1==iy2-iy1);
5595 
5596  //
5597  // beta*y
5598  //
5599  if( beta==0 )
5600  {
5601  for(i=iy1; i<=iy2; i++)
5602  {
5603  y(i) = 0;
5604  }
5605  }
5606  else
5607  {
5608  ap::vmul(y.getvector(iy1, iy2), beta);
5609  }
5610 
5611  //
5612  // alpha*A'*x
5613  //
5614  for(i=i1; i<=i2; i++)
5615  {
5616  v = alpha*x(ix1+i-i1);
5617  ap::vadd(y.getvector(iy1, iy2), a.getrow(i, j1, j2), v);
5618  }
5619  }
5620  }
5621 
5622 
5623  template<unsigned int Precision>
5626  {
5629  amp::ampf<Precision> xabs;
5630  amp::ampf<Precision> yabs;
5632 
5633 
5634  xabs = amp::abs<Precision>(x);
5635  yabs = amp::abs<Precision>(y);
5636  w = amp::maximum<Precision>(xabs, yabs);
5637  z = amp::minimum<Precision>(xabs, yabs);
5638  if( z==0 )
5639  {
5640  result = w;
5641  }
5642  else
5643  {
5644  result = w*amp::sqrt<Precision>(1+amp::sqr<Precision>(z/w));
5645  }
5646  return result;
5647  }
5648 
5649 
5650  template<unsigned int Precision>
5652  int ai1,
5653  int ai2,
5654  int aj1,
5655  int aj2,
5656  bool transa,
5658  int bi1,
5659  int bi2,
5660  int bj1,
5661  int bj2,
5662  bool transb,
5665  int ci1,
5666  int ci2,
5667  int cj1,
5668  int cj2,
5671  {
5672  int arows;
5673  int acols;
5674  int brows;
5675  int bcols;
5676  int crows;
5677  int ccols;
5678  int i;
5679  int j;
5680  int k;
5681  int l;
5682  int r;
5684 
5685 
5686 
5687  //
5688  // Setup
5689  //
5690  if( !transa )
5691  {
5692  arows = ai2-ai1+1;
5693  acols = aj2-aj1+1;
5694  }
5695  else
5696  {
5697  arows = aj2-aj1+1;
5698  acols = ai2-ai1+1;
5699  }
5700  if( !transb )
5701  {
5702  brows = bi2-bi1+1;
5703  bcols = bj2-bj1+1;
5704  }
5705  else
5706  {
5707  brows = bj2-bj1+1;
5708  bcols = bi2-bi1+1;
5709  }
5710  ap::ap_error::make_assertion(acols==brows);
5711  if( arows<=0 || acols<=0 || brows<=0 || bcols<=0 )
5712  {
5713  return;
5714  }
5715  crows = arows;
5716  ccols = bcols;
5717 
5718  //
5719  // Test WORK
5720  //
5721  i = ap::maxint(arows, acols);
5722  i = ap::maxint(brows, i);
5723  i = ap::maxint(i, bcols);
5724  work(1) = 0;
5725  work(i) = 0;
5726 
5727  //
5728  // Prepare C
5729  //
5730  if( beta==0 )
5731  {
5732  for(i=ci1; i<=ci2; i++)
5733  {
5734  for(j=cj1; j<=cj2; j++)
5735  {
5736  c(i,j) = 0;
5737  }
5738  }
5739  }
5740  else
5741  {
5742  for(i=ci1; i<=ci2; i++)
5743  {
5744  ap::vmul(c.getrow(i, cj1, cj2), beta);
5745  }
5746  }
5747 
5748  //
5749  // A*B
5750  //
5751  if( !transa && !transb )
5752  {
5753  for(l=ai1; l<=ai2; l++)
5754  {
5755  for(r=bi1; r<=bi2; r++)
5756  {
5757  v = alpha*a(l,aj1+r-bi1);
5758  k = ci1+l-ai1;
5759  ap::vadd(c.getrow(k, cj1, cj2), b.getrow(r, bj1, bj2), v);
5760  }
5761  }
5762  return;
5763  }
5764 
5765  //
5766  // A*B'
5767  //
5768  if( !transa && transb )
5769  {
5770  if( arows*acols<brows*bcols )
5771  {
5772  for(r=bi1; r<=bi2; r++)
5773  {
5774  for(l=ai1; l<=ai2; l++)
5775  {
5776  v = ap::vdotproduct(a.getrow(l, aj1, aj2), b.getrow(r, bj1, bj2));
5777  c(ci1+l-ai1,cj1+r-bi1) = c(ci1+l-ai1,cj1+r-bi1)+alpha*v;
5778  }
5779  }
5780  return;
5781  }
5782  else
5783  {
5784  for(l=ai1; l<=ai2; l++)
5785  {
5786  for(r=bi1; r<=bi2; r++)
5787  {
5788  v = ap::vdotproduct(a.getrow(l, aj1, aj2), b.getrow(r, bj1, bj2));
5789  c(ci1+l-ai1,cj1+r-bi1) = c(ci1+l-ai1,cj1+r-bi1)+alpha*v;
5790  }
5791  }
5792  return;
5793  }
5794  }
5795 
5796  //
5797  // A'*B
5798  //
5799  if( transa && !transb )
5800  {
5801  for(l=aj1; l<=aj2; l++)
5802  {
5803  for(r=bi1; r<=bi2; r++)
5804  {
5805  v = alpha*a(ai1+r-bi1,l);
5806  k = ci1+l-aj1;
5807  ap::vadd(c.getrow(k, cj1, cj2), b.getrow(r, bj1, bj2), v);
5808  }
5809  }
5810  return;
5811  }
5812 
5813  //
5814  // A'*B'
5815  //
5816  if( transa && transb )
5817  {
5818  if( arows*acols<brows*bcols )
5819  {
5820  for(r=bi1; r<=bi2; r++)
5821  {
5822  for(i=1; i<=crows; i++)
5823  {
5824  work(i) = amp::ampf<Precision>("0.0");
5825  }
5826  for(l=ai1; l<=ai2; l++)
5827  {
5828  v = alpha*b(r,bj1+l-ai1);
5829  k = cj1+r-bi1;
5830  ap::vadd(work.getvector(1, crows), a.getrow(l, aj1, aj2), v);
5831  }
5832  ap::vadd(c.getcolumn(k, ci1, ci2), work.getvector(1, crows));
5833  }
5834  return;
5835  }
5836  else
5837  {
5838  for(l=aj1; l<=aj2; l++)
5839  {
5840  k = ai2-ai1+1;
5841  ap::vmove(work.getvector(1, k), a.getcolumn(l, ai1, ai2));
5842  for(r=bi1; r<=bi2; r++)
5843  {
5844  v = ap::vdotproduct(work.getvector(1, k), b.getrow(r, bj1, bj2));
5845  c(ci1+l-aj1,cj1+r-bi1) = c(ci1+l-aj1,cj1+r-bi1)+alpha*v;
5846  }
5847  }
5848  return;
5849  }
5850  }
5851  }
5852 } // namespace
5853 
5854 /* stuff included from ./rotations.h */
5855 
5856 /*************************************************************************
5857 Copyright (c) 1992-2007 The University of Tennessee. All rights reserved.
5858 
5859 Contributors:
5860  * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
5861  pseudocode.
5862 
5863 See subroutines comments for additional copyrights.
5864 
5865 Redistribution and use in source and binary forms, with or without
5866 modification, are permitted provided that the following conditions are
5867 met:
5868 
5869 - Redistributions of source code must retain the above copyright
5870  notice, this list of conditions and the following disclaimer.
5871 
5872 - Redistributions in binary form must reproduce the above copyright
5873  notice, this list of conditions and the following disclaimer listed
5874  in this license in the documentation and/or other materials
5875  provided with the distribution.
5876 
5877 - Neither the name of the copyright holders nor the names of its
5878  contributors may be used to endorse or promote products derived from
5879  this software without specific prior written permission.
5880 
5881 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
5882 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
5883 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
5884 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
5885 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
5886 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
5887 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5888 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5889 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5890 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
5891 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5892 *************************************************************************/
5893 
5894 namespace rotations
5895 {
5896  template<unsigned int Precision>
5897  void applyrotationsfromtheleft(bool isforward,
5898  int m1,
5899  int m2,
5900  int n1,
5901  int n2,
5906  template<unsigned int Precision>
5907  void applyrotationsfromtheright(bool isforward,
5908  int m1,
5909  int m2,
5910  int n1,
5911  int n2,
5916  template<unsigned int Precision>
5922 
5923 
5924  /*************************************************************************
5925  Application of a sequence of elementary rotations to a matrix
5926 
5927  The algorithm pre-multiplies the matrix by a sequence of rotation
5928  transformations which is given by arrays C and S. Depending on the value
5929  of the IsForward parameter either 1 and 2, 3 and 4 and so on (if IsForward=true)
5930  rows are rotated, or the rows N and N-1, N-2 and N-3 and so on, are rotated.
5931 
5932  Not the whole matrix but only a part of it is transformed (rows from M1 to
5933  M2, columns from N1 to N2). Only the elements of this submatrix are changed.
5934 
5935  Input parameters:
5936  IsForward - the sequence of the rotation application.
5937  M1,M2 - the range of rows to be transformed.
5938  N1, N2 - the range of columns to be transformed.
5939  C,S - transformation coefficients.
5940  Array whose index ranges within [1..M2-M1].
5941  A - processed matrix.
5942  WORK - working array whose index ranges within [N1..N2].
5943 
5944  Output parameters:
5945  A - transformed matrix.
5946 
5947  Utility subroutine.
5948  *************************************************************************/
5949  template<unsigned int Precision>
5950  void applyrotationsfromtheleft(bool isforward,
5951  int m1,
5952  int m2,
5953  int n1,
5954  int n2,
5959  {
5960  int j;
5961  int jp1;
5962  amp::ampf<Precision> ctemp;
5963  amp::ampf<Precision> stemp;
5964  amp::ampf<Precision> temp;
5965 
5966 
5967  if( m1>m2 || n1>n2 )
5968  {
5969  return;
5970  }
5971 
5972  //
5973  // Form P * A
5974  //
5975  if( isforward )
5976  {
5977  if( n1!=n2 )
5978  {
5979 
5980  //
5981  // Common case: N1<>N2
5982  //
5983  for(j=m1; j<=m2-1; j++)
5984  {
5985  ctemp = c(j-m1+1);
5986  stemp = s(j-m1+1);
5987  if( ctemp!=1 || stemp!=0 )
5988  {
5989  jp1 = j+1;
5990  ap::vmove(work.getvector(n1, n2), a.getrow(jp1, n1, n2), ctemp);
5991  ap::vsub(work.getvector(n1, n2), a.getrow(j, n1, n2), stemp);
5992  ap::vmul(a.getrow(j, n1, n2), ctemp);
5993  ap::vadd(a.getrow(j, n1, n2), a.getrow(jp1, n1, n2), stemp);
5994  ap::vmove(a.getrow(jp1, n1, n2), work.getvector(n1, n2));
5995  }
5996  }
5997  }
5998  else
5999  {
6000 
6001  //
6002  // Special case: N1=N2
6003  //
6004  for(j=m1; j<=m2-1; j++)
6005  {
6006  ctemp = c(j-m1+1);
6007  stemp = s(j-m1+1);
6008  if( ctemp!=1 || stemp!=0 )
6009  {
6010  temp = a(j+1,n1);
6011  a(j+1,n1) = ctemp*temp-stemp*a(j,n1);
6012  a(j,n1) = stemp*temp+ctemp*a(j,n1);
6013  }
6014  }
6015  }
6016  }
6017  else
6018  {
6019  if( n1!=n2 )
6020  {
6021 
6022  //
6023  // Common case: N1<>N2
6024  //
6025  for(j=m2-1; j>=m1; j--)
6026  {
6027  ctemp = c(j-m1+1);
6028  stemp = s(j-m1+1);
6029  if( ctemp!=1 || stemp!=0 )
6030  {
6031  jp1 = j+1;
6032  ap::vmove(work.getvector(n1, n2), a.getrow(jp1, n1, n2), ctemp);
6033  ap::vsub(work.getvector(n1, n2), a.getrow(j, n1, n2), stemp);
6034  ap::vmul(a.getrow(j, n1, n2), ctemp);
6035  ap::vadd(a.getrow(j, n1, n2), a.getrow(jp1, n1, n2), stemp);
6036  ap::vmove(a.getrow(jp1, n1, n2), work.getvector(n1, n2));
6037  }
6038  }
6039  }
6040  else
6041  {
6042 
6043  //
6044  // Special case: N1=N2
6045  //
6046  for(j=m2-1; j>=m1; j--)
6047  {
6048  ctemp = c(j-m1+1);
6049  stemp = s(j-m1+1);
6050  if( ctemp!=1 || stemp!=0 )
6051  {
6052  temp = a(j+1,n1);
6053  a(j+1,n1) = ctemp*temp-stemp*a(j,n1);
6054  a(j,n1) = stemp*temp+ctemp*a(j,n1);
6055  }
6056  }
6057  }
6058  }
6059  }
6060 
6061 
6062  /*************************************************************************
6063  Application of a sequence of elementary rotations to a matrix
6064 
6065  The algorithm post-multiplies the matrix by a sequence of rotation
6066  transformations which is given by arrays C and S. Depending on the value
6067  of the IsForward parameter either 1 and 2, 3 and 4 and so on (if IsForward=true)
6068  rows are rotated, or the rows N and N-1, N-2 and N-3 and so on are rotated.
6069 
6070  Not the whole matrix but only a part of it is transformed (rows from M1
6071  to M2, columns from N1 to N2). Only the elements of this submatrix are changed.
6072 
6073  Input parameters:
6074  IsForward - the sequence of the rotation application.
6075  M1,M2 - the range of rows to be transformed.
6076  N1, N2 - the range of columns to be transformed.
6077  C,S - transformation coefficients.
6078  Array whose index ranges within [1..N2-N1].
6079  A - processed matrix.
6080  WORK - working array whose index ranges within [M1..M2].
6081 
6082  Output parameters:
6083  A - transformed matrix.
6084 
6085  Utility subroutine.
6086  *************************************************************************/
6087  template<unsigned int Precision>
6088  void applyrotationsfromtheright(bool isforward,
6089  int m1,
6090  int m2,
6091  int n1,
6092  int n2,
6097  {
6098  int j;
6099  int jp1;
6100  amp::ampf<Precision> ctemp;
6101  amp::ampf<Precision> stemp;
6102  amp::ampf<Precision> temp;
6103 
6104 
6105 
6106  //
6107  // Form A * P'
6108  //
6109  if( isforward )
6110  {
6111  if( m1!=m2 )
6112  {
6113 
6114  //
6115  // Common case: M1<>M2
6116  //
6117  for(j=n1; j<=n2-1; j++)
6118  {
6119  ctemp = c(j-n1+1);
6120  stemp = s(j-n1+1);
6121  if( ctemp!=1 || stemp!=0 )
6122  {
6123  jp1 = j+1;
6124  ap::vmove(work.getvector(m1, m2), a.getcolumn(jp1, m1, m2), ctemp);
6125  ap::vsub(work.getvector(m1, m2), a.getcolumn(j, m1, m2), stemp);
6126  ap::vmul(a.getcolumn(j, m1, m2), ctemp);
6127  ap::vadd(a.getcolumn(j, m1, m2), a.getcolumn(jp1, m1, m2), stemp);
6128  ap::vmove(a.getcolumn(jp1, m1, m2), work.getvector(m1, m2));
6129  }
6130  }
6131  }
6132  else
6133  {
6134 
6135  //
6136  // Special case: M1=M2
6137  //
6138  for(j=n1; j<=n2-1; j++)
6139  {
6140  ctemp = c(j-n1+1);
6141  stemp = s(j-n1+1);
6142  if( ctemp!=1 || stemp!=0 )
6143  {
6144  temp = a(m1,j+1);
6145  a(m1,j+1) = ctemp*temp-stemp*a(m1,j);
6146  a(m1,j) = stemp*temp+ctemp*a(m1,j);
6147  }
6148  }
6149  }
6150  }
6151  else
6152  {
6153  if( m1!=m2 )
6154  {
6155 
6156  //
6157  // Common case: M1<>M2
6158  //
6159  for(j=n2-1; j>=n1; j--)
6160  {
6161  ctemp = c(j-n1+1);
6162  stemp = s(j-n1+1);
6163  if( ctemp!=1 || stemp!=0 )
6164  {
6165  jp1 = j+1;
6166  ap::vmove(work.getvector(m1, m2), a.getcolumn(jp1, m1, m2), ctemp);
6167  ap::vsub(work.getvector(m1, m2), a.getcolumn(j, m1, m2), stemp);
6168  ap::vmul(a.getcolumn(j, m1, m2), ctemp);
6169  ap::vadd(a.getcolumn(j, m1, m2), a.getcolumn(jp1, m1, m2), stemp);
6170  ap::vmove(a.getcolumn(jp1, m1, m2), work.getvector(m1, m2));
6171  }
6172  }
6173  }
6174  else
6175  {
6176 
6177  //
6178  // Special case: M1=M2
6179  //
6180  for(j=n2-1; j>=n1; j--)
6181  {
6182  ctemp = c(j-n1+1);
6183  stemp = s(j-n1+1);
6184  if( ctemp!=1 || stemp!=0 )
6185  {
6186  temp = a(m1,j+1);
6187  a(m1,j+1) = ctemp*temp-stemp*a(m1,j);
6188  a(m1,j) = stemp*temp+ctemp*a(m1,j);
6189  }
6190  }
6191  }
6192  }
6193  }
6194 
6195 
6196  /*************************************************************************
6197  The subroutine generates the elementary rotation, so that:
6198 
6199  [ CS SN ] . [ F ] = [ R ]
6200  [ -SN CS ] [ G ] [ 0 ]
6201 
6202  CS**2 + SN**2 = 1
6203  *************************************************************************/
6204  template<unsigned int Precision>
6210  {
6213 
6214 
6215  if( g==0 )
6216  {
6217  cs = 1;
6218  sn = 0;
6219  r = f;
6220  }
6221  else
6222  {
6223  if( f==0 )
6224  {
6225  cs = 0;
6226  sn = 1;
6227  r = g;
6228  }
6229  else
6230  {
6231  f1 = f;
6232  g1 = g;
6233  r = amp::sqrt<Precision>(amp::sqr<Precision>(f1)+amp::sqr<Precision>(g1));
6234  cs = f1/r;
6235  sn = g1/r;
6236  if( amp::abs<Precision>(f)>amp::abs<Precision>(g) && cs<0 )
6237  {
6238  cs = -cs;
6239  sn = -sn;
6240  r = -r;
6241  }
6242  }
6243  }
6244  }
6245 } // namespace
6246 
6247 /* stuff included from ./bdsvd.h */
6248 
6249 /*************************************************************************
6250 Copyright (c) 1992-2007 The University of Tennessee. All rights reserved.
6251 
6252 Contributors:
6253  * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
6254  pseudocode.
6255 
6256 See subroutines comments for additional copyrights.
6257 
6258 Redistribution and use in source and binary forms, with or without
6259 modification, are permitted provided that the following conditions are
6260 met:
6261 
6262 - Redistributions of source code must retain the above copyright
6263  notice, this list of conditions and the following disclaimer.
6264 
6265 - Redistributions in binary form must reproduce the above copyright
6266  notice, this list of conditions and the following disclaimer listed
6267  in this license in the documentation and/or other materials
6268  provided with the distribution.
6269 
6270 - Neither the name of the copyright holders nor the names of its
6271  contributors may be used to endorse or promote products derived from
6272  this software without specific prior written permission.
6273 
6274 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6275 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
6276 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
6277 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
6278 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
6279 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
6280 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
6281 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
6282 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
6283 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
6284 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6285 *************************************************************************/
6286 
6287 namespace bdsvd
6288 {
6289  template<unsigned int Precision>
6292  int n,
6293  bool isupper,
6294  bool isfractionalaccuracyrequired,
6296  int nru,
6298  int ncc,
6300  int ncvt);
6301  template<unsigned int Precision>
6304  int n,
6305  bool isupper,
6306  bool isfractionalaccuracyrequired,
6308  int nru,
6310  int ncc,
6312  int ncvt);
6313  template<unsigned int Precision>
6316  int n,
6317  bool isupper,
6318  bool isfractionalaccuracyrequired,
6320  int ustart,
6321  int nru,
6323  int cstart,
6324  int ncc,
6326  int vstart,
6327  int ncvt);
6328  template<unsigned int Precision>
6331  template<unsigned int Precision>
6335  amp::ampf<Precision>& ssmin,
6336  amp::ampf<Precision>& ssmax);
6337  template<unsigned int Precision>
6341  amp::ampf<Precision>& ssmin,
6342  amp::ampf<Precision>& ssmax,
6343  amp::ampf<Precision>& snr,
6344  amp::ampf<Precision>& csr,
6345  amp::ampf<Precision>& snl,
6346  amp::ampf<Precision>& csl);
6347 
6348 
6349  /*************************************************************************
6350  Singular value decomposition of a bidiagonal matrix (extended algorithm)
6351 
6352  The algorithm performs the singular value decomposition of a bidiagonal
6353  matrix B (upper or lower) representing it as B = Q*S*P^T, where Q and P -
6354  orthogonal matrices, S - diagonal matrix with non-negative elements on the
6355  main diagonal, in descending order.
6356 
6357  The algorithm finds singular values. In addition, the algorithm can
6358  calculate matrices Q and P (more precisely, not the matrices, but their
6359  product with given matrices U and VT - U*Q and (P^T)*VT)). Of course,
6360  matrices U and VT can be of any type, including identity. Furthermore, the
6361  algorithm can calculate Q'*C (this product is calculated more effectively
6362  than U*Q, because this calculation operates with rows instead of matrix
6363  columns).
6364 
6365  The feature of the algorithm is its ability to find all singular values
6366  including those which are arbitrarily close to 0 with relative accuracy
6367  close to machine precision. If the parameter IsFractionalAccuracyRequired
6368  is set to True, all singular values will have high relative accuracy close
6369  to machine precision. If the parameter is set to False, only the biggest
6370  singular value will have relative accuracy close to machine precision.
6371  The absolute error of other singular values is equal to the absolute error
6372  of the biggest singular value.
6373 
6374  Input parameters:
6375  D - main diagonal of matrix B.
6376  Array whose index ranges within [0..N-1].
6377  E - superdiagonal (or subdiagonal) of matrix B.
6378  Array whose index ranges within [0..N-2].
6379  N - size of matrix B.
6380  IsUpper - True, if the matrix is upper bidiagonal.
6381  IsFractionalAccuracyRequired -
6382  accuracy to search singular values with.
6383  U - matrix to be multiplied by Q.
6384  Array whose indexes range within [0..NRU-1, 0..N-1].
6385  The matrix can be bigger, in that case only the submatrix
6386  [0..NRU-1, 0..N-1] will be multiplied by Q.
6387  NRU - number of rows in matrix U.
6388  C - matrix to be multiplied by Q'.
6389  Array whose indexes range within [0..N-1, 0..NCC-1].
6390  The matrix can be bigger, in that case only the submatrix
6391  [0..N-1, 0..NCC-1] will be multiplied by Q'.
6392  NCC - number of columns in matrix C.
6393  VT - matrix to be multiplied by P^T.
6394  Array whose indexes range within [0..N-1, 0..NCVT-1].
6395  The matrix can be bigger, in that case only the submatrix
6396  [0..N-1, 0..NCVT-1] will be multiplied by P^T.
6397  NCVT - number of columns in matrix VT.
6398 
6399  Output parameters:
6400  D - singular values of matrix B in descending order.
6401  U - if NRU>0, contains matrix U*Q.
6402  VT - if NCVT>0, contains matrix (P^T)*VT.
6403  C - if NCC>0, contains matrix Q'*C.
6404 
6405  Result:
6406  True, if the algorithm has converged.
6407  False, if the algorithm hasn't converged (rare case).
6408 
6409  Additional information:
6410  The type of convergence is controlled by the internal parameter TOL.
6411  If the parameter is greater than 0, the singular values will have
6412  relative accuracy TOL. If TOL<0, the singular values will have
6413  absolute accuracy ABS(TOL)*norm(B).
6414  By default, |TOL| falls within the range of 10*Epsilon and 100*Epsilon,
6415  where Epsilon is the machine precision. It is not recommended to use
6416  TOL less than 10*Epsilon since this will considerably slow down the
6417  algorithm and may not lead to error decreasing.
6418  History:
6419  * 31 March, 2007.
6420  changed MAXITR from 6 to 12.
6421 
6422  -- LAPACK routine (version 3.0) --
6423  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
6424  Courant Institute, Argonne National Lab, and Rice University
6425  October 31, 1999.
6426  *************************************************************************/
6427  template<unsigned int Precision>
6430  int n,
6431  bool isupper,
6432  bool isfractionalaccuracyrequired,
6434  int nru,
6436  int ncc,
6438  int ncvt)
6439  {
6440  bool result;
6443 
6444 
6445  d1.setbounds(1, n);
6446  ap::vmove(d1.getvector(1, n), d.getvector(0, n-1));
6447  if( n>1 )
6448  {
6449  e1.setbounds(1, n-1);
6450  ap::vmove(e1.getvector(1, n-1), e.getvector(0, n-2));
6451  }
6452  result = bidiagonalsvddecompositioninternal<Precision>(d1, e1, n, isupper, isfractionalaccuracyrequired, u, 0, nru, c, 0, ncc, vt, 0, ncvt);
6453  ap::vmove(d.getvector(0, n-1), d1.getvector(1, n));
6454  return result;
6455  }
6456 
6457 
6458  /*************************************************************************
6459  Obsolete 1-based subroutine. See RMatrixBDSVD for 0-based replacement.
6460 
6461  History:
6462  * 31 March, 2007.
6463  changed MAXITR from 6 to 12.
6464 
6465  -- LAPACK routine (version 3.0) --
6466  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
6467  Courant Institute, Argonne National Lab, and Rice University
6468  October 31, 1999.
6469  *************************************************************************/
6470  template<unsigned int Precision>
6473  int n,
6474  bool isupper,
6475  bool isfractionalaccuracyrequired,
6477  int nru,
6479  int ncc,
6481  int ncvt)
6482  {
6483  bool result;
6484 
6485 
6486  result = bidiagonalsvddecompositioninternal<Precision>(d, e, n, isupper, isfractionalaccuracyrequired, u, 1, nru, c, 1, ncc, vt, 1, ncvt);
6487  return result;
6488  }
6489 
6490 
6491  /*************************************************************************
6492  Internal working subroutine for bidiagonal decomposition
6493  *************************************************************************/
6494  template<unsigned int Precision>
6497  int n,
6498  bool isupper,
6499  bool isfractionalaccuracyrequired,
6501  int ustart,
6502  int nru,
6504  int cstart,
6505  int ncc,
6507  int vstart,
6508  int ncvt)
6509  {
6510  bool result;
6511  int i;
6512  int idir;
6513  int isub;
6514  int iter;
6515  int j;
6516  int ll;
6517  int lll;
6518  int m;
6519  int maxit;
6520  int oldll;
6521  int oldm;
6522  amp::ampf<Precision> abse;
6523  amp::ampf<Precision> abss;
6524  amp::ampf<Precision> cosl;
6525  amp::ampf<Precision> cosr;
6532  amp::ampf<Precision> oldcs;
6533  amp::ampf<Precision> oldsn;
6535  amp::ampf<Precision> shift;
6536  amp::ampf<Precision> sigmn;
6537  amp::ampf<Precision> sigmx;
6538  amp::ampf<Precision> sinl;
6539  amp::ampf<Precision> sinr;
6541  amp::ampf<Precision> smax;
6542  amp::ampf<Precision> smin;
6543  amp::ampf<Precision> sminl;
6544  amp::ampf<Precision> sminlo;
6545  amp::ampf<Precision> sminoa;
6547  amp::ampf<Precision> thresh;
6549  amp::ampf<Precision> tolmul;
6550  amp::ampf<Precision> unfl;
6555  int maxitr;
6556  bool matrixsplitflag;
6557  bool iterflag;
6562  bool fwddir;
6564  int mm1;
6565  int mm0;
6566  bool bchangedir;
6567  int uend;
6568  int cend;
6569  int vend;
6570 
6571 
6572  result = true;
6573  if( n==0 )
6574  {
6575  return result;
6576  }
6577  if( n==1 )
6578  {
6579  if( d(1)<0 )
6580  {
6581  d(1) = -d(1);
6582  if( ncvt>0 )
6583  {
6584  ap::vmul(vt.getrow(vstart, vstart, vstart+ncvt-1), -1);
6585  }
6586  }
6587  return result;
6588  }
6589 
6590  //
6591  // init
6592  //
6593  work0.setbounds(1, n-1);
6594  work1.setbounds(1, n-1);
6595  work2.setbounds(1, n-1);
6596  work3.setbounds(1, n-1);
6597  uend = ustart+ap::maxint(nru-1, 0);
6598  vend = vstart+ap::maxint(ncvt-1, 0);
6599  cend = cstart+ap::maxint(ncc-1, 0);
6600  utemp.setbounds(ustart, uend);
6601  vttemp.setbounds(vstart, vend);
6602  ctemp.setbounds(cstart, cend);
6603  maxitr = 12;
6604  fwddir = true;
6605 
6606  //
6607  // resize E from N-1 to N
6608  //
6609  etemp.setbounds(1, n);
6610  for(i=1; i<=n-1; i++)
6611  {
6612  etemp(i) = e(i);
6613  }
6614  e.setbounds(1, n);
6615  for(i=1; i<=n-1; i++)
6616  {
6617  e(i) = etemp(i);
6618  }
6619  e(n) = 0;
6620  idir = 0;
6621 
6622  //
6623  // Get machine constants
6624  //
6627 
6628  //
6629  // If matrix lower bidiagonal, rotate to be upper bidiagonal
6630  // by applying Givens rotations on the left
6631  //
6632  if( !isupper )
6633  {
6634  for(i=1; i<=n-1; i++)
6635  {
6636  rotations::generaterotation<Precision>(d(i), e(i), cs, sn, r);
6637  d(i) = r;
6638  e(i) = sn*d(i+1);
6639  d(i+1) = cs*d(i+1);
6640  work0(i) = cs;
6641  work1(i) = sn;
6642  }
6643 
6644  //
6645  // Update singular vectors if desired
6646  //
6647  if( nru>0 )
6648  {
6649  rotations::applyrotationsfromtheright<Precision>(fwddir, ustart, uend, 1+ustart-1, n+ustart-1, work0, work1, u, utemp);
6650  }
6651  if( ncc>0 )
6652  {
6653  rotations::applyrotationsfromtheleft<Precision>(fwddir, 1+cstart-1, n+cstart-1, cstart, cend, work0, work1, c, ctemp);
6654  }
6655  }
6656 
6657  //
6658  // Compute singular values to relative accuracy TOL
6659  // (By setting TOL to be negative, algorithm will compute
6660  // singular values to absolute accuracy ABS(TOL)*norm(input matrix))
6661  //
6662  tolmul = amp::maximum<Precision>(10, amp::minimum<Precision>(100, amp::pow<Precision>(eps, -amp::ampf<Precision>("0.125"))));
6663  tol = tolmul*eps;
6664  if( !isfractionalaccuracyrequired )
6665  {
6666  tol = -tol;
6667  }
6668 
6669  //
6670  // Compute approximate maximum, minimum singular values
6671  //
6672  smax = 0;
6673  for(i=1; i<=n; i++)
6674  {
6675  smax = amp::maximum<Precision>(smax, amp::abs<Precision>(d(i)));
6676  }
6677  for(i=1; i<=n-1; i++)
6678  {
6679  smax = amp::maximum<Precision>(smax, amp::abs<Precision>(e(i)));
6680  }
6681  sminl = 0;
6682  if( tol>=0 )
6683  {
6684 
6685  //
6686  // Relative accuracy desired
6687  //
6688  sminoa = amp::abs<Precision>(d(1));
6689  if( sminoa!=0 )
6690  {
6691  mu = sminoa;
6692  for(i=2; i<=n; i++)
6693  {
6694  mu = amp::abs<Precision>(d(i))*(mu/(mu+amp::abs<Precision>(e(i-1))));
6695  sminoa = amp::minimum<Precision>(sminoa, mu);
6696  if( sminoa==0 )
6697  {
6698  break;
6699  }
6700  }
6701  }
6702  sminoa = sminoa/amp::sqrt<Precision>(n);
6703  thresh = amp::maximum<Precision>(tol*sminoa, maxitr*n*n*unfl);
6704  }
6705  else
6706  {
6707 
6708  //
6709  // Absolute accuracy desired
6710  //
6711  thresh = amp::maximum<Precision>(amp::abs<Precision>(tol)*smax, maxitr*n*n*unfl);
6712  }
6713 
6714  //
6715  // Prepare for main iteration loop for the singular values
6716  // (MAXIT is the maximum number of passes through the inner
6717  // loop permitted before nonconvergence signalled.)
6718  //
6719  maxit = maxitr*n*n;
6720  iter = 0;
6721  oldll = -1;
6722  oldm = -1;
6723 
6724  //
6725  // M points to last element of unconverged part of matrix
6726  //
6727  m = n;
6728 
6729  //
6730  // Begin main iteration loop
6731  //
6732  while( true )
6733  {
6734 
6735  //
6736  // Check for convergence or exceeding iteration count
6737  //
6738  if( m<=1 )
6739  {
6740  break;
6741  }
6742  if( iter>maxit )
6743  {
6744  result = false;
6745  return result;
6746  }
6747 
6748  //
6749  // Find diagonal block of matrix to work on
6750  //
6751  if( tol<0 && amp::abs<Precision>(d(m))<=thresh )
6752  {
6753  d(m) = 0;
6754  }
6755  smax = amp::abs<Precision>(d(m));
6756  smin = smax;
6757  matrixsplitflag = false;
6758  for(lll=1; lll<=m-1; lll++)
6759  {
6760  ll = m-lll;
6761  abss = amp::abs<Precision>(d(ll));
6762  abse = amp::abs<Precision>(e(ll));
6763  if( tol<0 && abss<=thresh )
6764  {
6765  d(ll) = 0;
6766  }
6767  if( abse<=thresh )
6768  {
6769  matrixsplitflag = true;
6770  break;
6771  }
6772  smin = amp::minimum<Precision>(smin, abss);
6773  smax = amp::maximum<Precision>(smax, amp::maximum<Precision>(abss, abse));
6774  }
6775  if( !matrixsplitflag )
6776  {
6777  ll = 0;
6778  }
6779  else
6780  {
6781 
6782  //
6783  // Matrix splits since E(LL) = 0
6784  //
6785  e(ll) = 0;
6786  if( ll==m-1 )
6787  {
6788 
6789  //
6790  // Convergence of bottom singular value, return to top of loop
6791  //
6792  m = m-1;
6793  continue;
6794  }
6795  }
6796  ll = ll+1;
6797 
6798  //
6799  // E(LL) through E(M-1) are nonzero, E(LL-1) is zero
6800  //
6801  if( ll==m-1 )
6802  {
6803 
6804  //
6805  // 2 by 2 block, handle separately
6806  //
6807  svdv2x2<Precision>(d(m-1), e(m-1), d(m), sigmn, sigmx, sinr, cosr, sinl, cosl);
6808  d(m-1) = sigmx;
6809  e(m-1) = 0;
6810  d(m) = sigmn;
6811 
6812  //
6813  // Compute singular vectors, if desired
6814  //
6815  if( ncvt>0 )
6816  {
6817  mm0 = m+(vstart-1);
6818  mm1 = m-1+(vstart-1);
6819  ap::vmove(vttemp.getvector(vstart, vend), vt.getrow(mm1, vstart, vend), cosr);
6820  ap::vadd(vttemp.getvector(vstart, vend), vt.getrow(mm0, vstart, vend), sinr);
6821  ap::vmul(vt.getrow(mm0, vstart, vend), cosr);
6822  ap::vsub(vt.getrow(mm0, vstart, vend), vt.getrow(mm1, vstart, vend), sinr);
6823  ap::vmove(vt.getrow(mm1, vstart, vend), vttemp.getvector(vstart, vend));
6824  }
6825  if( nru>0 )
6826  {
6827  mm0 = m+ustart-1;
6828  mm1 = m-1+ustart-1;
6829  ap::vmove(utemp.getvector(ustart, uend), u.getcolumn(mm1, ustart, uend), cosl);
6830  ap::vadd(utemp.getvector(ustart, uend), u.getcolumn(mm0, ustart, uend), sinl);
6831  ap::vmul(u.getcolumn(mm0, ustart, uend), cosl);
6832  ap::vsub(u.getcolumn(mm0, ustart, uend), u.getcolumn(mm1, ustart, uend), sinl);
6833  ap::vmove(u.getcolumn(mm1, ustart, uend), utemp.getvector(ustart, uend));
6834  }
6835  if( ncc>0 )
6836  {
6837  mm0 = m+cstart-1;
6838  mm1 = m-1+cstart-1;
6839  ap::vmove(ctemp.getvector(cstart, cend), c.getrow(mm1, cstart, cend), cosl);
6840  ap::vadd(ctemp.getvector(cstart, cend), c.getrow(mm0, cstart, cend), sinl);
6841  ap::vmul(c.getrow(mm0, cstart, cend), cosl);
6842  ap::vsub(c.getrow(mm0, cstart, cend), c.getrow(mm1, cstart, cend), sinl);
6843  ap::vmove(c.getrow(mm1, cstart, cend), ctemp.getvector(cstart, cend));
6844  }
6845  m = m-2;
6846  continue;
6847  }
6848 
6849  //
6850  // If working on new submatrix, choose shift direction
6851  // (from larger end diagonal element towards smaller)
6852  //
6853  // Previously was
6854  // "if (LL>OLDM) or (M<OLDLL) then"
6855  // fixed thanks to Michael Rolle < m@rolle.name >
6856  // Very strange that LAPACK still contains it.
6857  //
6858  bchangedir = false;
6859  if( idir==1 && amp::abs<Precision>(d(ll))<amp::ampf<Precision>("1.0E-3")*amp::abs<Precision>(d(m)) )
6860  {
6861  bchangedir = true;
6862  }
6863  if( idir==2 && amp::abs<Precision>(d(m))<amp::ampf<Precision>("1.0E-3")*amp::abs<Precision>(d(ll)) )
6864  {
6865  bchangedir = true;
6866  }
6867  if( ll!=oldll || m!=oldm || bchangedir )
6868  {
6869  if( amp::abs<Precision>(d(ll))>=amp::abs<Precision>(d(m)) )
6870  {
6871 
6872  //
6873  // Chase bulge from top (big end) to bottom (small end)
6874  //
6875  idir = 1;
6876  }
6877  else
6878  {
6879 
6880  //
6881  // Chase bulge from bottom (big end) to top (small end)
6882  //
6883  idir = 2;
6884  }
6885  }
6886 
6887  //
6888  // Apply convergence tests
6889  //
6890  if( idir==1 )
6891  {
6892 
6893  //
6894  // Run convergence test in forward direction
6895  // First apply standard test to bottom of matrix
6896  //
6897  if( amp::abs<Precision>(e(m-1))<=amp::abs<Precision>(tol)*amp::abs<Precision>(d(m)) || tol<0 && amp::abs<Precision>(e(m-1))<=thresh )
6898  {
6899  e(m-1) = 0;
6900  continue;
6901  }
6902  if( tol>=0 )
6903  {
6904 
6905  //
6906  // If relative accuracy desired,
6907  // apply convergence criterion forward
6908  //
6909  mu = amp::abs<Precision>(d(ll));
6910  sminl = mu;
6911  iterflag = false;
6912  for(lll=ll; lll<=m-1; lll++)
6913  {
6914  if( amp::abs<Precision>(e(lll))<=tol*mu )
6915  {
6916  e(lll) = 0;
6917  iterflag = true;
6918  break;
6919  }
6920  sminlo = sminl;
6921  mu = amp::abs<Precision>(d(lll+1))*(mu/(mu+amp::abs<Precision>(e(lll))));
6922  sminl = amp::minimum<Precision>(sminl, mu);
6923  }
6924  if( iterflag )
6925  {
6926  continue;
6927  }
6928  }
6929  }
6930  else
6931  {
6932 
6933  //
6934  // Run convergence test in backward direction
6935  // First apply standard test to top of matrix
6936  //
6937  if( amp::abs<Precision>(e(ll))<=amp::abs<Precision>(tol)*amp::abs<Precision>(d(ll)) || tol<0 && amp::abs<Precision>(e(ll))<=thresh )
6938  {
6939  e(ll) = 0;
6940  continue;
6941  }
6942  if( tol>=0 )
6943  {
6944 
6945  //
6946  // If relative accuracy desired,
6947  // apply convergence criterion backward
6948  //
6949  mu = amp::abs<Precision>(d(m));
6950  sminl = mu;
6951  iterflag = false;
6952  for(lll=m-1; lll>=ll; lll--)
6953  {
6954  if( amp::abs<Precision>(e(lll))<=tol*mu )
6955  {
6956  e(lll) = 0;
6957  iterflag = true;
6958  break;
6959  }
6960  sminlo = sminl;
6961  mu = amp::abs<Precision>(d(lll))*(mu/(mu+amp::abs<Precision>(e(lll))));
6962  sminl = amp::minimum<Precision>(sminl, mu);
6963  }
6964  if( iterflag )
6965  {
6966  continue;
6967  }
6968  }
6969  }
6970  oldll = ll;
6971  oldm = m;
6972 
6973  //
6974  // Compute shift. First, test if shifting would ruin relative
6975  // accuracy, and if so set the shift to zero.
6976  //
6977  if( tol>=0 && n*tol*(sminl/smax)<=amp::maximum<Precision>(eps, amp::ampf<Precision>("0.01")*tol) )
6978  {
6979 
6980  //
6981  // Use a zero shift to avoid loss of relative accuracy
6982  //
6983  shift = 0;
6984  }
6985  else
6986  {
6987 
6988  //
6989  // Compute the shift from 2-by-2 block at end of matrix
6990  //
6991  if( idir==1 )
6992  {
6993  sll = amp::abs<Precision>(d(ll));
6994  svd2x2<Precision>(d(m-1), e(m-1), d(m), shift, r);
6995  }
6996  else
6997  {
6998  sll = amp::abs<Precision>(d(m));
6999  svd2x2<Precision>(d(ll), e(ll), d(ll+1), shift, r);
7000  }
7001 
7002  //
7003  // Test if shift negligible, and if so set to zero
7004  //
7005  if( sll>0 )
7006  {
7007  if( amp::sqr<Precision>(shift/sll)<eps )
7008  {
7009  shift = 0;
7010  }
7011  }
7012  }
7013 
7014  //
7015  // Increment iteration count
7016  //
7017  iter = iter+m-ll;
7018 
7019  //
7020  // If SHIFT = 0, do simplified QR iteration
7021  //
7022  if( shift==0 )
7023  {
7024  if( idir==1 )
7025  {
7026 
7027  //
7028  // Chase bulge from top to bottom
7029  // Save cosines and sines for later singular vector updates
7030  //
7031  cs = 1;
7032  oldcs = 1;
7033  for(i=ll; i<=m-1; i++)
7034  {
7035  rotations::generaterotation<Precision>(d(i)*cs, e(i), cs, sn, r);
7036  if( i>ll )
7037  {
7038  e(i-1) = oldsn*r;
7039  }
7040  rotations::generaterotation<Precision>(oldcs*r, d(i+1)*sn, oldcs, oldsn, tmp);
7041  d(i) = tmp;
7042  work0(i-ll+1) = cs;
7043  work1(i-ll+1) = sn;
7044  work2(i-ll+1) = oldcs;
7045  work3(i-ll+1) = oldsn;
7046  }
7047  h = d(m)*cs;
7048  d(m) = h*oldcs;
7049  e(m-1) = h*oldsn;
7050 
7051  //
7052  // Update singular vectors
7053  //
7054  if( ncvt>0 )
7055  {
7056  rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work0, work1, vt, vttemp);
7057  }
7058  if( nru>0 )
7059  {
7060  rotations::applyrotationsfromtheright<Precision>(fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work2, work3, u, utemp);
7061  }
7062  if( ncc>0 )
7063  {
7064  rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work2, work3, c, ctemp);
7065  }
7066 
7067  //
7068  // Test convergence
7069  //
7070  if( amp::abs<Precision>(e(m-1))<=thresh )
7071  {
7072  e(m-1) = 0;
7073  }
7074  }
7075  else
7076  {
7077 
7078  //
7079  // Chase bulge from bottom to top
7080  // Save cosines and sines for later singular vector updates
7081  //
7082  cs = 1;
7083  oldcs = 1;
7084  for(i=m; i>=ll+1; i--)
7085  {
7086  rotations::generaterotation<Precision>(d(i)*cs, e(i-1), cs, sn, r);
7087  if( i<m )
7088  {
7089  e(i) = oldsn*r;
7090  }
7091  rotations::generaterotation<Precision>(oldcs*r, d(i-1)*sn, oldcs, oldsn, tmp);
7092  d(i) = tmp;
7093  work0(i-ll) = cs;
7094  work1(i-ll) = -sn;
7095  work2(i-ll) = oldcs;
7096  work3(i-ll) = -oldsn;
7097  }
7098  h = d(ll)*cs;
7099  d(ll) = h*oldcs;
7100  e(ll) = h*oldsn;
7101 
7102  //
7103  // Update singular vectors
7104  //
7105  if( ncvt>0 )
7106  {
7107  rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work2, work3, vt, vttemp);
7108  }
7109  if( nru>0 )
7110  {
7111  rotations::applyrotationsfromtheright<Precision>(!fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work0, work1, u, utemp);
7112  }
7113  if( ncc>0 )
7114  {
7115  rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work0, work1, c, ctemp);
7116  }
7117 
7118  //
7119  // Test convergence
7120  //
7121  if( amp::abs<Precision>(e(ll))<=thresh )
7122  {
7123  e(ll) = 0;
7124  }
7125  }
7126  }
7127  else
7128  {
7129 
7130  //
7131  // Use nonzero shift
7132  //
7133  if( idir==1 )
7134  {
7135 
7136  //
7137  // Chase bulge from top to bottom
7138  // Save cosines and sines for later singular vector updates
7139  //
7140  f = (amp::abs<Precision>(d(ll))-shift)*(extsignbdsqr<Precision>(1, d(ll))+shift/d(ll));
7141  g = e(ll);
7142  for(i=ll; i<=m-1; i++)
7143  {
7144  rotations::generaterotation<Precision>(f, g, cosr, sinr, r);
7145  if( i>ll )
7146  {
7147  e(i-1) = r;
7148  }
7149  f = cosr*d(i)+sinr*e(i);
7150  e(i) = cosr*e(i)-sinr*d(i);
7151  g = sinr*d(i+1);
7152  d(i+1) = cosr*d(i+1);
7153  rotations::generaterotation<Precision>(f, g, cosl, sinl, r);
7154  d(i) = r;
7155  f = cosl*e(i)+sinl*d(i+1);
7156  d(i+1) = cosl*d(i+1)-sinl*e(i);
7157  if( i<m-1 )
7158  {
7159  g = sinl*e(i+1);
7160  e(i+1) = cosl*e(i+1);
7161  }
7162  work0(i-ll+1) = cosr;
7163  work1(i-ll+1) = sinr;
7164  work2(i-ll+1) = cosl;
7165  work3(i-ll+1) = sinl;
7166  }
7167  e(m-1) = f;
7168 
7169  //
7170  // Update singular vectors
7171  //
7172  if( ncvt>0 )
7173  {
7174  rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work0, work1, vt, vttemp);
7175  }
7176  if( nru>0 )
7177  {
7178  rotations::applyrotationsfromtheright<Precision>(fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work2, work3, u, utemp);
7179  }
7180  if( ncc>0 )
7181  {
7182  rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work2, work3, c, ctemp);
7183  }
7184 
7185  //
7186  // Test convergence
7187  //
7188  if( amp::abs<Precision>(e(m-1))<=thresh )
7189  {
7190  e(m-1) = 0;
7191  }
7192  }
7193  else
7194  {
7195 
7196  //
7197  // Chase bulge from bottom to top
7198  // Save cosines and sines for later singular vector updates
7199  //
7200  f = (amp::abs<Precision>(d(m))-shift)*(extsignbdsqr<Precision>(1, d(m))+shift/d(m));
7201  g = e(m-1);
7202  for(i=m; i>=ll+1; i--)
7203  {
7204  rotations::generaterotation<Precision>(f, g, cosr, sinr, r);
7205  if( i<m )
7206  {
7207  e(i) = r;
7208  }
7209  f = cosr*d(i)+sinr*e(i-1);
7210  e(i-1) = cosr*e(i-1)-sinr*d(i);
7211  g = sinr*d(i-1);
7212  d(i-1) = cosr*d(i-1);
7213  rotations::generaterotation<Precision>(f, g, cosl, sinl, r);
7214  d(i) = r;
7215  f = cosl*e(i-1)+sinl*d(i-1);
7216  d(i-1) = cosl*d(i-1)-sinl*e(i-1);
7217  if( i>ll+1 )
7218  {
7219  g = sinl*e(i-2);
7220  e(i-2) = cosl*e(i-2);
7221  }
7222  work0(i-ll) = cosr;
7223  work1(i-ll) = -sinr;
7224  work2(i-ll) = cosl;
7225  work3(i-ll) = -sinl;
7226  }
7227  e(ll) = f;
7228 
7229  //
7230  // Test convergence
7231  //
7232  if( amp::abs<Precision>(e(ll))<=thresh )
7233  {
7234  e(ll) = 0;
7235  }
7236 
7237  //
7238  // Update singular vectors if desired
7239  //
7240  if( ncvt>0 )
7241  {
7242  rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work2, work3, vt, vttemp);
7243  }
7244  if( nru>0 )
7245  {
7246  rotations::applyrotationsfromtheright<Precision>(!fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work0, work1, u, utemp);
7247  }
7248  if( ncc>0 )
7249  {
7250  rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work0, work1, c, ctemp);
7251  }
7252  }
7253  }
7254 
7255  //
7256  // QR iteration finished, go back and check convergence
7257  //
7258  continue;
7259  }
7260 
7261  //
7262  // All singular values converged, so make them positive
7263  //
7264  for(i=1; i<=n; i++)
7265  {
7266  if( d(i)<0 )
7267  {
7268  d(i) = -d(i);
7269 
7270  //
7271  // Change sign of singular vectors, if desired
7272  //
7273  if( ncvt>0 )
7274  {
7275  ap::vmul(vt.getrow(i+vstart-1, vstart, vend), -1);
7276  }
7277  }
7278  }
7279 
7280  //
7281  // Sort the singular values into decreasing order (insertion sort on
7282  // singular values, but only one transposition per singular vector)
7283  //
7284  for(i=1; i<=n-1; i++)
7285  {
7286 
7287  //
7288  // Scan for smallest D(I)
7289  //
7290  isub = 1;
7291  smin = d(1);
7292  for(j=2; j<=n+1-i; j++)
7293  {
7294  if( d(j)<=smin )
7295  {
7296  isub = j;
7297  smin = d(j);
7298  }
7299  }
7300  if( isub!=n+1-i )
7301  {
7302 
7303  //
7304  // Swap singular values and vectors
7305  //
7306  d(isub) = d(n+1-i);
7307  d(n+1-i) = smin;
7308  if( ncvt>0 )
7309  {
7310  j = n+1-i;
7311  ap::vmove(vttemp.getvector(vstart, vend), vt.getrow(isub+vstart-1, vstart, vend));
7312  ap::vmove(vt.getrow(isub+vstart-1, vstart, vend), vt.getrow(j+vstart-1, vstart, vend));
7313  ap::vmove(vt.getrow(j+vstart-1, vstart, vend), vttemp.getvector(vstart, vend));
7314  }
7315  if( nru>0 )
7316  {
7317  j = n+1-i;
7318  ap::vmove(utemp.getvector(ustart, uend), u.getcolumn(isub+ustart-1, ustart, uend));
7319  ap::vmove(u.getcolumn(isub+ustart-1, ustart, uend), u.getcolumn(j+ustart-1, ustart, uend));
7320  ap::vmove(u.getcolumn(j+ustart-1, ustart, uend), utemp.getvector(ustart, uend));
7321  }
7322  if( ncc>0 )
7323  {
7324  j = n+1-i;
7325  ap::vmove(ctemp.getvector(cstart, cend), c.getrow(isub+cstart-1, cstart, cend));
7326  ap::vmove(c.getrow(isub+cstart-1, cstart, cend), c.getrow(j+cstart-1, cstart, cend));
7327  ap::vmove(c.getrow(j+cstart-1, cstart, cend), ctemp.getvector(cstart, cend));
7328  }
7329  }
7330  }
7331  return result;
7332  }
7333 
7334 
7335  template<unsigned int Precision>
7338  {
7340 
7341 
7342  if( b>=0 )
7343  {
7344  result = amp::abs<Precision>(a);
7345  }
7346  else
7347  {
7348  result = -amp::abs<Precision>(a);
7349  }
7350  return result;
7351  }
7352 
7353 
7354  template<unsigned int Precision>
7358  amp::ampf<Precision>& ssmin,
7359  amp::ampf<Precision>& ssmax)
7360  {
7366  amp::ampf<Precision> fhmn;
7367  amp::ampf<Precision> fhmx;
7370 
7371 
7372  fa = amp::abs<Precision>(f);
7373  ga = amp::abs<Precision>(g);
7374  ha = amp::abs<Precision>(h);
7375  fhmn = amp::minimum<Precision>(fa, ha);
7376  fhmx = amp::maximum<Precision>(fa, ha);
7377  if( fhmn==0 )
7378  {
7379  ssmin = 0;
7380  if( fhmx==0 )
7381  {
7382  ssmax = ga;
7383  }
7384  else
7385  {
7386  ssmax = amp::maximum<Precision>(fhmx, ga)*amp::sqrt<Precision>(1+amp::sqr<Precision>(amp::minimum<Precision>(fhmx, ga)/amp::maximum<Precision>(fhmx, ga)));
7387  }
7388  }
7389  else
7390  {
7391  if( ga<fhmx )
7392  {
7393  aas = 1+fhmn/fhmx;
7394  at = (fhmx-fhmn)/fhmx;
7395  au = amp::sqr<Precision>(ga/fhmx);
7396  c = 2/(amp::sqrt<Precision>(aas*aas+au)+amp::sqrt<Precision>(at*at+au));
7397  ssmin = fhmn*c;
7398  ssmax = fhmx/c;
7399  }
7400  else
7401  {
7402  au = fhmx/ga;
7403  if( au==0 )
7404  {
7405 
7406  //
7407  // Avoid possible harmful underflow if exponent range
7408  // asymmetric (true SSMIN may not underflow even if
7409  // AU underflows)
7410  //
7411  ssmin = fhmn*fhmx/ga;
7412  ssmax = ga;
7413  }
7414  else
7415  {
7416  aas = 1+fhmn/fhmx;
7417  at = (fhmx-fhmn)/fhmx;
7418  c = 1/(amp::sqrt<Precision>(1+amp::sqr<Precision>(aas*au))+amp::sqrt<Precision>(1+amp::sqr<Precision>(at*au)));
7419  ssmin = fhmn*c*au;
7420  ssmin = ssmin+ssmin;
7421  ssmax = ga/(c+c);
7422  }
7423  }
7424  }
7425  }
7426 
7427 
7428  template<unsigned int Precision>
7432  amp::ampf<Precision>& ssmin,
7433  amp::ampf<Precision>& ssmax,
7434  amp::ampf<Precision>& snr,
7435  amp::ampf<Precision>& csr,
7436  amp::ampf<Precision>& snl,
7437  amp::ampf<Precision>& csl)
7438  {
7439  bool gasmal;
7440  bool swp;
7441  int pmax;
7460  amp::ampf<Precision> temp;
7461  amp::ampf<Precision> tsign;
7464 
7465 
7466  ft = f;
7467  fa = amp::abs<Precision>(ft);
7468  ht = h;
7469  ha = amp::abs<Precision>(h);
7470 
7471  //
7472  // PMAX points to the maximum absolute element of matrix
7473  // PMAX = 1 if F largest in absolute values
7474  // PMAX = 2 if G largest in absolute values
7475  // PMAX = 3 if H largest in absolute values
7476  //
7477  pmax = 1;
7478  swp = ha>fa;
7479  if( swp )
7480  {
7481 
7482  //
7483  // Now FA .ge. HA
7484  //
7485  pmax = 3;
7486  temp = ft;
7487  ft = ht;
7488  ht = temp;
7489  temp = fa;
7490  fa = ha;
7491  ha = temp;
7492  }
7493  gt = g;
7494  ga = amp::abs<Precision>(gt);
7495  if( ga==0 )
7496  {
7497 
7498  //
7499  // Diagonal matrix
7500  //
7501  ssmin = ha;
7502  ssmax = fa;
7503  clt = 1;
7504  crt = 1;
7505  slt = 0;
7506  srt = 0;
7507  }
7508  else
7509  {
7510  gasmal = true;
7511  if( ga>fa )
7512  {
7513  pmax = 2;
7515  {
7516 
7517  //
7518  // Case of very large GA
7519  //
7520  gasmal = false;
7521  ssmax = ga;
7522  if( ha>1 )
7523  {
7524  v = ga/ha;
7525  ssmin = fa/v;
7526  }
7527  else
7528  {
7529  v = fa/ga;
7530  ssmin = v*ha;
7531  }
7532  clt = 1;
7533  slt = ht/gt;
7534  srt = 1;
7535  crt = ft/gt;
7536  }
7537  }
7538  if( gasmal )
7539  {
7540 
7541  //
7542  // Normal case
7543  //
7544  d = fa-ha;
7545  if( d==fa )
7546  {
7547  l = 1;
7548  }
7549  else
7550  {
7551  l = d/fa;
7552  }
7553  m = gt/ft;
7554  t = 2-l;
7555  mm = m*m;
7556  tt = t*t;
7557  s = amp::sqrt<Precision>(tt+mm);
7558  if( l==0 )
7559  {
7560  r = amp::abs<Precision>(m);
7561  }
7562  else
7563  {
7564  r = amp::sqrt<Precision>(l*l+mm);
7565  }
7566  a = amp::ampf<Precision>("0.5")*(s+r);
7567  ssmin = ha/a;
7568  ssmax = fa*a;
7569  if( mm==0 )
7570  {
7571 
7572  //
7573  // Note that M is very tiny
7574  //
7575  if( l==0 )
7576  {
7577  t = extsignbdsqr<Precision>(2, ft)*extsignbdsqr<Precision>(1, gt);
7578  }
7579  else
7580  {
7581  t = gt/extsignbdsqr<Precision>(d, ft)+m/t;
7582  }
7583  }
7584  else
7585  {
7586  t = (m/(s+t)+m/(r+l))*(1+a);
7587  }
7588  l = amp::sqrt<Precision>(t*t+4);
7589  crt = 2/l;
7590  srt = t/l;
7591  clt = (crt+srt*m)/a;
7592  v = ht/ft;
7593  slt = v*srt/a;
7594  }
7595  }
7596  if( swp )
7597  {
7598  csl = srt;
7599  snl = crt;
7600  csr = slt;
7601  snr = clt;
7602  }
7603  else
7604  {
7605  csl = clt;
7606  snl = slt;
7607  csr = crt;
7608  snr = srt;
7609  }
7610 
7611  //
7612  // Correct signs of SSMAX and SSMIN
7613  //
7614  if( pmax==1 )
7615  {
7616  tsign = extsignbdsqr<Precision>(1, csr)*extsignbdsqr<Precision>(1, csl)*extsignbdsqr<Precision>(1, f);
7617  }
7618  if( pmax==2 )
7619  {
7620  tsign = extsignbdsqr<Precision>(1, snr)*extsignbdsqr<Precision>(1, csl)*extsignbdsqr<Precision>(1, g);
7621  }
7622  if( pmax==3 )
7623  {
7624  tsign = extsignbdsqr<Precision>(1, snr)*extsignbdsqr<Precision>(1, snl)*extsignbdsqr<Precision>(1, h);
7625  }
7626  ssmax = extsignbdsqr<Precision>(ssmax, tsign);
7627  ssmin = extsignbdsqr<Precision>(ssmin, tsign*extsignbdsqr<Precision>(1, f)*extsignbdsqr<Precision>(1, h));
7628  }
7629 } // namespace
7630 
7631 /* stuff included from ./svd.h */
7632 
7633 /*************************************************************************
7634 Copyright (c) 2005-2007, Sergey Bochkanov (ALGLIB project).
7635 
7636 Redistribution and use in source and binary forms, with or without
7637 modification, are permitted provided that the following conditions are
7638 met:
7639 
7640 - Redistributions of source code must retain the above copyright
7641  notice, this list of conditions and the following disclaimer.
7642 
7643 - Redistributions in binary form must reproduce the above copyright
7644  notice, this list of conditions and the following disclaimer listed
7645  in this license in the documentation and/or other materials
7646  provided with the distribution.
7647 
7648 - Neither the name of the copyright holders nor the names of its
7649  contributors may be used to endorse or promote products derived from
7650  this software without specific prior written permission.
7651 
7652 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
7653 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
7654 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
7655 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
7656 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
7657 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
7658 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
7659 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
7660 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7661 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
7662 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7663 *************************************************************************/
7664 
7665 /*MAKEHEADER*/
7666 
7667 /*MAKEHEADER*/
7668 
7669 /*MAKEHEADER*/
7670 
7671 /*MAKEHEADER*/
7672 
7673 /*MAKEHEADER*/
7674 
7675 /*MAKEHEADER*/
7676 
7677 /*MAKEHEADER*/
7678 
7679 /*MAKEHEADER*/
7680 
7681 /*MAKEHEADER*/
7682 
7683 namespace svd
7684 {
7685  template<unsigned int Precision>
7687  int m,
7688  int n,
7689  int uneeded,
7690  int vtneeded,
7691  int additionalmemory,
7695  template<unsigned int Precision>
7697  int m,
7698  int n,
7699  int uneeded,
7700  int vtneeded,
7701  int additionalmemory,
7705 
7706 
7707  /*************************************************************************
7708  Singular value decomposition of a rectangular matrix.
7709 
7710  The algorithm calculates the singular value decomposition of a matrix of
7711  size MxN: A = U * S * V^T
7712 
7713  The algorithm finds the singular values and, optionally, matrices U and V^T.
7714  The algorithm can find both first min(M,N) columns of matrix U and rows of
7715  matrix V^T (singular vectors), and matrices U and V^T wholly (of sizes MxM
7716  and NxN respectively).
7717 
7718  Take into account that the subroutine does not return matrix V but V^T.
7719 
7720  Input parameters:
7721  A - matrix to be decomposed.
7722  Array whose indexes range within [0..M-1, 0..N-1].
7723  M - number of rows in matrix A.
7724  N - number of columns in matrix A.
7725  UNeeded - 0, 1 or 2. See the description of the parameter U.
7726  VTNeeded - 0, 1 or 2. See the description of the parameter VT.
7727  AdditionalMemory -
7728  If the parameter:
7729  memory (lower requirements, lower performance).
7730  * equals 1, the algorithm uses additional
7731  memory of size min(M,N)*min(M,N) of real numbers.
7732  It often speeds up the algorithm.
7733  * equals 2, the algorithm uses additional
7734  memory of size M*min(M,N) of real numbers.
7735  It allows to get a maximum performance.
7736  The recommended value of the parameter is 2.
7737 
7738  Output parameters:
7739  W - contains singular values in descending order.
7740  U - if UNeeded=0, U isn't changed, the left singular vectors
7741  are not calculated.
7742  if Uneeded=1, U contains left singular vectors (first
7743  min(M,N) columns of matrix U). Array whose indexes range
7744  within [0..M-1, 0..Min(M,N)-1].
7745  if UNeeded=2, U contains matrix U wholly. Array whose
7746  indexes range within [0..M-1, 0..M-1].
7747  are not calculated.
7748  if VTNeeded=1, VT contains right singular vectors (first
7749  min(M,N) rows of matrix V^T). Array whose indexes range
7750  within [0..min(M,N)-1, 0..N-1].
7751  if VTNeeded=2, VT contains matrix V^T wholly. Array whose
7752  indexes range within [0..N-1, 0..N-1].
7753 
7754  -- ALGLIB --
7755  Copyright 2005 by Bochkanov Sergey
7756  *************************************************************************/
7757  template<unsigned int Precision>
7759  int m,
7760  int n,
7761  int uneeded,
7762  int vtneeded,
7763  int additionalmemory,
7767  {
7768  bool result;
7775  bool isupper;
7776  int minmn;
7777  int ncu;
7778  int nrvt;
7779  int nru;
7780  int ncvt;
7781  int i;
7782  int j;
7783  int im1;
7785 
7786 
7787  result = true;
7788  if( m==0 || n==0 )
7789  {
7790  return result;
7791  }
7792  ap::ap_error::make_assertion(uneeded>=0 && uneeded<=2);
7793  ap::ap_error::make_assertion(vtneeded>=0 && vtneeded<=2);
7794  ap::ap_error::make_assertion(additionalmemory>=0 && additionalmemory<=2);
7795 
7796  //
7797  // initialize
7798  //
7799  minmn = ap::minint(m, n);
7800  w.setbounds(1, minmn);
7801  ncu = 0;
7802  nru = 0;
7803  if( uneeded==1 )
7804  {
7805  nru = m;
7806  ncu = minmn;
7807  u.setbounds(0, nru-1, 0, ncu-1);
7808  }
7809  if( uneeded==2 )
7810  {
7811  nru = m;
7812  ncu = m;
7813  u.setbounds(0, nru-1, 0, ncu-1);
7814  }
7815  nrvt = 0;
7816  ncvt = 0;
7817  if( vtneeded==1 )
7818  {
7819  nrvt = minmn;
7820  ncvt = n;
7821  vt.setbounds(0, nrvt-1, 0, ncvt-1);
7822  }
7823  if( vtneeded==2 )
7824  {
7825  nrvt = n;
7826  ncvt = n;
7827  vt.setbounds(0, nrvt-1, 0, ncvt-1);
7828  }
7829 
7830  //
7831  // M much larger than N
7832  // Use bidiagonal reduction with QR-decomposition
7833  //
7834  if( m>amp::ampf<Precision>("1.6")*n )
7835  {
7836  if( uneeded==0 )
7837  {
7838 
7839  //
7840  // No left singular vectors to be computed
7841  //
7842  qr::rmatrixqr<Precision>(a, m, n, tau);
7843  for(i=0; i<=n-1; i++)
7844  {
7845  for(j=0; j<=i-1; j++)
7846  {
7847  a(i,j) = 0;
7848  }
7849  }
7850  bidiagonal::rmatrixbd<Precision>(a, n, n, tauq, taup);
7851  bidiagonal::rmatrixbdunpackpt<Precision>(a, n, n, taup, nrvt, vt);
7852  bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, n, n, isupper, w, e);
7853  result = bdsvd::rmatrixbdsvd<Precision>(w, e, n, isupper, false, u, 0, a, 0, vt, ncvt);
7854  return result;
7855  }
7856  else
7857  {
7858 
7859  //
7860  // Left singular vectors (may be full matrix U) to be computed
7861  //
7862  qr::rmatrixqr<Precision>(a, m, n, tau);
7863  qr::rmatrixqrunpackq<Precision>(a, m, n, tau, ncu, u);
7864  for(i=0; i<=n-1; i++)
7865  {
7866  for(j=0; j<=i-1; j++)
7867  {
7868  a(i,j) = 0;
7869  }
7870  }
7871  bidiagonal::rmatrixbd<Precision>(a, n, n, tauq, taup);
7872  bidiagonal::rmatrixbdunpackpt<Precision>(a, n, n, taup, nrvt, vt);
7873  bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, n, n, isupper, w, e);
7874  if( additionalmemory<1 )
7875  {
7876 
7877  //
7878  // No additional memory can be used
7879  //
7880  bidiagonal::rmatrixbdmultiplybyq<Precision>(a, n, n, tauq, u, m, n, true, false);
7881  result = bdsvd::rmatrixbdsvd<Precision>(w, e, n, isupper, false, u, m, a, 0, vt, ncvt);
7882  }
7883  else
7884  {
7885 
7886  //
7887  // Large U. Transforming intermediate matrix T2
7888  //
7889  work.setbounds(1, ap::maxint(m, n));
7890  bidiagonal::rmatrixbdunpackq<Precision>(a, n, n, tauq, n, t2);
7891  blas::copymatrix<Precision>(u, 0, m-1, 0, n-1, a, 0, m-1, 0, n-1);
7892  blas::inplacetranspose<Precision>(t2, 0, n-1, 0, n-1, work);
7893  result = bdsvd::rmatrixbdsvd<Precision>(w, e, n, isupper, false, u, 0, t2, n, vt, ncvt);
7894  blas::matrixmatrixmultiply<Precision>(a, 0, m-1, 0, n-1, false, t2, 0, n-1, 0, n-1, true, amp::ampf<Precision>("1.0"), u, 0, m-1, 0, n-1, amp::ampf<Precision>("0.0"), work);
7895  }
7896  return result;
7897  }
7898  }
7899 
7900  //
7901  // N much larger than M
7902  // Use bidiagonal reduction with LQ-decomposition
7903  //
7904  if( n>amp::ampf<Precision>("1.6")*m )
7905  {
7906  if( vtneeded==0 )
7907  {
7908 
7909  //
7910  // No right singular vectors to be computed
7911  //
7912  lq::rmatrixlq<Precision>(a, m, n, tau);
7913  for(i=0; i<=m-1; i++)
7914  {
7915  for(j=i+1; j<=m-1; j++)
7916  {
7917  a(i,j) = 0;
7918  }
7919  }
7920  bidiagonal::rmatrixbd<Precision>(a, m, m, tauq, taup);
7921  bidiagonal::rmatrixbdunpackq<Precision>(a, m, m, tauq, ncu, u);
7922  bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, m, isupper, w, e);
7923  work.setbounds(1, m);
7924  blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7925  result = bdsvd::rmatrixbdsvd<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, 0);
7926  blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7927  return result;
7928  }
7929  else
7930  {
7931 
7932  //
7933  // Right singular vectors (may be full matrix VT) to be computed
7934  //
7935  lq::rmatrixlq<Precision>(a, m, n, tau);
7936  lq::rmatrixlqunpackq<Precision>(a, m, n, tau, nrvt, vt);
7937  for(i=0; i<=m-1; i++)
7938  {
7939  for(j=i+1; j<=m-1; j++)
7940  {
7941  a(i,j) = 0;
7942  }
7943  }
7944  bidiagonal::rmatrixbd<Precision>(a, m, m, tauq, taup);
7945  bidiagonal::rmatrixbdunpackq<Precision>(a, m, m, tauq, ncu, u);
7946  bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, m, isupper, w, e);
7947  work.setbounds(1, ap::maxint(m, n));
7948  blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7949  if( additionalmemory<1 )
7950  {
7951 
7952  //
7953  // No additional memory available
7954  //
7955  bidiagonal::rmatrixbdmultiplybyp<Precision>(a, m, m, taup, vt, m, n, false, true);
7956  result = bdsvd::rmatrixbdsvd<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, n);
7957  }
7958  else
7959  {
7960 
7961  //
7962  // Large VT. Transforming intermediate matrix T2
7963  //
7964  bidiagonal::rmatrixbdunpackpt<Precision>(a, m, m, taup, m, t2);
7965  result = bdsvd::rmatrixbdsvd<Precision>(w, e, m, isupper, false, a, 0, u, nru, t2, m);
7966  blas::copymatrix<Precision>(vt, 0, m-1, 0, n-1, a, 0, m-1, 0, n-1);
7967  blas::matrixmatrixmultiply<Precision>(t2, 0, m-1, 0, m-1, false, a, 0, m-1, 0, n-1, false, amp::ampf<Precision>("1.0"), vt, 0, m-1, 0, n-1, amp::ampf<Precision>("0.0"), work);
7968  }
7969  blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7970  return result;
7971  }
7972  }
7973 
7974  //
7975  // M<=N
7976  // We can use inplace transposition of U to get rid of columnwise operations
7977  //
7978  if( m<=n )
7979  {
7980  bidiagonal::rmatrixbd<Precision>(a, m, n, tauq, taup);
7981  bidiagonal::rmatrixbdunpackq<Precision>(a, m, n, tauq, ncu, u);
7982  bidiagonal::rmatrixbdunpackpt<Precision>(a, m, n, taup, nrvt, vt);
7983  bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, n, isupper, w, e);
7984  work.setbounds(1, m);
7985  blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7986  result = bdsvd::rmatrixbdsvd<Precision>(w, e, minmn, isupper, false, a, 0, u, nru, vt, ncvt);
7987  blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7988  return result;
7989  }
7990 
7991  //
7992  // Simple bidiagonal reduction
7993  //
7994  bidiagonal::rmatrixbd<Precision>(a, m, n, tauq, taup);
7995  bidiagonal::rmatrixbdunpackq<Precision>(a, m, n, tauq, ncu, u);
7996  bidiagonal::rmatrixbdunpackpt<Precision>(a, m, n, taup, nrvt, vt);
7997  bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, n, isupper, w, e);
7998  if( additionalmemory<2 || uneeded==0 )
7999  {
8000 
8001  //
8002  // We cant use additional memory or there is no need in such operations
8003  //
8004  result = bdsvd::rmatrixbdsvd<Precision>(w, e, minmn, isupper, false, u, nru, a, 0, vt, ncvt);
8005  }
8006  else
8007  {
8008 
8009  //
8010  // We can use additional memory
8011  //
8012  t2.setbounds(0, minmn-1, 0, m-1);
8013  blas::copyandtranspose<Precision>(u, 0, m-1, 0, minmn-1, t2, 0, minmn-1, 0, m-1);
8014  result = bdsvd::rmatrixbdsvd<Precision>(w, e, minmn, isupper, false, u, 0, t2, m, vt, ncvt);
8015  blas::copyandtranspose<Precision>(t2, 0, minmn-1, 0, m-1, u, 0, m-1, 0, minmn-1);
8016  }
8017  return result;
8018  }
8019 
8020 
8021  /*************************************************************************
8022  Obsolete 1-based subroutine.
8023  See RMatrixSVD for 0-based replacement.
8024  *************************************************************************/
8025  template<unsigned int Precision>
8027  int m,
8028  int n,
8029  int uneeded,
8030  int vtneeded,
8031  int additionalmemory,
8035  {
8036  bool result;
8043  bool isupper;
8044  int minmn;
8045  int ncu;
8046  int nrvt;
8047  int nru;
8048  int ncvt;
8049  int i;
8050  int j;
8051  int im1;
8053 
8054 
8055  result = true;
8056  if( m==0 || n==0 )
8057  {
8058  return result;
8059  }
8060  ap::ap_error::make_assertion(uneeded>=0 && uneeded<=2);
8061  ap::ap_error::make_assertion(vtneeded>=0 && vtneeded<=2);
8062  ap::ap_error::make_assertion(additionalmemory>=0 && additionalmemory<=2);
8063 
8064  //
8065  // initialize
8066  //
8067  minmn = ap::minint(m, n);
8068  w.setbounds(1, minmn);
8069  ncu = 0;
8070  nru = 0;
8071  if( uneeded==1 )
8072  {
8073  nru = m;
8074  ncu = minmn;
8075  u.setbounds(1, nru, 1, ncu);
8076  }
8077  if( uneeded==2 )
8078  {
8079  nru = m;
8080  ncu = m;
8081  u.setbounds(1, nru, 1, ncu);
8082  }
8083  nrvt = 0;
8084  ncvt = 0;
8085  if( vtneeded==1 )
8086  {
8087  nrvt = minmn;
8088  ncvt = n;
8089  vt.setbounds(1, nrvt, 1, ncvt);
8090  }
8091  if( vtneeded==2 )
8092  {
8093  nrvt = n;
8094  ncvt = n;
8095  vt.setbounds(1, nrvt, 1, ncvt);
8096  }
8097 
8098  //
8099  // M much larger than N
8100  // Use bidiagonal reduction with QR-decomposition
8101  //
8102  if( m>amp::ampf<Precision>("1.6")*n )
8103  {
8104  if( uneeded==0 )
8105  {
8106 
8107  //
8108  // No left singular vectors to be computed
8109  //
8110  qr::qrdecomposition<Precision>(a, m, n, tau);
8111  for(i=2; i<=n; i++)
8112  {
8113  for(j=1; j<=i-1; j++)
8114  {
8115  a(i,j) = 0;
8116  }
8117  }
8118  bidiagonal::tobidiagonal<Precision>(a, n, n, tauq, taup);
8119  bidiagonal::unpackptfrombidiagonal<Precision>(a, n, n, taup, nrvt, vt);
8120  bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, n, n, isupper, w, e);
8121  result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, n, isupper, false, u, 0, a, 0, vt, ncvt);
8122  return result;
8123  }
8124  else
8125  {
8126 
8127  //
8128  // Left singular vectors (may be full matrix U) to be computed
8129  //
8130  qr::qrdecomposition<Precision>(a, m, n, tau);
8131  qr::unpackqfromqr<Precision>(a, m, n, tau, ncu, u);
8132  for(i=2; i<=n; i++)
8133  {
8134  for(j=1; j<=i-1; j++)
8135  {
8136  a(i,j) = 0;
8137  }
8138  }
8139  bidiagonal::tobidiagonal<Precision>(a, n, n, tauq, taup);
8140  bidiagonal::unpackptfrombidiagonal<Precision>(a, n, n, taup, nrvt, vt);
8141  bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, n, n, isupper, w, e);
8142  if( additionalmemory<1 )
8143  {
8144 
8145  //
8146  // No additional memory can be used
8147  //
8148  bidiagonal::multiplybyqfrombidiagonal<Precision>(a, n, n, tauq, u, m, n, true, false);
8149  result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, n, isupper, false, u, m, a, 0, vt, ncvt);
8150  }
8151  else
8152  {
8153 
8154  //
8155  // Large U. Transforming intermediate matrix T2
8156  //
8157  work.setbounds(1, ap::maxint(m, n));
8158  bidiagonal::unpackqfrombidiagonal<Precision>(a, n, n, tauq, n, t2);
8159  blas::copymatrix<Precision>(u, 1, m, 1, n, a, 1, m, 1, n);
8160  blas::inplacetranspose<Precision>(t2, 1, n, 1, n, work);
8161  result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, n, isupper, false, u, 0, t2, n, vt, ncvt);
8162  blas::matrixmatrixmultiply<Precision>(a, 1, m, 1, n, false, t2, 1, n, 1, n, true, amp::ampf<Precision>("1.0"), u, 1, m, 1, n, amp::ampf<Precision>("0.0"), work);
8163  }
8164  return result;
8165  }
8166  }
8167 
8168  //
8169  // N much larger than M
8170  // Use bidiagonal reduction with LQ-decomposition
8171  //
8172  if( n>amp::ampf<Precision>("1.6")*m )
8173  {
8174  if( vtneeded==0 )
8175  {
8176 
8177  //
8178  // No right singular vectors to be computed
8179  //
8180  lq::lqdecomposition<Precision>(a, m, n, tau);
8181  for(i=1; i<=m-1; i++)
8182  {
8183  for(j=i+1; j<=m; j++)
8184  {
8185  a(i,j) = 0;
8186  }
8187  }
8188  bidiagonal::tobidiagonal<Precision>(a, m, m, tauq, taup);
8189  bidiagonal::unpackqfrombidiagonal<Precision>(a, m, m, tauq, ncu, u);
8190  bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, m, isupper, w, e);
8191  work.setbounds(1, m);
8192  blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8193  result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, 0);
8194  blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8195  return result;
8196  }
8197  else
8198  {
8199 
8200  //
8201  // Right singular vectors (may be full matrix VT) to be computed
8202  //
8203  lq::lqdecomposition<Precision>(a, m, n, tau);
8204  lq::unpackqfromlq<Precision>(a, m, n, tau, nrvt, vt);
8205  for(i=1; i<=m-1; i++)
8206  {
8207  for(j=i+1; j<=m; j++)
8208  {
8209  a(i,j) = 0;
8210  }
8211  }
8212  bidiagonal::tobidiagonal<Precision>(a, m, m, tauq, taup);
8213  bidiagonal::unpackqfrombidiagonal<Precision>(a, m, m, tauq, ncu, u);
8214  bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, m, isupper, w, e);
8215  work.setbounds(1, ap::maxint(m, n));
8216  blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8217  if( additionalmemory<1 )
8218  {
8219 
8220  //
8221  // No additional memory available
8222  //
8223  bidiagonal::multiplybypfrombidiagonal<Precision>(a, m, m, taup, vt, m, n, false, true);
8224  result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, n);
8225  }
8226  else
8227  {
8228 
8229  //
8230  // Large VT. Transforming intermediate matrix T2
8231  //
8232  bidiagonal::unpackptfrombidiagonal<Precision>(a, m, m, taup, m, t2);
8233  result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, m, isupper, false, a, 0, u, nru, t2, m);
8234  blas::copymatrix<Precision>(vt, 1, m, 1, n, a, 1, m, 1, n);
8235  blas::matrixmatrixmultiply<Precision>(t2, 1, m, 1, m, false, a, 1, m, 1, n, false, amp::ampf<Precision>("1.0"), vt, 1, m, 1, n, amp::ampf<Precision>("0.0"), work);
8236  }
8237  blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8238  return result;
8239  }
8240  }
8241 
8242  //
8243  // M<=N
8244  // We can use inplace transposition of U to get rid of columnwise operations
8245  //
8246  if( m<=n )
8247  {
8248  bidiagonal::tobidiagonal<Precision>(a, m, n, tauq, taup);
8249  bidiagonal::unpackqfrombidiagonal<Precision>(a, m, n, tauq, ncu, u);
8250  bidiagonal::unpackptfrombidiagonal<Precision>(a, m, n, taup, nrvt, vt);
8251  bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, n, isupper, w, e);
8252  work.setbounds(1, m);
8253  blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8254  result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, minmn, isupper, false, a, 0, u, nru, vt, ncvt);
8255  blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8256  return result;
8257  }
8258 
8259  //
8260  // Simple bidiagonal reduction
8261  //
8262  bidiagonal::tobidiagonal<Precision>(a, m, n, tauq, taup);
8263  bidiagonal::unpackqfrombidiagonal<Precision>(a, m, n, tauq, ncu, u);
8264  bidiagonal::unpackptfrombidiagonal<Precision>(a, m, n, taup, nrvt, vt);
8265  bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, n, isupper, w, e);
8266  if( additionalmemory<2 || uneeded==0 )
8267  {
8268 
8269  //
8270  // We cant use additional memory or there is no need in such operations
8271  //
8272  result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, minmn, isupper, false, u, nru, a, 0, vt, ncvt);
8273  }
8274  else
8275  {
8276 
8277  //
8278  // We can use additional memory
8279  //
8280  t2.setbounds(1, minmn, 1, m);
8281  blas::copyandtranspose<Precision>(u, 1, m, 1, minmn, t2, 1, minmn, 1, m);
8282  result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, minmn, isupper, false, u, 0, t2, m, vt, ncvt);
8283  blas::copyandtranspose<Precision>(t2, 1, minmn, 1, m, u, 1, m, 1, minmn);
8284  }
8285  return result;
8286  }
8287 } // namespace
8288 
8289 #endif
8290 #endif
8291 
blas::pythag2
amp::ampf< Precision > pythag2(amp::ampf< Precision > x, amp::ampf< Precision > y)
Definition: blas.h:478
ap::conj
const complex conj(const complex &z)
Definition: ap.cpp:117
amp::cosh
const ampf< Precision > cosh(const ampf< Precision > &x)
Definition: amp.h:1047
amp::campf::campf
campf(const campf< Prec2 > &z)
Definition: svd_si.h:2114
amp::ampf::isZero
bool isZero() const
bidiagonal::unpackptfrombidiagonal
void unpackptfrombidiagonal(const ap::template_2d_array< amp::ampf< Precision > > &qp, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &taup, int ptrows, ap::template_2d_array< amp::ampf< Precision > > &pt)
Definition: bidiagonal.h:1196
ap::template_2d_array::getlowbound
int getlowbound(int iBoundNum) const
Definition: svd_si.h:929
omalloc.h
exponent
int exponent(const CanonicalForm &f, int q)
int exponent ( const CanonicalForm & f, int q )
Definition: gengftables-conway.cc:92
ap::template_2d_array::getcontent
T * getcontent()
Definition: svd_si.h:919
ap::machineepsilon
const double machineepsilon
Definition: svd_si.h:994
ap::operator+
const complex operator+(const complex &lhs)
Definition: ap.cpp:17
ap::template_2d_array::template_2d_array
template_2d_array(const template_2d_array &rhs)
Definition: svd_si.h:829
amp::ampf::ampf
ampf(const char *s)
Definition: svd_si.h:1130
ap::operator/
const complex operator/(const complex &lhs, const complex &rhs)
Definition: ap.cpp:50
rotations
Definition: rotations.h:45
ap::template_2d_array::operator()
T & operator()(int i1, int i2)
Definition: svd_si.h:889
rotations::generaterotation
void generaterotation(amp::ampf< Precision > f, amp::ampf< Precision > g, amp::ampf< Precision > &cs, amp::ampf< Precision > &sn, amp::ampf< Precision > &r)
Definition: rotations.h:355
reflections
Definition: reflections.h:45
ap::template_2d_array::template_2d_array
template_2d_array()
Definition: svd_si.h:817
ap::vmove
void vmove(raw_vector< T > vdst, const_raw_vector< T > vsrc)
Definition: ap.h:237
amp::ampf::InitializeAsDouble
void InitializeAsDouble(long double v)
Definition: amp.h:276
ap::complex::operator+=
complex & operator+=(const complex &z)
Definition: svd_si.h:82
j
int j
Definition: facHensel.cc:105
f
FILE * f
Definition: checklibs.c:9
ap::operator==
const bool operator==(const complex &lhs, const complex &rhs)
Definition: ap.cpp:11
amp::operator*=
campf< Precision > & operator*=(campf< Precision > &lhs, const campf< Precision > &rhs)
Definition: amp.h:1165
amp::ampf::rval
mpfr_record * rval
Definition: amp.h:235
ap::round
int round(double x)
Definition: ap.cpp:144
blas::vectornorm2
amp::ampf< Precision > vectornorm2(const ap::template_1d_array< amp::ampf< Precision > > &x, int i1, int i2)
Definition: blas.h:136
bdsvd::svd2x2
void svd2x2(amp::ampf< Precision > f, amp::ampf< Precision > g, amp::ampf< Precision > h, amp::ampf< Precision > &ssmin, amp::ampf< Precision > &ssmax)
Definition: bdsvd.h:1115
amp::ampf::toHex
std::string toHex() const
k
int k
Definition: cfEzgcd.cc:92
amp::operator+
const ampf< Precision > operator+(const ampf< Precision > &op1)
Definition: amp.h:644
amp::vMove
void vMove(ap::raw_vector< ampf< Precision > > vDst, ap::const_raw_vector< ampf< Precision > > vSrc)
Definition: amp.h:1328
blas::copyandtranspose
void copyandtranspose(const ap::template_2d_array< amp::ampf< Precision > > &a, int is1, int is2, int js1, int js2, ap::template_2d_array< amp::ampf< Precision > > &b, int id1, int id2, int jd1, int jd2)
Definition: blas.h:351
amp::sign
const int sign(const ampf< Precision > &x)
Definition: amp.h:703
amp::ampf::ampf
ampf(signed long v)
Definition: svd_si.h:1116
amp::ampf::getAlgoPascalEpsilon
static const ampf getAlgoPascalEpsilon()
Definition: amp.h:566
amp::ampf::getRandom
static const ampf getRandom()
amp::vMul
void vMul(ap::raw_vector< ampf< Precision > > vDst, T2 alpha)
Definition: amp.h:1439
amp::abscomplex
const ampf< Precision > abscomplex(const campf< Precision > &z)
Definition: amp.h:1208
x
Variable x
Definition: cfModGcd.cc:4023
ap::template_2d_array::getcontent
const T * getcontent() const
Definition: svd_si.h:924
y
const CanonicalForm int const CFList const Variable & y
Definition: facAbsFact.cc:57
amp::trunc
const signed long trunc(const ampf< Precision > &x)
Definition: amp.h:750
lq::unpackqfromlq
void unpackqfromlq(const ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &tau, int qrows, ap::template_2d_array< amp::ampf< Precision > > &q)
Definition: lq.h:342
ap::complex::operator*=
complex & operator*=(const complex &z)
Definition: svd_si.h:84
bidiagonal::rmatrixbdunpackq
void rmatrixbdunpackq(const ap::template_2d_array< amp::ampf< Precision > > &qp, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &tauq, int qcolumns, ap::template_2d_array< amp::ampf< Precision > > &q)
Definition: bidiagonal.h:348
ap::vadd
void vadd(raw_vector< T > vdst, const_raw_vector< T > vsrc)
Definition: ap.h:413
ap::template_1d_array::getvector
raw_vector< T > getvector(int iStart, int iEnd)
Definition: svd_si.h:784
amp::ampf::CheckPrecision
void CheckPrecision()
Definition: amp.h:245
result
return result
Definition: facAbsBiFact.cc:76
amp::frac
const ampf< Precision > frac(const ampf< Precision > &x)
Definition: amp.h:765
ap::template_1d_array::getcontent
T * getcontent()
Definition: svd_si.h:762
lq
Definition: lq.h:40
ap::template_2d_array::m_Vec
T * m_Vec
Definition: ap.h:964
ap::template_1d_array
Definition: ap.h:657
amp::ampf::getMinNumber
static const ampf getMinNumber()
ap::template_2d_array
Definition: ap.h:807
ap::template_2d_array::m_iLinearMember
long m_iLinearMember
Definition: ap.h:969
amp::mpfr_reference::initialize
void initialize(int Precision)
ap::template_2d_array::gethighbound
int gethighbound(int iBoundNum) const
Definition: svd_si.h:934
amp::mpfr_storage::deleteMpfr
static void deleteMpfr(mpfr_record *ref)
blas::matrixmatrixmultiply
void matrixmatrixmultiply(const ap::template_2d_array< amp::ampf< Precision > > &a, int ai1, int ai2, int aj1, int aj2, bool transa, const ap::template_2d_array< amp::ampf< Precision > > &b, int bi1, int bi2, int bj1, int bj2, bool transb, amp::ampf< Precision > alpha, ap::template_2d_array< amp::ampf< Precision > > &c, int ci1, int ci2, int cj1, int cj2, amp::ampf< Precision > beta, ap::template_1d_array< amp::ampf< Precision > > &work)
Definition: blas.h:505
amp::abs
const ampf< Precision > abs(const ampf< Precision > &x)
Definition: amp.h:714
amp::exp
const ampf< Precision > exp(const ampf< Precision > &x)
Definition: amp.h:1031
amp::sqrt
const ampf< Precision > sqrt(const ampf< Precision > &x)
Definition: amp.h:741
ap::const_raw_vector::const_raw_vector
const_raw_vector(const T *Data, int Length, int Step)
Definition: svd_si.h:147
amp::sinh
const ampf< Precision > sinh(const ampf< Precision > &x)
Definition: amp.h:1039
ap::abscomplex
const double abscomplex(const complex &z)
Definition: ap.cpp:97
lq::lqdecomposition
void lqdecomposition(ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, ap::template_1d_array< amp::ampf< Precision > > &tau)
Definition: lq.h:288
amp::ampf::ampf
ampf(unsigned short v)
Definition: svd_si.h:1121
amp::ampf::operator/=
ampf & operator/=(const T &v)
Definition: svd_si.h:1199
amp::ampf::getRandom
static const ampf getRandom()
Definition: amp.h:594
ap::template_2d_array::wrongColumn
bool wrongColumn(int j) const
Definition: ap.h:964
amp::campf::campf
campf(signed char v)
Definition: svd_si.h:2107
amp::ampf::ampf
ampf(signed int v)
Definition: svd_si.h:1118
amp::operator-
const ampf< Precision > operator-(const ampf< Precision > &op1)
Definition: amp.h:650
amp::ampf::ampf
ampf(const ampf &r)
Definition: svd_si.h:1135
ap::complex::complex
complex(const double &_x)
Definition: svd_si.h:71
amp::campf::operator=
campf & operator=(long double v)
Definition: amp.h:1097
amp::tanh
const ampf< Precision > tanh(const ampf< Precision > &x)
Definition: amp.h:1055
amp::vMoveNeg
void vMoveNeg(ap::raw_vector< ampf< Precision > > vDst, ap::const_raw_vector< ampf< Precision > > vSrc)
Definition: amp.h:1345
qr::unpackqfromqr
void unpackqfromqr(const ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &tau, int qcolumns, ap::template_2d_array< amp::ampf< Precision > > &q)
Definition: qr.h:354
ap::template_1d_array::getlowbound
int getlowbound(int iBoundNum=0) const
Definition: svd_si.h:773
amp::atan
const ampf< Precision > atan(const ampf< Precision > &x)
Definition: amp.h:991
amp::ampf::getReadPtr
mpfr_srcptr getReadPtr() const
Definition: amp.h:292
blas
Definition: blas.h:39
amp::ampf
Definition: amp.h:83
ap::template_1d_array::gethighbound
int gethighbound(int iBoundNum=0) const
Definition: svd_si.h:779
ap::template_2d_array::m_iConstOffset
long m_iConstOffset
Definition: ap.h:969
ap::template_1d_array::getvector
const_raw_vector< T > getvector(int iStart, int iEnd) const
Definition: svd_si.h:793
ap::maxreal
double maxreal(double m1, double m2)
Definition: ap.cpp:172
svd::svddecomposition
bool svddecomposition(ap::template_2d_array< amp::ampf< Precision > > a, int m, int n, int uneeded, int vtneeded, int additionalmemory, ap::template_1d_array< amp::ampf< Precision > > &w, ap::template_2d_array< amp::ampf< Precision > > &u, ap::template_2d_array< amp::ampf< Precision > > &vt)
Definition: svd.h:408
reflections::applyreflectionfromtheleft
void applyreflectionfromtheleft(ap::template_2d_array< amp::ampf< Precision > > &c, amp::ampf< Precision > tau, const ap::template_1d_array< amp::ampf< Precision > > &v, int m1, int m2, int n1, int n2, ap::template_1d_array< amp::ampf< Precision > > &work)
Definition: reflections.h:207
amp::ampf::getUlpOf
static const ampf getUlpOf(const ampf &x)
ap::template_2d_array::m_iLow1
long m_iLow1
Definition: ap.h:968
ap::template_1d_array::operator=
const template_1d_array & operator=(const template_1d_array &rhs)
Definition: svd_si.h:699
ap::complex::operator=
complex & operator=(const double &v)
Definition: ap.h:67
amp::ampf::getUlpOf
const ampf getUlpOf()
g
g
Definition: cfModGcd.cc:4031
ap::complex::complex
complex(const double &_x, const double &_y)
Definition: svd_si.h:72
amp::campf::campf
campf(float v)
Definition: svd_si.h:2100
ap::template_2d_array::setbounds
void setbounds(int iLow1, int iHigh1, int iLow2, int iHigh2)
Definition: svd_si.h:898
bidiagonal::tobidiagonal
void tobidiagonal(ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, ap::template_1d_array< amp::ampf< Precision > > &tauq, ap::template_1d_array< amp::ampf< Precision > > &taup)
Definition: bidiagonal.h:850
qr::rmatrixqrunpackr
void rmatrixqrunpackr(const ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, ap::template_2d_array< amp::ampf< Precision > > &r)
Definition: qr.h:269
ap::raw_vector
Definition: ap.h:168
ap::complex::y
double y
Definition: ap.h:100
ap::randomreal
double randomreal()
Definition: ap.cpp:133
amp::tan
const ampf< Precision > tan(const ampf< Precision > &x)
Definition: amp.h:967
ap::operator-
const complex operator-(const complex &lhs)
Definition: ap.cpp:20
amp::floor
const signed long floor(const ampf< Precision > &x)
Definition: amp.h:774
ap::template_1d_array::m_iVecSize
long m_iVecSize
Definition: ap.h:796
amp::conj
const campf< Precision > conj(const campf< Precision > &z)
Definition: amp.h:1226
ap::template_2d_array::m_iHigh2
long m_iHigh2
Definition: ap.h:968
amp::ampf::isFiniteNumber
bool isFiniteNumber() const
ap::complex::operator-=
complex & operator-=(const double &v)
Definition: svd_si.h:77
ap::minrealnumber
const double minrealnumber
Definition: svd_si.h:996
iter
CFFListIterator iter
Definition: facAbsBiFact.cc:54
amp::campf::y
ampf< Precision > y
Definition: amp.h:1126
amp::campf::campf
campf(unsigned int v)
Definition: svd_si.h:2104
bdsvd
Definition: bdsvd.h:46
amp::ampf::getUlp512
static const ampf getUlp512()
amp::mpfr_storage::getRandState
static gmp_randstate_t * getRandState()
Definition: amp.cpp:37
amp::operator/=
campf< Precision > & operator/=(campf< Precision > &lhs, const campf< Precision > &rhs)
Definition: amp.h:1201
amp::ampf::operator*=
ampf & operator*=(const T &v)
Definition: svd_si.h:1198
amp::mpfr_reference::mpfr_reference
mpfr_reference(const mpfr_reference &r)
amp::ampf::operator+=
ampf & operator+=(const T &v)
Definition: svd_si.h:1196
amp::ampf::operator=
ampf & operator=(long double v)
Definition: amp.h:141
w
const CanonicalForm & w
Definition: facAbsFact.cc:55
amp::ampf::getUlp
static const ampf getUlp()
Definition: amp.h:512
b
CanonicalForm b
Definition: cfModGcd.cc:4044
ap::sqr
double sqr(double x)
Definition: ap.cpp:159
amp::ampf::ampf
ampf(long double v)
Definition: svd_si.h:1113
amp::ampf::getAlgoPascalEpsilon
static const ampf getAlgoPascalEpsilon()
ap
Definition: ap.h:40
ap::template_2d_array::getrow
const_raw_vector< T > getrow(int iRow, int iColumnStart, int iColumnEnd) const
Definition: svd_si.h:963
amp::operator==
const bool operator==(const ampf< Precision > &op1, const ampf< Precision > &op2)
Definition: amp.h:605
amp::ampf::~ampf
~ampf()
Definition: svd_si.h:1100
bdsvd::bidiagonalsvddecompositioninternal
bool bidiagonalsvddecompositioninternal(ap::template_1d_array< amp::ampf< Precision > > &d, ap::template_1d_array< amp::ampf< Precision > > e, int n, bool isupper, bool isfractionalaccuracyrequired, ap::template_2d_array< amp::ampf< Precision > > &u, int ustart, int nru, ap::template_2d_array< amp::ampf< Precision > > &c, int cstart, int ncc, ap::template_2d_array< amp::ampf< Precision > > &vt, int vstart, int ncvt)
Definition: bdsvd.h:253
amp::log2
const ampf< Precision > log2(const ampf< Precision > &x)
Definition: amp.h:1015
amp::ampf::getAlgoPascalMaxNumber
static const ampf getAlgoPascalMaxNumber()
Definition: amp.h:572
amp::campf
Definition: amp.h:1075
amp::ampf::toDec
std::string toDec() const
mu
void mu(int **points, int sizePoints)
Definition: cfNewtonPolygon.cc:467
amp::campf::campf
campf(const ampf< Precision > &_x)
Definition: svd_si.h:2109
amp::ceil
const signed long ceil(const ampf< Precision > &x)
Definition: amp.h:789
ap::vmul
void vmul(raw_vector< T > vdst, T2 alpha)
Definition: ap.h:603
amp::ampf::ampf
ampf(mpfr_record *v)
Definition: svd_si.h:1111
amp::operator!=
const bool operator!=(const ampf< Precision > &op1, const ampf< Precision > &op2)
Definition: amp.h:611
ap::const_raw_vector::iStep
int iStep
Definition: ap.h:152
blas::copymatrix
void copymatrix(const ap::template_2d_array< amp::ampf< Precision > > &a, int is1, int is2, int js1, int js2, ap::template_2d_array< amp::ampf< Precision > > &b, int id1, int id2, int jd1, int jd2)
Definition: blas.h:288
ap::operator*
const complex operator*(const complex &lhs, const complex &rhs)
Definition: ap.cpp:41
ap::operator!=
const bool operator!=(const complex &lhs, const complex &rhs)
Definition: ap.cpp:14
lq::rmatrixlqunpackl
void rmatrixlqunpackl(const ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, ap::template_2d_array< amp::ampf< Precision > > &l)
Definition: lq.h:253
amp::vAdd
void vAdd(ap::raw_vector< ampf< Precision > > vDst, ap::const_raw_vector< ampf< Precision > > vSrc)
Definition: amp.h:1380
pi
#define pi
Definition: libparse.cc:1143
amp::acos
const ampf< Precision > acos(const ampf< Precision > &x)
Definition: amp.h:983
ap::iceil
int iceil(double x)
Definition: ap.cpp:153
reflections::generatereflection
void generatereflection(ap::template_1d_array< amp::ampf< Precision > > &x, int n, amp::ampf< Precision > &tau)
Definition: reflections.h:112
ap::template_2d_array::getcolumn
raw_vector< T > getcolumn(int iColumn, int iRowStart, int iRowEnd)
Definition: svd_si.h:939
amp::campf::campf
campf(signed int v)
Definition: svd_si.h:2103
amp::mpfr_reference::mpfr_reference
mpfr_reference()
amp::mpfr_record::refCount
unsigned int refCount
Definition: amp.h:36
ap::complex::operator*=
complex & operator*=(const double &v)
Definition: svd_si.h:78
ap::real_2d_array
template_2d_array< double > real_2d_array
Definition: ap.h:978
amp::pow
const ampf< Precision > pow(const ampf< Precision > &x, const ampf< Precision > &y)
Definition: amp.h:1063
ap::minreal
double minreal(double m1, double m2)
Definition: ap.cpp:177
rotations::applyrotationsfromtheright
void applyrotationsfromtheright(bool isforward, int m1, int m2, int n1, int n2, const ap::template_1d_array< amp::ampf< Precision > > &c, const ap::template_1d_array< amp::ampf< Precision > > &s, ap::template_2d_array< amp::ampf< Precision > > &a, ap::template_1d_array< amp::ampf< Precision > > &work)
Definition: rotations.h:238
i
int i
Definition: cfEzgcd.cc:125
ap::complex::operator+=
complex & operator+=(const double &v)
Definition: svd_si.h:76
amp::log
const ampf< Precision > log(const ampf< Precision > &x)
Definition: amp.h:1007
amp::halfpi
const ampf< Precision > halfpi()
Definition: amp.h:933
amp::campf::campf
campf(const ampf< Precision > &_x, const ampf< Precision > &_y)
Definition: svd_si.h:2110
res
CanonicalForm res
Definition: facAbsFact.cc:64
bidiagonal::multiplybyqfrombidiagonal
void multiplybyqfrombidiagonal(const ap::template_2d_array< amp::ampf< Precision > > &qp, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &tauq, ap::template_2d_array< amp::ampf< Precision > > &z, int zrows, int zcolumns, bool fromtheright, bool dotranspose)
Definition: bidiagonal.h:1055
bidiagonal::multiplybypfrombidiagonal
void multiplybypfrombidiagonal(const ap::template_2d_array< amp::ampf< Precision > > &qp, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &taup, ap::template_2d_array< amp::ampf< Precision > > &z, int zrows, int zcolumns, bool fromtheright, bool dotranspose)
Definition: bidiagonal.h:1270
blas::rowidxabsmax
int rowidxabsmax(const ap::template_2d_array< amp::ampf< Precision > > &x, int j1, int j2, int i)
Definition: blas.h:230
amp::mpfr_record::next
mpfr_record * next
Definition: amp.h:39
amp::ampf::toString
char * toString() const
Definition: amp.h:446
amp::ampf::getWritePtr
mpfr_ptr getWritePtr()
Definition: amp.h:303
amp::ampf::ampf
ampf(unsigned int v)
Definition: svd_si.h:1119
ap::complex::operator-=
complex & operator-=(const complex &z)
Definition: svd_si.h:83
amp::mpfr_storage::getList
static mpfr_record_ptr & getList(unsigned int Precision)
ap::real_1d_array
template_1d_array< double > real_1d_array
Definition: ap.h:974
bidiagonal::rmatrixbdunpackdiagonals
void rmatrixbdunpackdiagonals(const ap::template_2d_array< amp::ampf< Precision > > &b, int m, int n, bool &isupper, ap::template_1d_array< amp::ampf< Precision > > &d, ap::template_1d_array< amp::ampf< Precision > > &e)
Definition: bidiagonal.h:805
alpha
Variable alpha
Definition: facAbsBiFact.cc:52
amp::ampf::getMinNumber
static const ampf getMinNumber()
Definition: amp.h:558
ap::template_2d_array::wrongRow
bool wrongRow(int i) const
Definition: ap.h:963
svd
Definition: svd.h:64
T
static jList * T
Definition: janet.cc:31
amp::ldexp2
const ampf< Precision > ldexp2(const ampf< Precision > &x, mp_exp_t exponent)
Definition: amp.h:838
ap::template_2d_array::m_iLow2
long m_iLow2
Definition: ap.h:968
ap::template_1d_array::operator()
T & operator()(int i)
Definition: svd_si.h:734
ap::boolean_1d_array
template_1d_array< bool > boolean_1d_array
Definition: ap.h:976
amp::mpfr_reference::free
void free()
bidiagonal::rmatrixbdmultiplybyp
void rmatrixbdmultiplybyp(const ap::template_2d_array< amp::ampf< Precision > > &qp, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &taup, ap::template_2d_array< amp::ampf< Precision > > &z, int zrows, int zcolumns, bool fromtheright, bool dotranspose)
Definition: bidiagonal.h:649
h
static Poly * h
Definition: janet.cc:972
amp::sqr
const ampf< Precision > sqr(const ampf< Precision > &x)
Definition: amp.h:694
ap::complex
Definition: ap.h:60
ap::template_1d_array::m_iHigh
long m_iHigh
Definition: ap.h:797
blas::vectoridxabsmax
int vectoridxabsmax(const ap::template_1d_array< amp::ampf< Precision > > &x, int i1, int i2)
Definition: blas.h:183
mod2.h
amp::operator/
const ampf< Precision > operator/(const ampf< Precision > &op1, const ampf< Precision > &op2)
Definition: amp.h:683
ap::template_2d_array::m_iVecSize
long m_iVecSize
Definition: ap.h:967
feFopen.h
ap::raw_vector::raw_vector
raw_vector(T *Data, int Length, int Step)
Definition: svd_si.h:178
ap::boolean_2d_array
template_2d_array< bool > boolean_2d_array
Definition: ap.h:980
T2
void T2(ideal h)
Definition: cohomo.cc:2754
fa
BOOLEAN fa(leftv res, leftv args)
Definition: cohomo.cc:3007
amp::campf::campf
campf(const campf &z)
Definition: svd_si.h:2111
ap::const_raw_vector::GetLength
int GetLength() const
Definition: svd_si.h:153
amp::operator<
const bool operator<(const ampf< Precision > &op1, const ampf< Precision > &op2)
Definition: amp.h:617
amp::sin
const ampf< Precision > sin(const ampf< Precision > &x)
Definition: amp.h:951
omAlloc
#define omAlloc(size)
Definition: omAllocDecl.h:210
ap::template_2d_array::getrow
raw_vector< T > getrow(int iRow, int iColumnStart, int iColumnEnd)
Definition: svd_si.h:947
ap::sign
int sign(double x)
Definition: ap.cpp:126
ap::vsub
void vsub(raw_vector< T > vdst, const_raw_vector< T > vsrc)
Definition: ap.h:533
bidiagonal::unpackqfrombidiagonal
void unpackqfrombidiagonal(const ap::template_2d_array< amp::ampf< Precision > > &qp, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &tauq, int qcolumns, ap::template_2d_array< amp::ampf< Precision > > &q)
Definition: bidiagonal.h:981
amp::mpfr_reference::ref
mpfr_record * ref
Definition: amp.h:75
bidiagonal::rmatrixbdunpackpt
void rmatrixbdunpackpt(const ap::template_2d_array< amp::ampf< Precision > > &qp, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &taup, int ptrows, ap::template_2d_array< amp::ampf< Precision > > &pt)
Definition: bidiagonal.h:575
blas::columnidxabsmax
int columnidxabsmax(const ap::template_2d_array< amp::ampf< Precision > > &x, int i1, int i2, int j)
Definition: blas.h:206
amp::mpfr_storage::newMpfr
static mpfr_record * newMpfr(unsigned int Precision)
ap::const_raw_vector::iLength
int iLength
Definition: ap.h:152
amp::mpfr_reference::getWritePtr
mpfr_ptr getWritePtr()
amp::ampf::ampf
ampf(signed short v)
Definition: svd_si.h:1120
amp::mpfr_record_ptr
mpfr_record * mpfr_record_ptr
Definition: amp.h:42
ap::ifloor
int ifloor(double x)
Definition: ap.cpp:150
amp::ampf::ampf
ampf(const ampf< Precision2 > &r)
Definition: svd_si.h:1142
amp::mpfr_reference::~mpfr_reference
~mpfr_reference()
bdsvd::bidiagonalsvddecomposition
bool bidiagonalsvddecomposition(ap::template_1d_array< amp::ampf< Precision > > &d, ap::template_1d_array< amp::ampf< Precision > > e, int n, bool isupper, bool isfractionalaccuracyrequired, ap::template_2d_array< amp::ampf< Precision > > &u, int nru, ap::template_2d_array< amp::ampf< Precision > > &c, int ncc, ap::template_2d_array< amp::ampf< Precision > > &vt, int ncvt)
Definition: bdsvd.h:229
ap::template_2d_array::~template_2d_array
~template_2d_array()
Definition: svd_si.h:823
amp::twopi
const ampf< Precision > twopi()
Definition: amp.h:942
bdsvd::svdv2x2
void svdv2x2(amp::ampf< Precision > f, amp::ampf< Precision > g, amp::ampf< Precision > h, amp::ampf< Precision > &ssmin, amp::ampf< Precision > &ssmax, amp::ampf< Precision > &snr, amp::ampf< Precision > &csr, amp::ampf< Precision > &snl, amp::ampf< Precision > &csl)
Definition: bdsvd.h:1189
ap::complex::x
double x
Definition: ap.h:98
amp::mpfr_reference::operator=
mpfr_reference & operator=(const mpfr_reference &r)
Definition: amp.cpp:94
ap::integer_2d_array
template_2d_array< int > integer_2d_array
Definition: ap.h:977
amp::campf::campf
campf(long double v)
Definition: svd_si.h:2098
amp::operator+=
campf< Precision > & operator+=(campf< Precision > &lhs, const campf< Precision > &rhs)
Definition: amp.h:1145
amp::ampf::ampf
ampf(float v)
Definition: svd_si.h:1115
bidiagonal::rmatrixbd
void rmatrixbd(ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, ap::template_1d_array< amp::ampf< Precision > > &tauq, ap::template_1d_array< amp::ampf< Precision > > &taup)
Definition: bidiagonal.h:194
beta
Variable beta
Definition: facAbsFact.cc:99
ap::minint
int minint(int m1, int m2)
Definition: ap.cpp:167
amp::mpfr_reference
Definition: amp.h:62
amp::ampf::ampf
ampf(unsigned char v)
Definition: svd_si.h:1123
ap::ap_error::make_assertion
static void make_assertion(bool bClause)
Definition: svd_si.h:57
amp::ampf::getAlgoPascalMinNumber
static const ampf getAlgoPascalMinNumber()
amp::ampf::isNegativeNumber
bool isNegativeNumber() const
amp::mpfr_storage::getRandState
static gmp_randstate_t * getRandState()
reflections::applyreflectionfromtheright
void applyreflectionfromtheright(ap::template_2d_array< amp::ampf< Precision > > &c, amp::ampf< Precision > tau, const ap::template_1d_array< amp::ampf< Precision > > &v, int m1, int m2, int n1, int n2, ap::template_1d_array< amp::ampf< Precision > > &work)
Definition: reflections.h:280
amp::minimum
const ampf< Precision > minimum(const ampf< Precision > &x, const ampf< Precision > &y)
Definition: amp.h:732
amp::ampf::getUlp
static const ampf getUlp()
ap::pi
double pi()
Definition: ap.cpp:156
amp::ampf::getMaxNumber
static const ampf getMaxNumber()
amp::ampf::getUlp256
static const ampf getUlp256()
svd::rmatrixsvd
bool rmatrixsvd(ap::template_2d_array< amp::ampf< Precision > > a, int m, int n, int uneeded, int vtneeded, int additionalmemory, ap::template_1d_array< amp::ampf< Precision > > &w, ap::template_2d_array< amp::ampf< Precision > > &u, ap::template_2d_array< amp::ampf< Precision > > &vt)
Definition: svd.h:140
amp::ampf::InitializeAsString
void InitializeAsString(const char *s)
Definition: amp.h:284
amp::ampf::getUlpOf
const ampf getUlpOf()
Definition: amp.h:343
amp::ampf::isPositiveNumber
bool isPositiveNumber() const
qr::qrdecompositionunpacked
void qrdecompositionunpacked(ap::template_2d_array< amp::ampf< Precision > > a, int m, int n, ap::template_2d_array< amp::ampf< Precision > > &q, ap::template_2d_array< amp::ampf< Precision > > &r)
Definition: qr.h:420
bidiagonal::unpackdiagonalsfrombidiagonal
void unpackdiagonalsfrombidiagonal(const ap::template_2d_array< amp::ampf< Precision > > &b, int m, int n, bool &isupper, ap::template_1d_array< amp::ampf< Precision > > &d, ap::template_1d_array< amp::ampf< Precision > > &e)
Definition: bidiagonal.h:1413
amp::campf::campf
campf(signed short v)
Definition: svd_si.h:2105
amp::ampf::InitializeAsSLong
void InitializeAsSLong(signed long v)
Definition: amp.h:260
amp::atan2
const ampf< Precision > atan2(const ampf< Precision > &y, const ampf< Precision > &x)
Definition: amp.h:999
amp::ampf::ampf
ampf(const std::string &s)
Definition: svd_si.h:1129
amp::__AMP_BINARY_OPI
__AMP_BINARY_OPI(char) __AMP_BINARY_OPI(short) __AMP_BINARY_OPI(long) __AMP_BINARY_OPI(int) __AMP_BINARY_OPF(float) __AMP_BINARY_OPF(double) __AMP_BINARY_OPF(long double) template< unsigned int Precision > const ampf< Precision > pi()
Definition: amp.h:890
tau
void tau(int **points, int sizePoints, int k)
Definition: cfNewtonPolygon.cc:461
ap::complex::complex
complex()
Definition: svd_si.h:70
amp::campf::campf
campf()
Definition: svd_si.h:2097
amp::ampf::ampf
ampf(double v)
Definition: svd_si.h:1114
amp::ampf::operator-=
ampf & operator-=(const T &v)
Definition: svd_si.h:1197
amp::maximum
const ampf< Precision > maximum(const ampf< Precision > &x, const ampf< Precision > &y)
Definition: amp.h:723
amp::mpfr_record::Precision
unsigned int Precision
Definition: amp.h:37
amp::ampf::getMaxNumber
static const ampf getMaxNumber()
Definition: amp.h:549
blas::inplacetranspose
void inplacetranspose(ap::template_2d_array< amp::ampf< Precision > > &a, int i1, int i2, int j1, int j2, ap::template_1d_array< amp::ampf< Precision > > &work)
Definition: blas.h:318
__AMP_BINARY_OPF
#define __AMP_BINARY_OPF(type)
lq::rmatrixlq
void rmatrixlq(ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, ap::template_1d_array< amp::ampf< Precision > > &tau)
Definition: lq.h:113
amp::operator<=
const bool operator<=(const ampf< Precision > &op1, const ampf< Precision > &op2)
Definition: amp.h:629
bdsvd::rmatrixbdsvd
bool rmatrixbdsvd(ap::template_1d_array< amp::ampf< Precision > > &d, ap::template_1d_array< amp::ampf< Precision > > e, int n, bool isupper, bool isfractionalaccuracyrequired, ap::template_2d_array< amp::ampf< Precision > > &u, int nru, ap::template_2d_array< amp::ampf< Precision > > &c, int ncc, ap::template_2d_array< amp::ampf< Precision > > &vt, int ncvt)
Definition: bdsvd.h:186
bidiagonal
Definition: bidiagonal.h:46
ap::maxint
int maxint(int m1, int m2)
Definition: ap.cpp:162
ap::maxrealnumber
const double maxrealnumber
Definition: svd_si.h:995
ap::csqr
const complex csqr(const complex &z)
Definition: ap.cpp:120
amp::frexp2
const ampf< Precision > frexp2(const ampf< Precision > &x, mp_exp_t *exponent)
Definition: amp.h:819
amp::log10
const ampf< Precision > log10(const ampf< Precision > &x)
Definition: amp.h:1023
amp::operator>=
const bool operator>=(const ampf< Precision > &op1, const ampf< Precision > &op2)
Definition: amp.h:635
amp::asin
const ampf< Precision > asin(const ampf< Precision > &x)
Definition: amp.h:975
WerrorS
void WerrorS(const char *s)
Definition: feFopen.cc:24
blas::upperhessenberg1norm
amp::ampf< Precision > upperhessenberg1norm(const ap::template_2d_array< amp::ampf< Precision > > &a, int i1, int i2, int j1, int j2, ap::template_1d_array< amp::ampf< Precision > > &work)
Definition: blas.h:254
m
int m
Definition: cfEzgcd.cc:121
ap::complex_2d_array
template_2d_array< complex > complex_2d_array
Definition: ap.h:979
ap::trunc
int trunc(double x)
Definition: ap.cpp:147
ap::template_1d_array::wrongIdx
bool wrongIdx(int i) const
Definition: ap.h:793
qr::rmatrixqr
void rmatrixqr(ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, ap::template_1d_array< amp::ampf< Precision > > &tau)
Definition: qr.h:123
ap::template_2d_array::operator()
const T & operator()(int i1, int i2) const
Definition: svd_si.h:880
bdsvd::extsignbdsqr
amp::ampf< Precision > extsignbdsqr(amp::ampf< Precision > a, amp::ampf< Precision > b)
Definition: bdsvd.h:1096
NULL
#define NULL
Definition: omList.c:10
string
#define string
Definition: libparse.cc:1250
ap::template_1d_array::setbounds
void setbounds(int iLow, int iHigh)
Definition: svd_si.h:743
qr::qrdecomposition
void qrdecomposition(ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, ap::template_1d_array< amp::ampf< Precision > > &tau)
Definition: qr.h:303
ap::const_raw_vector::GetStep
int GetStep() const
Definition: svd_si.h:156
ap::vmoveneg
void vmoveneg(raw_vector< T > vdst, const_raw_vector< T > vsrc)
Definition: ap.h:295
amp::mpfr_record
Definition: amp.h:35
amp::vSub
void vSub(ap::raw_vector< ampf< Precision > > vDst, ap::const_raw_vector< ampf< Precision > > vSrc)
Definition: amp.h:1416
l
int l
Definition: cfEzgcd.cc:93
amp::ampf::getUlp512
static const ampf getUlp512()
Definition: amp.h:535
ap::vdotproduct
T vdotproduct(const_raw_vector< T > v1, const_raw_vector< T > v2)
Definition: ap.h:181
ap::template_1d_array::m_iLow
long m_iLow
Definition: ap.h:797
ap::randominteger
int randominteger(int maxv)
Definition: ap.cpp:141
amp::campf::x
ampf< Precision > x
Definition: amp.h:1126
ap::template_1d_array::getcontent
const T * getcontent() const
Definition: svd_si.h:767
amp::ampf::ampf
ampf(unsigned long v)
Definition: svd_si.h:1117
amp::campf::campf
campf(signed long v)
Definition: svd_si.h:2101
amp::unsigned32
unsigned long unsigned32
Definition: amp.h:31
v
const Variable & v
< [in] a sqrfree bivariate poly
Definition: facBivar.h:37
ap::template_2d_array::operator=
const template_2d_array & operator=(const template_2d_array &rhs)
Definition: svd_si.h:851
amp::mpfr_storage::newMpfr
static mpfr_record * newMpfr(unsigned int Precision)
Definition: amp.cpp:11
amp::ampf::ampf
ampf()
Definition: svd_si.h:1110
qr::rmatrixqrunpackq
void rmatrixqrunpackq(const ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &tau, int qcolumns, ap::template_2d_array< amp::ampf< Precision > > &q)
Definition: qr.h:193
amp::operator-=
campf< Precision > & operator-=(campf< Precision > &lhs, const campf< Precision > &rhs)
Definition: amp.h:1157
ap::template_2d_array::m_iHigh1
long m_iHigh1
Definition: ap.h:968
amp::campf::campf
campf(unsigned long v)
Definition: svd_si.h:2102
amp::ampf::ampf
ampf(signed char v)
Definition: svd_si.h:1122
ap::complex::operator/=
complex & operator/=(const double &v)
Definition: svd_si.h:79
s
const CanonicalForm int s
Definition: facAbsFact.cc:55
amp::campf::campf
campf(unsigned short v)
Definition: svd_si.h:2106
amp::ampf::toString
char * toString() const
amp::mpfr_reference::getReadPtr
mpfr_srcptr getReadPtr() const
ap::complex::operator/=
complex & operator/=(const complex &z)
Definition: svd_si.h:85
amp::ampf::getUlp256
static const ampf getUlp256()
Definition: amp.h:521
amp::mpfr_record::value
mpfr_t value
Definition: amp.h:38
ap::template_2d_array::getcolumn
const_raw_vector< T > getcolumn(int iColumn, int iRowStart, int iRowEnd) const
Definition: svd_si.h:955
qr
Definition: qr.h:46
ap::template_1d_array::operator()
const T & operator()(int i) const
Definition: svd_si.h:725
bidiagonal::rmatrixbdmultiplybyq
void rmatrixbdmultiplybyq(const ap::template_2d_array< amp::ampf< Precision > > &qp, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &tauq, ap::template_2d_array< amp::ampf< Precision > > &z, int zrows, int zcolumns, bool fromtheright, bool dotranspose)
Definition: bidiagonal.h:422
amp::mpfr_storage::deleteMpfr
static void deleteMpfr(mpfr_record *ref)
Definition: amp.cpp:30
amp::cos
const ampf< Precision > cos(const ampf< Precision > &x)
Definition: amp.h:959
ap::integer_1d_array
template_1d_array< int > integer_1d_array
Definition: ap.h:973
ap::template_2d_array::setcontent
void setcontent(int iLow1, int iHigh1, int iLow2, int iHigh2, const T *pContent)
Definition: svd_si.h:912
ap::template_1d_array::template_1d_array
template_1d_array(const template_1d_array &rhs)
Definition: svd_si.h:679
ap::complex_1d_array
template_1d_array< complex > complex_1d_array
Definition: ap.h:975
ap::const_raw_vector
Definition: ap.h:137
ap::template_1d_array::template_1d_array
template_1d_array()
Definition: svd_si.h:667
lq::lqdecompositionunpacked
void lqdecompositionunpacked(ap::template_2d_array< amp::ampf< Precision > > a, int m, int n, ap::template_2d_array< amp::ampf< Precision > > &l, ap::template_2d_array< amp::ampf< Precision > > &q)
Definition: lq.h:408
amp::operator*
const ampf< Precision > operator*(const ampf< Precision > &op1, const ampf< Precision > &op2)
Definition: amp.h:675
ap::const_raw_vector::GetData
const T * GetData() const
Definition: svd_si.h:150
amp
Definition: amp.h:20
amp::ampf::getAlgoPascalMinNumber
static const ampf getAlgoPascalMinNumber()
Definition: amp.h:583
ap::complex::complex
complex(const complex &z)
Definition: svd_si.h:73
amp::ampf::getAlgoPascalMaxNumber
static const ampf getAlgoPascalMaxNumber()
ap::const_raw_vector::pData
T * pData
Definition: ap.h:149
amp::signed32
signed long signed32
Definition: amp.h:32
blas::matrixvectormultiply
void matrixvectormultiply(const ap::template_2d_array< amp::ampf< Precision > > &a, int i1, int i2, int j1, int j2, bool trans, const ap::template_1d_array< amp::ampf< Precision > > &x, int ix1, int ix2, amp::ampf< Precision > alpha, ap::template_1d_array< amp::ampf< Precision > > &y, int iy1, int iy2, amp::ampf< Precision > beta)
Definition: blas.h:381
ap::template_1d_array::m_Vec
T * m_Vec
Definition: ap.h:793
amp::ampf::InitializeAsULong
void InitializeAsULong(unsigned long v)
Definition: amp.h:268
ap::template_1d_array::setcontent
void setcontent(int iLow, int iHigh, const T *pContent)
Definition: svd_si.h:754
lq::rmatrixlqunpackq
void rmatrixlqunpackq(const ap::template_2d_array< amp::ampf< Precision > > &a, int m, int n, const ap::template_1d_array< amp::ampf< Precision > > &tau, int qrows, ap::template_2d_array< amp::ampf< Precision > > &q)
Definition: lq.h:177
amp::campf::campf
campf(unsigned char v)
Definition: svd_si.h:2108
ap::template_1d_array::~template_1d_array
~template_1d_array()
Definition: svd_si.h:673
amp::ampf::toDouble
double toDouble() const
amp::operator>
const bool operator>(const ampf< Precision > &op1, const ampf< Precision > &op2)
Definition: amp.h:623
amp::round
const signed long round(const ampf< Precision > &x)
Definition: amp.h:804
amp::campf::campf
campf(double v)
Definition: svd_si.h:2099
amp::ampf::InitializeAsZero
void InitializeAsZero()
Definition: amp.h:252
ap::raw_vector::GetData
T * GetData()
Definition: svd_si.h:180
amp::csqr
const campf< Precision > csqr(const campf< Precision > &z)
Definition: amp.h:1232
rotations::applyrotationsfromtheleft
void applyrotationsfromtheleft(bool isforward, int m1, int m2, int n1, int n2, const ap::template_1d_array< amp::ampf< Precision > > &c, const ap::template_1d_array< amp::ampf< Precision > > &s, ap::template_2d_array< amp::ampf< Precision > > &a, ap::template_1d_array< amp::ampf< Precision > > &work)
Definition: rotations.h:100