3x3 complex matrix

Pages: 12
closed account (48T7M4Gy)
XCode handles this as follows via its toolbar menu:

Before starting choose a directory to store the data file, then

Product
Scheme
Edit scheme
Run
Working directory (about halfway down)
Tick - Use custom working directory and select the directory
Close
Superseded
Last edited on
Original matrix:
(1,1)	(2,0)	(0,1)	
(2,0)	(0,0)	(-1,0)	
(0,-1)	(-1,0)	(2,0)	

QR iterations: 177   Residual: 3.67698e-017

Eigenvalues by QR algorithm are:
(-1.78305,0.34898)
(3.31839,0.360747)
(1.46466,0.290273)


Eigenvalue (-1.78305,0.34898)
Eigenvector:
(0.51872,0.282683)
(-0.630727,-0.441571)
(-0.241276,-0.00186396)
Check error: 4.61444e-015


Eigenvalue (3.31839,0.360747)
Eigenvector:
(-0.392029,-0.455038)
(-0.296263,-0.425037)
(0.0457244,0.607235)
Check error: 1.79832e-014


Eigenvalue (1.46466,0.290273)
Eigenvector:
(-0.251079,-0.476689)
(-0.527375,-0.136913)
(0.230525,-0.599764)
Check error: 1.26956e-014


Note: eigenvectors defined up to a constant complex factor, so may look very different.

FIRST PART OF CODE (REST IN A SECOND POST)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <cmath>
#include <complex>
#include <vector>
#include <algorithm>
#include <cassert>
using namespace std;


// Global tolerances and type aliases used throughout the solver.
const double SMALL = 1.0E-30;          // used to stop divide-by-zero
const double NEARZERO = 1.0E-10;       // interpretation of "zero" (deflation / printing threshold)

using cmplx  = complex<double>;        // complex number
using vec    = vector<cmplx>;          // vector
using matrix = vector<vec>;            // matrix (=collection of (row) vectors)


// Prototypes

// File input and formatted output
matrix readMatrix();
int howBig( string filename );
void printMatrix( string title, const matrix &A );

// Basic linear algebra on complex matrices and vectors
matrix matMul( const matrix &A, const matrix &B );
matrix matSca( cmplx c, const matrix &A );
matrix matLin( cmplx a, const matrix &A, cmplx b, const matrix &B );
vec matVec( const matrix &A, const vec &V );
vec vecSca( cmplx c, const vec &V );
vec vecLin( cmplx a, const vec &U, cmplx b, const vec &V );
double vecNorm( const vec &V );
double matNorm( const matrix &A );
double subNorm( const matrix &T );
matrix identity( int N );
matrix hermitianTranspose( const matrix &A );

// QR / Schur eigen-decomposition machinery
cmplx shift( const matrix &A );
void Hessenberg( const matrix &A, matrix &P, matrix &H );
void QRFactoriseGivens( const matrix &A, matrix &Q, matrix &R );
void QRHessenberg( const matrix &A, matrix &P, matrix &T );
bool eigenvectorUpper( const matrix &T, matrix &E );


//========


int main()
{
   // Read matrix
   matrix A = readMatrix();
   printMatrix( "\nOriginal matrix:", A );

   int N = A.size();
   matrix P, T, E;

   // Compute eigenvalues by QR-Hessenberg method. 
   // Gives a Schur decomposition:  A = P T P-1;   P is unitary, T is upper-triangular
   QRHessenberg( A, P, T );
   cout << "\nEigenvalues by QR algorithm are:\n";
   for ( int L = 0; L < N; L++ ) cout << T[L][L] << '\n';

   // Compute eigenvectors
   bool OK = eigenvectorUpper( T, E );           // Find the eigenvectors of T
   E = matMul( P, E );                           // Rotate eigenvectors to those of A
   for ( int L = 0; L < N; L++ )
   {
      cmplx lambda = T[L][L];
      vec V( N );
      for ( int j = 0; j < N; j++ ) V[j] = E[j][L];

      cout << "\n\nEigenvalue " << lambda << "\nEigenvector:\n";
      for ( int j = 0; j < N; j++ ) cout << V[j] << '\n';

      // Check matrix norm of   A v - lambda v
      cout << "Check error: " << vecNorm( vecLin( 1.0, matVec( A, V ), -lambda, V ) ) << endl;
   }
}

//========

matrix readMatrix()                    // Read the square complex matrix from "matrix.dat"
{
   const string filename = "matrix.dat";

   // Size the matrix from the number of entries on the first line of the file
   int N = howBig( filename );
   matrix A( N, vec( N ) );

   // Read the N x N complex entries row by row
   ifstream in( filename );
   assert( in );
   for ( int row = 0; row < N; row++ )
   {
      for ( int col = 0; col < N; col++ ) in >> A[row][col];
   }
   in.close();

   return A;
}

//========

int howBig( string filename )          // Count the complex values on the first line of the file
{
   ifstream in( filename );
   assert( in );
   string firstLine;
   getline( in, firstLine );
   in.close();

   // Re-stream the line and count how many complex values it contains;
   // this determines the matrix dimension N.
   stringstream ss( firstLine );
   cmplx value;
   int count = 0;
   while ( ss >> value ) count++;

   return count;
}

//========

void printMatrix( string title, const matrix &A )
// Print an m x n complex matrix, tab-separated, one row per line.
// A non-empty title is printed on its own line first.
{
   cout << title;
   if ( !title.empty() ) cout << '\n';

   int rows = A.size(), cols = A[0].size();

   for ( int i = 0; i < rows; i++ )
   {
      for ( int j = 0; j < cols; j++ )
      {
         // Clamp tiny real/imaginary parts to zero for tidier output
         double re = A[i][j].real();
         double im = A[i][j].imag();
         if ( abs( re ) < NEARZERO ) re = 0.0;
         if ( abs( im ) < NEARZERO ) im = 0.0;
         cout << cmplx( re, im ) << '\t';
      }
      cout << '\n';
   }
}

//========

matrix matMul( const matrix &A, const matrix &B )          // Matrix product A * B
{
   int rowsA = A.size(), colsA = A[0].size();
   int rowsB = B.size(), colsB = B[0].size();
   assert( rowsB == colsA );                               // inner dimensions must agree

   matrix product( rowsA, vec( colsB, 0.0 ) );
   for ( int i = 0; i < rowsA; i++ )
   {
      // i,k,j loop order: per-element accumulation order over k is unchanged,
      // but the innermost loop walks B's rows contiguously.
      for ( int k = 0; k < colsA; k++ )
      {
         for ( int j = 0; j < colsB; j++ ) product[i][j] += A[i][k] * B[k][j];
      }
   }
   return product;
}

//========

matrix matSca( cmplx c, const matrix &A )                  // Multiply every entry of A by scalar c
{
   matrix scaled = A;
   for ( auto &row : scaled )
   {
      for ( auto &entry : row ) entry *= c;
   }
   return scaled;
}

//========

matrix matLin( cmplx a, const matrix &A, cmplx b, const matrix &B )  // Linear combination a*A + b*B
{
   int m = A.size(),   n = A[0].size();
   // Cast sizes to int: avoids a signed/unsigned comparison warning in the dimension check
   assert( (int)B.size() == m && (int)B[0].size() == n );
   matrix C = matSca( a, A );                              // start from a*A

   for ( int i = 0; i < m; i++ )
   {
      for ( int j = 0; j < n; j++ ) C[i][j] += b * B[i][j];
   }
   return C;
}

//========

vec matVec( const matrix &A, const vec &V )                // Matrix-vector product A * V
{
   int rows = A.size(), cols = A[0].size();
   int len = V.size();
   assert( len == cols );                                  // dimensions must agree

   vec result( rows, 0.0 );
   for ( int i = 0; i < rows; i++ )
   {
      for ( int k = 0; k < cols; k++ ) result[i] += A[i][k] * V[k];
   }
   return result;
}

//========

vec vecSca( cmplx c, const vec &V )                        // Multiply every component of V by scalar c
{
   vec scaled = V;
   for ( auto &x : scaled ) x *= c;
   return scaled;
}

//========

vec vecLin( cmplx a, const vec &U, cmplx b, const vec &V ) // Linear combination a*U + b*V
{
   int n = U.size();
   // Cast to int: avoids a signed/unsigned comparison warning in the size check
   assert( (int)V.size() == n );
   vec W = vecSca( a, U );                                 // start from a*U
   for ( int j = 0; j < n; j++ ) W[j] += b * V[j];
   return W;
}

//========
double vecNorm( const vec &V )                             // Euclidean norm of a complex vector
{
   double sumSq = 0.0;
   for ( const auto &x : V ) sumSq += norm( x );           // std::norm(x) = |x|^2
   return sqrt( sumSq );
}

//========
double matNorm( const matrix &A )                          // Frobenius norm of a complex matrix
{
   double sumSq = 0.0;
   for ( const auto &row : A )
   {
      for ( const auto &x : row ) sumSq += norm( x );      // std::norm(x) = |x|^2
   }
   return sqrt( sumSq );
}

//========

double subNorm( const matrix &T )                          // Norm of the part below the leading diagonal
// Used as a convergence measure: for an upper-triangular matrix this is zero.
{
   int n = T.size();
   // Cast to int: avoids a signed/unsigned comparison warning in the squareness check
   assert( (int)T[0].size() == n );
   double result = 0.0;
   for ( int i = 1; i < n; i++ )
   {
      for ( int j = 0; j < i; j++ ) result += norm( T[i][j] );
   }
   return sqrt( result );
}

//======== 

Last edited on
Second part of code (SAME FILE)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
matrix identity( int N )                                   // N x N identity matrix
{
   matrix I( N, vec( N, 0.0 ) );
   for ( int d = 0; d < N; d++ ) I[d][d] = 1.0;
   return I;
}

//========

matrix hermitianTranspose( const matrix &A )               // Conjugate (Hermitian) transpose of A
{
   int rows = A.size(), cols = A[0].size();
   matrix AH( cols, vec( rows ) );                         // transpose of m x n is n x m
   for ( int r = 0; r < rows; r++ )
   {
      for ( int c = 0; c < cols; c++ ) AH[c][r] = conj( A[r][c] );
   }
   return AH;
}

//========

cmplx shift( const matrix &A )                             // Wilkinson shift in QR algorithm
// Returns an eigenvalue of the trailing 2x2 block of A - the one closer to the
// bottom-right element - used as the spectral shift to accelerate QR convergence.
// Returns 0 for a 1x1 matrix (no 2x2 block available).
{
   int N = A.size();
   cmplx s = 0.0;
   int i = N - 1;
// while ( i > 0 && abs( A[i][i-1] ) < NEARZERO ) i--;     // Deflation (not sure about this)

   if ( i > 0 )
   {
      cmplx a = A[i-1][i-1], b = A[i-1][i], c = A[i][i-1], d = A[i][i];        // Bottom-right elements
      // Roots of the characteristic polynomial  s^2 - (a+d)s + (ad-bc) = 0  (complex sqrt)
      cmplx delta = sqrt( ( a + d ) * ( a + d ) - 4.0 * ( a * d - b * c ) );
      cmplx s1 = 0.5 * ( a + d + delta );
      cmplx s2 = 0.5 * ( a + d - delta );
      s = ( norm( s1 - d ) < norm( s2 - d ) ? s1 : s2 );   // take the root nearer to d
   }
   return s;
}

//========

void Hessenberg( const matrix &A, matrix &P, matrix &H )
// Reduce A to Hessenberg form A = P H P-1 where P is unitary, H is Hessenberg
//                             i.e. P-1 A P = H
// A Hessenberg matrix is upper triangular plus single non-zero diagonal below main diagonal
// Uses a sequence of Householder reflections, one per column k = 0 .. N-3,
// each zeroing the entries below the first subdiagonal of column k.
{
   int N = A.size();

   H = A;
   P = identity( N );

   for ( int k = 0; k < N - 2; k++ )             // k is the working column
   {
      // X vector, based on the elements from k+1 down in the kth column
      double xlength = 0;
      for ( int i = k + 1; i < N; i++ ) xlength += norm( H[i][k] );
      xlength = sqrt( xlength );

      // U vector ( normalise X - rho.|x|.e_k )
      // rho is a unit complex factor chosen opposite in phase to H[k+1][k],
      // so the subtraction below avoids cancellation (complex analogue of
      // choosing the sign of the Householder reflector).
      vec U( N, 0.0 );
      cmplx rho = 1.0, xk = H[k+1][k];
      double axk = abs( xk );
      if ( axk > NEARZERO ) rho = -xk / axk;
      U[k+1] = xk - rho * xlength;
      double ulength = norm( U[k+1] );
      for ( int i = k + 2; i < N; i++ )
      {
         U[i] = H[i][k];
         ulength += norm( U[i] );
      }
      ulength = max( sqrt( ulength ), SMALL );   // SMALL guards the divide below
      for ( int i = k + 1; i < N; i++ ) U[i] /= ulength;

      // Householder matrix: P = I - 2 U U*T  (unitary and Hermitian)
      matrix PK = identity( N );
      for ( int i = k + 1; i < N; i++ )
      {
         for ( int j = k + 1; j < N; j++ ) PK[i][j] -= 2.0 * U[i] * conj( U[j] );
      }

      // Transform as PK*T H PK.   Note: PK is unitary and Hermitian, so PK*T = PK
      H = matMul( PK, matMul( H, PK ) );
      P = matMul( P, PK );                       // accumulate the overall transformation
   }
}


//========


void QRFactoriseGivens( const matrix &A, matrix &Q, matrix &R )
{
   // Factorises a Hessenberg matrix A as QR, where Q is unitary and R is upper triangular
   // Uses N-1 Givens rotations, each zeroing the single subdiagonal element R[i][i-1]
   int N = A.size();

   Q = identity( N );
   R = A;

   for ( int i = 1; i < N; i++ )       // i is the row number
   {
      int j = i - 1;                   // aiming to zero the element one place below the diagonal
      if ( abs( R[i][j] ) < SMALL ) continue;   // already (numerically) zero - skip the rotation

      // Form the Givens matrix from the pivot R[j][j] and the element to eliminate R[i][j]
      cmplx c =        R[j][j]  ;
      cmplx s = -conj( R[i][j] );
      double length = sqrt( norm( c ) + norm( s ) );   // normalise so |c|^2 + |s|^2 = 1
      c /= length;
      s /= length;
      cmplx cstar = conj( c );         //  G*T = ( c* -s )     G = (  c  s  )     <--- j
      cmplx sstar = conj( s );         //        ( s*  c )         ( -s* c* )     <--- i
      // Copies are needed because rows j,i of R (and columns j,i of Q) are
      // both read and written in the update below.
      matrix RR = R;
      matrix QQ = Q;
      for ( int m = 0; m < N; m++ )
      {
         R[j][m] = cstar * RR[j][m] - s     * RR[i][m];   // R <- G*T R  (rows j and i)
         R[i][m] = sstar * RR[j][m] + c     * RR[i][m];    // Should force R[i][j] = 0.0
         Q[m][j] = c     * QQ[m][j] - sstar * QQ[m][i];   // Q <- Q G    (columns j and i)
         Q[m][i] = s     * QQ[m][j] + cstar * QQ[m][i];
      }
   }
}

//========

void QRHessenberg( const matrix &A, matrix &P, matrix &T )
// Apply the QR algorithm to the matrix A. 
//
// Multi-stage:
//    - transform to a Hessenberg matrix
//    - apply QR factorisation based on Givens rotations
//    - uses (single) Wilkinson shift - double-shift version in development
//
// Should give a Schur decomposition A = P T P-1 where P is unitary, T is upper triangular
//                             i.e. P-1 A P = T
// Eigenvalues of A should be the diagonal elements of T
// If A is hermitian T would be diagonal and the eigenvectors would be the columns of P
{
   const int ITERMAX = 10000;                    // iteration cap (algorithm may not converge)
   const double TOLERANCE = 1.0e-10;             // convergence threshold on the residual

   int N = A.size();

   matrix Q( N, vec( N ) ), R( N, vec( N ) ), Told( N, vec( N ) );
   matrix I = identity( N );

   // Stage 1: transform to Hessenberg matrix ( T = Hessenberg matrix, P = unitary transformation )
   Hessenberg( A, P, T );


   // Stage 2: apply QR factorisation (using Givens rotations)
   int iter = 1;
   double residual = 1.0;
   while( residual > TOLERANCE && iter < ITERMAX )
   {
      Told = T;

      // Spectral shift:  T <- T - mu I  accelerates convergence
      cmplx mu = shift( T );
      if ( abs( mu ) < NEARZERO ) mu = 1.0;   // prevent unitary matrices causing a problem
      T = matLin( 1.0, T, -mu, I );

      // Basic QR algorithm by Givens rotation:  T = QR,  then  T <- RQ
      // (a unitary similarity transform, so the eigenvalues are preserved)
      QRFactoriseGivens( T, Q, R );
      T = matMul( R, Q );
      P = matMul( P, Q );                     // accumulate the overall transformation

      // Reverse shift:  T <- T + mu I  restores the original spectrum
      T = matLin( 1.0, T, mu, I );

      // Calculate residuals
      residual = matNorm( matLin( 1.0, T, -1.0, Told ) );            // change on iteration
      residual += subNorm( T );                                      // below-diagonal elements
//    cout << "\nIteration: " << iter << "   Residual: " << residual << endl;
      iter++;
   }
   cout << "\nQR iterations: " << iter << "   Residual: " << residual << endl;
   if ( residual > TOLERANCE ) cout << "***** WARNING ***** QR algorithm not converged\n";
}

//========

bool eigenvectorUpper( const matrix &T, matrix &E )
// Find the eigenvectors of upper-triangular matrix T; returns them as column vectors of matrix E
// The eigenvalues are necessarily the diagonal elements of T
// NOTE: if there are repeated eigenvalues, then THERE MAY NOT BE N EIGENVECTORS
// Returns true if a full set of N eigenvectors was found; any missing
// eigenvector's column in E is filled with zeros.
{
   bool fullset = true;
   int N = T.size();
   E = matrix( N, vec( N, 0.0 ) );               // Columns of E will hold the eigenvectors

   matrix TT = T;
   for ( int L = N - 1; L >= 0; L-- )            // find Lth eigenvector, working from the bottom
   {
      bool ok = true;
      vec V( N, 0.0 );
      cmplx lambda = T[L][L];
      for ( int k = 0; k < N; k++ ) TT[k][k] = T[k][k] - lambda;          // TT = T - lambda I
                                                                          // Solve TT.V = 0
      // Since TT is upper triangular with TT[L][L] = 0, components below L are zero
      // and component L is a free choice; the rest follow by back-substitution.
      V[L] = 1.0;                                // free choice of this component
      for ( int i = L - 1; i >= 0; i-- )         // back-substitute for other components
      {
         V[i] = 0.0;
         for ( int j = i + 1; j <= L; j++ ) V[i] -= TT[i][j] * V[j];
         if ( abs( TT[i][i] ) < NEARZERO )       // problem with repeated eigenvalues
         {
            if ( abs( V[i] ) > NEARZERO ) ok = false;     // incomplete set; use the lower-L one only
            V[i] = 0.0;
         }
         else
         {
            V[i] = V[i] / TT[i][i];
         }
      }

      if ( ok )
      {
         // Normalise to unit length and store as column L of E
         double length = vecNorm( V );
         for ( int i = 0; i <= L; i++ ) E[i][L] = V[i] / length;
      }
      else
      {
         // No independent eigenvector for this (repeated) eigenvalue: zero the column
         fullset = false;
         for ( int i = 0; i <= L; i++ ) E[i][L] = 0.0;
      }
   }

   if ( !fullset )
   {
      cout << "\n***** WARNING ***** Can't find N independent eigenvectors\n";
      cout << "   Some will be set to zero\n";
   }

   return fullset;
}

Last edited on
@lastchance, thank you. I have been trying to understand the code but I must be doing something wrong because I am getting lots of errors.

1. error: expected ‘;’ before ‘lambda’
cmplx lambda = T[L][L]; (but the line above is: vec V( N, 0.0 );)
2. error: ‘TT’ was not declared in this scope
for ( int k = 0; k < N; k++ ) TT[k][k] = T[k][k] - lambda;
3. error: invalid types ‘const int[int]’ for array subscript
for ( int k = 0; k < N; k++ ) TT[k][k] = T[k][k] - lambda;
4. error: ‘V’ was not declared in this scope
V[L] = 1.0; // free choice of this component
5. invalid types ‘int[int]’ for array subscript
for ( int i = 0; i <= L; i++ ) E[i][L] = V[i] / length;

Some variables were not declared. TT is this a float or an int?
Should V be a float?
Should lambda be a float?

^
@zeviah5,
Have you copied the WHOLE code into a SINGLE file and compiled with a c++11 compiler? It was too big for a single post, so I had to split it into two parts: you will have to put them back together again.

Please do not remove any of the lines of code - even if you think they might be wrong. If there are any that your compiler does not like then please tell us: it will give us a clue to what is wrong. It is impossible to relate your declared errors to the code as given.

I don't see any good reason for the errors that you cite. There is a possible indication that the 'using' statements on lines 15-17 of the first part are not being picked up correctly (or have you removed or commented these out?): these are defining cmplx, vec and matrix types and are simply aliases for much longer expressions. If these are a problem it might be possible to use typedef or #define statements instead.



Some variables were not declared. TT is this a float or an int?

TT is declared and initialised as
matrix TT = T;
No it should not be a float or an int; TT is a matrix of complex numbers. The matrix type was declared in the first part of the code by the aliases
1
2
3
using cmplx  = complex<double>;        // complex number
using vec    = vector<cmplx>;          // vector
using matrix = vector<vec>;            // matrix (=collection of (row) vectors) 





Should V be a float?

No, V is a vector of complex numbers. You are finding the eigenvectors of a complex matrix, so you would expect the eigenvectors to have complex components. V is declared as
vec V( N, 0.0 );
with vec being an alias for vector< complex<double> >.




Should lambda be a float?

No; lambda is, with very common notation, an eigenvalue and for a complex matrix you would expect it to be a complex number. A float (or the higher-precision variant double) is an ordinary real number, not a complex one. lambda is declared as cmplx, which is here being used as an alias for complex<double>.



The code is far from perfect. I'm not happy with the shifting routine (and have just edited it). The QR algorithm is not guaranteed to converge and is not good for large sparse matrices. Indeed, @jonnin's comment about "routines to get the matrix into some factored specialized form before you can solve the problem at hand" was very prescient. I already had a simple QR algorithm solver coded ... but it didn't converge for your particular case, so I had to try a different approach. Here, I had to rotate to Hessenberg form and then use the Givens rotations approach to QR factorisation.

For what it's worth, I coded mainly from the following references:
http://people.inf.ethz.ch/arbenz/ewp/Lnotes/chapter4.pdf
https://www.math.kth.se/na/SF2524/matber15/qrmethod.pdf
http://www.ams.org/journals/mcom/2002-71-240/S0025-5718-01-01387-4/S0025-5718-01-01387-4.pdf
although I had to google a lot of references for the shifting modification, which is supposed to help convergence. Note also that in many places the methods in these references are for real matrices and I had to work out what the complex generalisation would be.

I suspect - but haven't got a copy - that the repeatedly-cited book "Matrix Computations" by Golub and Van Loan would also be useful.



Please try to copy both parts of the code into a SINGLE source file and compile. If this gives problems please:
- tell us the operating system you are using (Windows / linux /mac / ....)
- what compiler you are using, and if you are still using it within Codeblocks
- the full set of error messages, or, if it is too long, the first part.



Finally, I am pre-supposing that you know what complex numbers are:
https://en.wikipedia.org/wiki/Complex_number
and what matrix eigenvalues and eigenvectors are about:
https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors
Some of your queries about whether items should be ints or floats may possibly suggest otherwise.
Last edited on
@lastchance, I did put code together.

The compiler was old hence the errors. I updated and the code ran beautifully. Thank you.
That's good news, @zeviah5. It was a fun coding exercise (in places), but probably you want to have a look at the libraries suggested early in the thread as well.
@lastchance, how can I link the code to a library? Say I want to use armadillo.
how can I link the code to a library? Say I want to use armadillo.


http://arma.sourceforge.net/faq.html - under linking.

But since I've written my own solvers, not used library routines, and I don't use your IDE, I couldn't realistically give you instruction on that.

Maybe other forum users can chip in?
@lastchance, thank you.

Can the above code be used for more than the 3 x 3 matrix?

Can the above code be used for more than the 3 x 3 matrix?


Yes - see the 4x4 example at the bottom of this post. I just added another row and column to your original matrix. The technique ought to work (in principle) for bigger square matrices, but it wouldn't be recommended for large sparse ones. What size matrix do you envisage?

From bitter experience, I know that most iterative algorithms are breakable - you need to understand some of the maths behind it so that you can fix it or try different approaches.

I've just had to make a minor tweak to one of the routines - cmplx shift( const matrix &A ) - to get it to converge: you may want to re-download it - sorry! The choice of shifting factor is the one bit of theory that I don't fully understand, so I'm coding blind (or, at least, befogged) here.

Another reminder: although eigenvalues are unique (up to their order) the eigenvectors are only determined up to a multiplying factor, and if that factor is a complex number then they can end up looking very different. If there are repeated eigenvalues then the eigenvectors can be an infinity of different linear combinations.

I doubt that you have a whole assignment to find eigenvalues and eigenvectors of complex matrices. Is it part of some bigger project? If so, what? In most of the applications that I know with complex matrices - e.g. quantum mechanics - the matrices have a special form: Hermitian (they are equal to the complex conjugate of their transpose; as a result the eigenvalues end up being real numbers). Such matrices have specialised algorithms which may be faster and more reliable.

I have no direct experience of Armadillo, but glancing through their documentation, you would probably want one of two possible routines:
eig_gen - eigen decomposition of dense general square matrix
http://arma.sourceforge.net/docs.html#eig_gen
or
schur - Schur decomposition
http://arma.sourceforge.net/docs.html#schur
My function invocation QRHessenberg( A, P, T ); is roughly equivalent to the latter. I did the Schur decomposition with a QR algorithm and then extracted the eigenvalues and eigenvectors from the resulting matrices.



Example of 4x4 matrix:

Original matrix:
(1,1)	(2,0)	(0,1)	(4,2)	
(2,0)	(0,0)	(-1,0)	(0,1)	
(0,-1)	(-1,0)	(2,0)	(1,3)	
(2,3)	(1,1)	(2,0)	(0,1)	

QR iterations: 195   Residual: 2.07123e-016

Eigenvalues by QR algorithm are:
(4.29187,3.94284)
(3.21411,0.0723803)
(-3.28985,-2.11911)
(-1.21612,0.103885)


Eigenvalue (4.29187,3.94284)
Eigenvector:
(-0.496779,0.481255)
(-0.0857658,0.160645)
(-0.29336,0.0557843)
(-0.555418,0.301298)
Check error: 2.06487e-014


Eigenvalue (3.21411,0.0723803)
Eigenvector:
(0.322952,0.0598844)
(0.344618,0.203043)
(-0.49125,-0.687109)
(-0.129332,0.0442102)
Check error: 3.16981e-014


Eigenvalue (-3.28985,-2.11911)
Eigenvector:
(-0.479899,0.0639965)
(0.0763902,-0.395412)
(-0.136875,-0.468584)
(0.542391,0.26631)
Check error: 4.0852e-014


Eigenvalue (-1.21612,0.103885)
Eigenvector:
(-0.250737,0.313859)
(0.745493,-0.4159)
(0.268376,-0.0963382)
(-0.140825,0.0935537)
Check error: 2.36705e-015

Last edited on
@lastchance, I eventually would like to be able to grow an n x n tridiagonal matrix - and try to manipulate it and study quantum effects on it (bigger project).


Yes Sir, reading through Armadillo and trying to understand. Thank you so much.

You are teaching me so much Thank you.

The QRHessenberg decomposition you used, first I am knowing. I know a little LU decomposition. Did you learn about these methods from Numerical Analysis or another course?






@zeviah5

The methods I used are for dense matrices. If you have matrices that are of specialised types - particularly tridiagonal - then you should use the much faster methods specific to them. Solving tridiagonal systems is ubiquitous and fast - look up the tridiagonal matrix algorithm or Thomas algorithm for solvers; similar specialist methods exist for eigenvalues. However, the first sample matrix that you gave is not tridiagonal.

I would recommend you find a decent book on matrix solution methods: I discover that there is a much more recent version (4th edition, 2013) of Golub and van Loan, and there are other texts.

No, I didn't learn these methods on a course: I had to go away and read up on them. Sometimes it's good to learn something new and I can find uses at work for eigenvalue extractors.
Last edited on
Topic archived. No new replies allowed.
Pages: 12