-
Notifications
You must be signed in to change notification settings - Fork 2
/
chap5.tex
203 lines (192 loc) · 11.3 KB
/
chap5.tex
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
%!TEX root = ./main.tex
\section{Eigenvalues, Eigenvectors, and
Invariant Subspaces}
\subsection{Invariant Subspaces}
\begin{definition}
Let $T \in \c L(V,V)$ on a vector space $V \neq \lb 0 \rb$. A subspace $U \subseteq V$ is called invariant under $T$ if $T\vec u \in U \ \forall \vec u \in U$.
\end{definition}
\begin{example}
For any $T \in \c L(V,V)$, the following subspaces are invariant:
\begin{enumerate}
\item $\lb 0 \rb$
\item $V$
\item $\nul T = \lb \vec v \in V : T\vec v = 0 \rb$ \\
If $T\vec v \in \nul T$, then $T\vec v = 0 \in \nul T$.
\item $\range T = \lb \vec w \in V : \vec w = T\vec v \text{ for some } \vec v \in V \rb$ \\
If $\vec w \in \range T$, then $T\vec w \in \range T$, since $T\vec w$ is by definition the image of $\vec w$ under $T$.
\end{enumerate}
\end{example}
\begin{question}
What are $1$-dimensional invariant subspaces?
\end{question}
\begin{answer}
If $\dim U = 1$, then $U = \spa(\vec u)$ for some $\vec u \neq 0$. Invariance means $T\vec u = \lambda \vec u$ for some $\lambda \in \b F$, where $\vec u$ is an eigenvector of $T$ and $\lambda$ is the corresponding eigenvalue.
\end{answer}
\begin{remark}
$\vec u \neq 0$ if $\vec u$ is an eigenvector of $T$. However, $\lambda = 0$ is possible.
\end{remark}
\begin{proposition} Let $T$ be a linear operator on $V$, then the following are equivalent:
\begin{enumerate}
\item $\lambda$ is an eigenvalue of $T$.
\item $T - \lambda\b I$ is not invertible.
\item $T - \lambda \b I$ is not injective.
\item $T - \lambda \b I$ is not surjective.
\end{enumerate}
\end{proposition}
We have already proven that statements $2$, $3$, and $4$ are logically equivalent.
\begin{theorem}
Suppose $\li vm$ are eigenvectors of $T \in \c L(V)$ corresponding to distinct eigenvalues $\li \lambda m$. Then $\li vm$ are linearly independent.
\end{theorem}
\begin{proof}
Suppose, for contradiction, that $\li{\vec v}m$ are linearly dependent. By the linear dependence lemma, we find the minimum index $k \leq m$ such that $\vec v_k \in \spa (\li{\vec v}{k-1})$. i.e.
\begin{equation} \label{eqn1}
\vec v_k = \lincomb{\alpha}{\vec v}{k-1}
\end{equation}
Apply linear transformation on both sides
\begin{equation} \label{eqn2}
T\vec v_k = T\lincomb{\alpha}{\vec v}{k-1}
\end{equation}
\begin{equation} \label{eqn3}
\lambda_k \vec v_k = \alpha_1 \lambda_1 \vec v_1 + \alpha_2 \lambda_2 \vec v_2 + \cdots + \alpha_{k-1} \lambda_{k-1} \vec v_{k-1}
\end{equation}
We multiply equation \ref{eqn1} by $\lambda_k$ and subtract it from \ref{eqn3}, and we get \[ 0 = \alpha_1 (\lambda_1 - \lambda_k)\vec v_1 + \alpha_2(\lambda_2 - \lambda_k)\vec v_2 + \cdots + \alpha_{k-1} (\lambda_{k-1} - \lambda_k)\vec v_{k-1}\]
Since $k$ is minimal, $\li{\vec v}{k-1}$ are linearly independent, and the eigenvalues are distinct so $\lambda_i - \lambda_k \neq 0$ for each $i < k$; hence every $\alpha_i = 0$. But then equation \ref{eqn1} gives $\vec v_k = 0$, contradicting that eigenvectors are nonzero. Therefore the list $\li{\vec v}m$ must be linearly independent.
\end{proof}
\begin{corollary}
An operator $T \in \c L(V)$ has at most $\boxed{\dim V}$ distinct eigenvalues.
\end{corollary}
\subsubsection{Restriction Operators}
\begin{definition}
Suppose $T \in \c L(V)$ and $U$ is a subspace of $V$ invariant under $T$. Then the restriction operator $T|_U \in \c L(U)$ is defined by $T|_U(\vec u) = T\vec u$ for all $\vec u \in U$.
\end{definition}
\subsection{Eigenvectors and Upper-Triangular
Matrices}
\subsubsection{Polynomials in T}
\begin{definition}
Suppose $T \in \c L(V)$, then $T^k$ is defined as
\[ T^k := \underbrace{T \circ T \circ \cdots \circ T}_{k \text{ times}}\]
Notice that $T^0 = \b I, T^1 = T$.
\end{definition}
\begin{definition}
If $p(x) = a_0 + a_1x + \cdots + a_nx^n$, then we can define $p(T)$ as $a_0\b I + a_1T + a_2T^2 + \cdots + a_nT^n$.
\end{definition}
\begin{example}
Let $V := \c P(\b R), S: p \mapsto 3p'' + 2p' + p, D: p \mapsto p'$. We can see that $S$ can be expressed as $S = \b I + 2D + 3D^2$. Therefore \[\c M(S) = 3\c M(D)^2 + 2\c M(D) + \c M(\b I)\] where we need to take the same basis for inputs and outputs when forming $\c M(\cdot)$.
\noindent Let's use our favorite basis $1,x,x^2, x^3$. We then can see
\[ \c M(D) = \bml 0 & 1 & 0 & 0 \\ 0 & 0 & 2 & 0 \\ 0 & 0 & 0 & 3 \\ 0 & 0 & 0 & 0 \bmr, \c M(S) = \bml 1 & 2 & 6 & 0\\ 0 & 1 & 4 & 18\\ 0 & 0 & 1 & 6 \\ 0 & 0 & 0 & 1 \bmr\]
\end{example}
\begin{question}
What is the best matrix representation for an operator?
\end{question}
\begin{question}
What information about eigenvalues/eigenvectors can be read off from a matrix representation?
\end{question}
\begin{theorem}
Suppose $T \in \c L(V)$ and $\li{\vec v}n$ is a basis of $V$. Then the following are logically equivalent:
\begin{enumerate}
\item $\c M(T)$ is upper triangular.
\item $T\vec v_j \in \spa (\li{\vec v}j)$ $\forall j = 1,2, \ldots, n$.
\item $\spa (\li{\vec v}j)$ is invariant under $T$ $\forall j = 1,2,\ldots, n$.
\end{enumerate}
\end{theorem}
\newpage
\begin{proof}
$1) \implies 2) $ \[\bml * & * & * & * & \cdots & * \\ & * & * & * & \cdots & * \\ & & * & * & \cdots & * \\ & & & * & \cdots & * \\ & & & & \ddots & \vdots \\ & & & & & * \bmr\] We can see that $2)$ holds true by inspection. \\
$2) \implies 3)$ Consider $T\vec v_h$ for $h \leq j$, by $2)$ we have $T\vec v_h \in \spa(\li{\vec v}h) \subseteq \spa (\li{\vec v}j)$. So $\spa (\li{\vec v}j)$ is invariant under $T$. \\
$3) \implies 2)$ Consider $T\vec v_j$, by $3)$ it is a linear combination of $\li{\vec v}j$ because $T\vec v_j \in \spa(\li{\vec v}j)$ so $\c M(T)(i,j) = 0$ if $i > j$.
\end{proof}
\begin{question}
What about conditions for lower-triangular matrices?
\end{question}
\begin{lemma}
Over $\b C$, every linear operator has at least one eigenvalue.
\end{lemma}
\begin{proof}
Take $\vec v \in V\ \backslash \lb 0 \rb$, and consider the list $\vec v, T\vec v, T^2\vec v, \ldots, T^n\vec v$ where $n = \dim V$. This list of $n+1$ vectors in an $n$-dimensional space must be linearly dependent, so there is a nontrivial linear combination of these vectors which is $0$: \[a_0\vec v + a_1T\vec v + a_2T^2\vec v + \cdots + a_nT^n\vec v = 0\]
i.e. $p(T)\vec v = 0$ for nonconstant $p(x) := a_0 + a_1x + a_2x^2 + \cdots + a_nx^n$. By the fundamental theorem of algebra $p$ splits into linear factors over $\b C$.
\[ p(x) = c(x - \lambda_1)(x - \lambda_2) \cdots (x - \lambda_m)\] for some $m \leq n$. Therefore
\[ 0 = p(T)\vec v = c(T - \lambda_1 \b I)(T - \lambda_2 \b I) \cdots (T - \lambda_m \b I)\vec v\]
Since $\vec v \neq 0$, at least one of these factors is not injective, i.e. some $\lambda_j$ is an eigenvalue. This shows that $T$ has at least $1$ eigenvalue.
\end{proof}
\begin{theorem}
For any $T \in \c L(V)$, $V$ is finite dimensional vector space over $\b C$, there exists its matrix representation $\c M(T)$ which is upper-triangular.
\end{theorem}
\newpage
\begin{proof}
We can induct on the dimension of $V$.
\textit{Base Step.} $n = 1$ is trivially true. \\
\textit{Inductive Hypothesis.} Suppose Theorem holds for all vector spaces of dimension less than $\dim V$. \\
\textit{Inductive Step.} Consider $\lambda \in \b C$ an eigenvalue of $T$ by lemma. We can define
\[U := \range (T -\lambda \b I)\]
$U$ is a subspace of $V$. By the characterization of eigenvalues, $T - \lambda \b I$ is not surjective, hence $\range (T - \lambda \b I) \subsetneq V$, hence $\dim \range (T - \lambda \b I) < \dim V$.
We want to show that $U$ is invariant under $T$. Suppose $\vec v \in U$, then \[T\vec v = \underbrace{(T - \lambda \b I)\vec v}_{\in U} + \underbrace{\lambda \vec v}_{\in U}\] therefore we know that $U$ is invariant under $T$.
Consider \[T|_U \in \c L(U) : (T|_U)(\vec v) := T\vec v \ \forall \vec v \in U\]
If $U \neq \lb 0 \rb$, then there is a basis $\li{\vec u}m$ of $U$ ($m < n$) such that the matrix representation of $T|_U$ with respect to $\li{\vec u}m$ is upper triangular by the inductive hypothesis. Extend $\li{\vec u}m$ to a basis of $V$, $\li{\vec u}m, \li{\vec v}k$. We compute
\[T\vec v_j = \underbrace{(T - \lambda \b I)\vec v_j}_{\in U = \spa (\li{\vec u}m)} + \lambda \vec v_j\] We also know that $T\vec u_l \in \spa (\li {\vec u}{l-1})$. We can see the matrix representation and hence we are done
\[ \begin{array}{cc}
\\ \\ \\ \\ m \\ \\ \\
\end{array}\left[\begin{array}{cccc|ccccccc}
* & * & \cdots & * & * & * & * \\
0 & * & \cdots & * & * & * & * \\
\vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \vdots \\
0 & 0 & \cdots & 0 & * & * & * \\
\hline
0 & 0 & \cdots & 0 & \lambda & 0 & 0 \\
0 & 0 & \cdots & 0 & 0 & \lambda & 0 \\
0 & 0 & \cdots & 0 & 0 & 0 & \lambda \\
\end{array}\right]\]
\end{proof}
\begin{question}
What about eigenvalues of a upper-triangular matrix?
\end{question}
\begin{theorem}
An upper triangular matrix is invertible if and only if all its diagonal entries are nonzero.
\end{theorem}
\begin{proof}
Suppose all diagonal entries are nonzero. Prove surjectivity.
\begin{align*}
T\vec v_1 &=A_{1,1} \vec v_1, A_{1,1} \neq 0 \implies \vec v_1 \in \range T\\
T\vec v_2 &=A_{1,2} \vec v_1 + A_{2,2} \vec v_2 , A_{2,2} \neq 0 \implies \vec v_2 \in \range T \\
\vdots & \hspace{5cm} \implies \\
T\vec v_n &= A_{1,n} \vec v_1 + A_{2,n} \vec v_2 + \cdots + A_{n,n} \vec v_n, A_{n,n} \neq 0 \implies \vec v_n \in \range T
\end{align*}
Therefore $\range T = V$, so $T$ is surjective, hence $T$ is invertible.
Suppose at least one diagonal entry is $0$; we want to show that $T$ is not invertible. Say $A_{j,j} = 0$ for some $j$ and upper triangular matrix $A$. If $j = 1$, then $\vec v_1 \in \nul T$, hence $T$ is not invertible, and we are done. If $j > 1$, consider $U := \spa (\li{\vec v}j)$. $T$ maps $U$ to $\spa (\li{\vec v}{j-1})$. This shows $T|_U$ is not surjective, then we know that $T|_U$ is not injective and there exists $\vec u \in U$ such that $\vec u \in \nul T|_U \implies \vec u \in \nul T$. Therefore $T$ is not injective. Hence $T$ is not invertible.
\end{proof}
\begin{corollary}
An upper triangular matrix / operator in upper triangular form has the diagonal elements / entries as its eigenvalues.
\end{corollary}
\begin{example}The matrix
\[ A = \bml
5 & * & * & * & * & \\
0 & 9 & * & * & * & \\
0 & 0 & 1 & * & * & \\
0 & 0 & 0 & 8 & * & \\
0 & 0 & 0 & 0 & 10 & \\
\bmr\]
has eigenvalues $5,9,1,8,10$.
\end{example}
\begin{example}
$T: \c P_n(\b R) \to \c P_n(\b R) : p \mapsto 3p'' - 5p' + 7p$ has only the eigenvalue $7$: with respect to the basis $1, x, \ldots, x^n$, the terms $3p''$ and $-5p'$ contribute only strictly upper-triangular entries, so $\c M(T)$ is upper triangular with every diagonal entry equal to $7$.
\end{example}
\subsection{Eigenspaces and Diagonal Matrices}
\begin{definition}
Suppose $T \in \c L(V)$ and $\lambda \in \b F$. The eigenspace of $T$ corresponding to $\lambda$, denoted as $E(\lambda, T)$ is defined as
\[ E(\lambda, T) := \lb \vec v \in V : T\vec v = \lambda \vec v \rb = \nul (T - \lambda \b I)\]
\end{definition}
\begin{definition}
An operator $T \in \c L(V)$ is called diagonalizable if the operator has a diagonal matrix with respect to some basis of $V$.
\end{definition}
\begin{theorem}
For $T \in \c L(V)$, where $V$ is a finite dimensional vector space, then the following are equivalent
\begin{enumerate}
\item $\c M(T)$ is a diagonal matrix.
\item the corresponding basis for $V$ consists of eigenvectors of $T$.
\item $V = U_1 \oplus U_2 \oplus \cdots \oplus U_n$ where $\dim U_j = 1$ and $U_j$ is invariant under $T$ for all $j$.
\item $V = W_1 \oplus W_2 \oplus \cdots \oplus W_k$, where $T|_{W_l} = \lambda_l \b I$ for all $l$ and $W_l$ is invariant under $T$.
\item $\dim V = \dim W_1 + \dim W_2 + \cdots + \dim W_k$, where $W_e = \nul (T - \lambda_e \b I)$.
\end{enumerate}
\end{theorem}
\begin{proof}
Refer to Axler Page 157.
\end{proof}