diff --git a/README.md b/README.md index 445b66f..d9920d4 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Make sure to compile with xelatex (required by fontspec). 2. After you launched the docker daemon (started Docker Desktop) you can simply run execute the `./build_latex_local.sh` file to generate the pdf files. They are located in a newly created subfolder called `build`. (Most probably you have to change the execution permissions with `chmod +x build_latex_local.sh` before you are able to run the script for the first time.) -### Native LaTeX compiler +### Native LaTeX Compiler If you want to use your own native installed LaTeX compiler we recommend to use [TeXLive](https://www.tug.org/texlive/) which integrates well within visual studio code. @@ -80,3 +80,7 @@ The LaTeX template of this summary can be found [here](https://github.com/MeierT ## License Distributed under the MIT License. See [LICENSE](LICENSE) for more information. + +## Disclaimer + +All information, images, and materials presented here are sourced from the official lecture slides of the course "Control Systems II" taught by Prof. Dr. Emilio Frazzoli (Spring Semester 2024) and the textbook authored by Gioele Zardini. This course summary is intended solely for educational and academic purposes. Any other use or distribution is strictly prohibited. diff --git a/src/BSB/reach_observability.tex b/src/BSB/reach_observability.tex index 8d24c81..b87d7f4 100644 --- a/src/BSB/reach_observability.tex +++ b/src/BSB/reach_observability.tex @@ -1,7 +1,7 @@ % Please ignore the mess I've created here, I don't feel comfortable either. \begin{center} \begin{tikzcd}[ampersand replacement=\&] - \substack{\text{unstable} \\ \text{modes}} \&[2em] \substack{\text{all} \\ \text{modes}} \&[-3em] \\[-2em] + \substack{\text{unstable} \\ \text{modes} \\ \in{} \mathcal{R} \\ \in{} \mathcal{O}} \&[2em] \substack{\text{all} \\ \text{modes}} \&[-3em] \\[-2em] \& \mathcal{C} \arrow[d, teal, xshift=-0.5ex, "{\color{teal}\begin{matrix} %chktex 18 \mathsf{CT}: & \text{always} \\ @@ -10,7 +10,7 @@ "' %chktex 18 ] \& {\footnotesize\begin{cases} \mathsf{CT:} & \mathbf{x}_0 = 0 \to~\mathbf{x_f} \\ - \mathsf{DT:} & \mathbf{x_f} \to~0 + \mathsf{DT:} & \mathbf{x}_0 = \mathbf{x_f} \to~0 \end{cases}} \\[2em] \mathcal{S} \& \mathcal{R} \arrow[l]\arrow[u, xshift=0.5ex] \& diff --git a/src/images/Kalman_decomp.png b/src/images/Kalman_decomp.png index 0844b51..1102a78 100644 --- a/src/images/Kalman_decomp.png +++ b/src/images/Kalman_decomp.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1932d7d4add44a03b02e8fdf7c3d28da5c7ae41598c86759a9848ddc8561e4ff -size 24376 +oid sha256:3f14823ca673d292bf99fb9dfbc15f7851f492720a154897b98d520512fae54c +size 25405 diff --git a/src/images/SGT_interconn.png b/src/images/SGT_interconn.png new file mode 100644 index 0000000..e0dba67 --- /dev/null +++ b/src/images/SGT_interconn.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b81c9ed049181a265d874845dddab940c67091eb054933ba4fc3ea56256a8b9a +size 26015 diff --git a/src/images/digit_controller_design.drawio b/src/images/digit_controller_design.drawio index e2c1b8a..7432a45 100644 --- a/src/images/digit_controller_design.drawio +++ b/src/images/digit_controller_design.drawio @@ -1,6 +1,6 @@ - + - + @@ -48,17 +48,6 @@ - - - - - - - - - - - @@ -81,9 +70,20 @@ - + + + + + + + + + + + + diff --git a/src/images/digit_controller_design.pdf b/src/images/digit_controller_design.pdf index 8093283..d9a9bcb 100644 Binary files 
a/src/images/digit_controller_design.pdf and b/src/images/digit_controller_design.pdf differ diff --git a/src/sections/LQG.tex b/src/sections/LQG.tex index 686bc97..b3eb8c1 100644 --- a/src/sections/LQG.tex +++ b/src/sections/LQG.tex @@ -151,7 +151,7 @@ \subsection{Transfer Function} \textbf{Remarks} \begin{itemize} - \item In $G_{yr}^{cl}$, only the closed-loop poles of the controller are retained (observer and system are cancelled) and open-loop zeros are retained. + \item In $G_{yr}^{cl}$, only the closed-loop poles of the controller (observer and system are cancelled) and open-loop zeros are retained. \item $G_{uy}$, has the same order as the plant and the number of closed-loop poles is doubled (all stable) \end{itemize} diff --git a/src/sections/LQR.tex b/src/sections/LQR.tex index 0365d86..83e7a67 100644 --- a/src/sections/LQR.tex +++ b/src/sections/LQR.tex @@ -13,14 +13,14 @@ \subsection{Cost Functional} \begin{itemize} \item is symmetric i.e. $\mathbf{Q}=\mathbf{Q}^{\mathsf{T}}$ \item is positive semidefinite i.e. $\mathbf{x}^{\mathsf{T}} \mathbf{Qx} \geq 0$ or $\mathrm{eig}(\mathbf{Q}) \geq 0$ + % TODO: Why must it be sqrt (below)? Compare with LQE technical conditions \item must give positive weights to unstable modes to penalize them in the cost function (the pair $\mathbf{A}, \sqrt{\mathbf{Q}}$ is detectable) \end{itemize} \item $\mathbf{R}$ \begin{itemize} \item must be positive definite i.e. $\mathbf{u}^{\mathsf{T}} \mathbf{Ru} >0\; \forall \mathbf{u}\neq \mathbf{0}$ or $\mathrm{eig}(\mathbf{R}) > 0$ (otherwise not every control effort is penalized). - \item is symmetric (see derivation in H2-Sythesis Section~\ref{h2_synth}) + \item is symmetric (see derivation in H2-Synthesis Section~\ref{h2_synth}) \end{itemize} - % TODO: Why must Q but not R be symmetric? -> I think this should be true. -> From H2 Synthesis: R = D^TD which yields a symmetric matrix \item The pair $\mathbf{A,B}$ must be stabilizable \item In general, there is an additional cross-coupling term $\mathbf{x}^{\mathsf{T}}\mathbf{Nu}$ in $J$ which is often neglected i.e.\ equal to $0$ \end{itemize} @@ -168,12 +168,20 @@ \subsection{Symmetric Root Locus} \begin{itemize} \item $2n$ branches, where $n$ is the size of $\mathbf{A}$ + \begin{itemize} + \item like in standard RL, every branch starts in an OL pole + \item but asymptotic behavior different from standard RL + \end{itemize} \item symmetric to the real and imaginary axis \item LQR closed-loop poles are all in the LHP \item $\rho \rightarrow \infty$ (expensive control):\newline CL poles approach stable OL poles and the mirror-images of the unstable OL poles (all on LHP) \item $\rho \rightarrow 0$ (cheap control):\newline CL poles approach MP OL zeros and the mirror-images of the NMP OL zeros or go to infinity along the LHP asymptotes (all on LHP) + \item note that for $\rho \rightarrow \infty$ there are no asymptotes to $\infty$ but only convergence towards (mirror) OL poles + \begin{itemize} + \item for $\rho \rightarrow 0$ there is also convergence to $\infty$ + \end{itemize} \end{itemize} \begin{examplesection}[Example: Symmetric Root Locus] diff --git a/src/sections/MIMO_intro.tex b/src/sections/MIMO_intro.tex index 35d56d4..07d9292 100644 --- a/src/sections/MIMO_intro.tex +++ b/src/sections/MIMO_intro.tex @@ -119,7 +119,16 @@ \subsubsection{Stability} Same condition as in SISO systems: \begin{itemize} - \item \textbf{Stable} if $\mathrm{Re}(p_i \le 0)$ and the algebraic multiplicity of poles with zero real part is equal to their geometric multiplicity. 
+ \item \textbf{Stable} if $\mathrm{Re}(p_i \le 0)$ and the algebraic multiplicity of poles with zero real part is equal to their geometric multiplicity. Reminder: + \begin{itemize} + \item Algebraic multiplicity: multiplicity of eigenvalue $\lambda_i$ ($\mathrm{Re}(\lambda_i)=0$ in our case) + \item Geometric multiplicity: + \begin{itemize} + \item Number of independent eigenvectors associated with eigenvalue $\lambda_i$ + \item Dimension of eigenspace $\ker(\lambda_i \mathbb{1}-\mathbf{A})$ (number of free parameters). + \item E.g.\ if $\lambda_i$ has algebraic multiplicity 2, then $\ker(\lambda_i \mathbb{1}-\mathbf{A})$ must be a plane. + \end{itemize} + \end{itemize} \item \textbf{Asymptotically stable}\ if $\mathrm{Re}(p_i<0)$ \end{itemize} @@ -191,7 +200,6 @@ \subsubsection{Transmission Zeros} n_{\max}(s)=(s+3)(s+2) \end{equation*} 5. Identify zeros: $z_1=-3,\;z_2=-2$ - % TODO: We could add the calculation of the zero directions here \end{examplesection} \subsubsection{Invariant Zeros} @@ -214,6 +222,7 @@ \subsubsection{Invariant Zeros} \mathbf{u}_i \end{bmatrix}=\mathbf{0} \end{equation*} +and then solving the resulting linear system. \ptitle{Remarks} @@ -276,7 +285,7 @@ \subsubsection{Gilbert's Realization} Given \begin{equation*} - \mathbf{G}(s)=\frac{\mathbf{H}(s)}{d(s)}+\mathbf{D} + \mathbf{G}(s)=\frac{\mathbf{H}(s)}{d(s)}+\mathbf{D},\quad \mathbf{G}\in \mathbb{C}^{l\times m} \end{equation*} where \begin{itemize} @@ -286,8 +295,11 @@ \subsubsection{Gilbert's Realization} \newpar{} \begin{enumerate} - \item Calculate the \textbf{poles} $p_i$ of the system by determine the roots of $d(s)$. - \item If $d(s)$ has \textbf{no repeated roots}, one can use Gilbert's method (otherwise the generalized Gilbert's method has to be used). + \item Calculate $\mathbf{D}=\lim_{s\to\infty}\mathbf{G}(s)$ + \item Calculate the \textbf{poles} $p_i$ of the system by determining the roots of $d(s)$. + \begin{itemize} + \item If $d(s)$ has \textbf{no repeated roots}, one can use Gilbert's method (otherwise the generalized Gilbert's method has to be used). + \end{itemize} \item Perform a (matrix) partial fraction expansion of $\mathbf{G}(s)$ \begin{equation*} \mathbf{G}(s)=\frac{\mathbf{R_1}}{s-p_1}+\frac{\mathbf{R_2}}{s-p_2}+\ldots+\frac{\mathbf{R_{n_d}}}{s-p_{n_d}}+\mathbf{D} \end{equation*} @@ -298,6 +310,7 @@ \subsubsection{Gilbert's Realization} \item Calculate the ranks $r_i = \text{rank}(\mathbf{R_i})$ which indicate the number of poles at location $p_i$ that are needed. \begin{itemize} \item The order of the resulting state space model will be $n=\sum_{i=1}^{n_d}r_i\geq n_d$ + \item Reminder: The row rank and column rank of a matrix are \textbf{always equal}! \end{itemize} \item Now the $\mathbf{A}$ matrix can be assembled \begin{equation*} @@ -338,30 +351,9 @@ \subsubsection{Gilbert's Realization} \begin{itemize} \item Figures out the minimum number of ``copies'' of each pole that we need to construct a realization of a MIMO transfer function. \item $\mathbf{A}$ simply contains diagonal matrices of dimension $r_i \times r_i$ with $p_i$ on their diagonals. + \item Transmission zeros and invariant zeros are equal in this (minimal) realization. \end{itemize} -% TODO: Add an example?
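To make the procedure above concrete, here is a worked sketch (an illustrative addition, not from the lecture slides) that completes the small transfer function from the commented-out draft below; the residue factorization $\mathbf{R}_1=\mathbf{C}_1\mathbf{B}_1$ is the standard Gilbert step, and the names $\mathbf{C}_1,\mathbf{B}_1$ are chosen here for illustration only.

\begin{examplesection}[Example: Gilbert's Realization (sketch)]
    % Illustrative sketch completing the commented-out draft example below.
    Given
    \begin{equation*}
        \mathbf{G}(s)=\begin{bmatrix} 1 & \frac{1}{s+1} \\ 0 & 1 \end{bmatrix},\quad d(s)=s+1,\quad \mathbf{D}=\lim_{s\to\infty}\mathbf{G}(s)=\mathbf{I}
    \end{equation*}
    the only pole is $p_1=-1$ (no repeated roots), with a single residue
    \begin{equation*}
        \mathbf{R}_1=\lim_{s\to -1}(s+1)\bigl(\mathbf{G}(s)-\mathbf{D}\bigr)=\begin{bmatrix} 0 & 1 \\ 0 & 0 \end{bmatrix},\qquad r_1=\operatorname{rank}(\mathbf{R}_1)=1
    \end{equation*}
    so the realization has order $n=r_1=1$. Factoring $\mathbf{R}_1=\mathbf{C}_1\mathbf{B}_1$ with $\mathbf{C}_1={\begin{bmatrix}1 & 0\end{bmatrix}}^{\mathsf{T}}$ and $\mathbf{B}_1=\begin{bmatrix}0 & 1\end{bmatrix}$ yields
    \begin{equation*}
        \mathbf{A}=\begin{bmatrix}-1\end{bmatrix},\quad \mathbf{B}=\begin{bmatrix}0 & 1\end{bmatrix},\quad \mathbf{C}=\begin{bmatrix}1 \\ 0\end{bmatrix},\quad \mathbf{D}=\mathbf{I}
    \end{equation*}
    and one can check that $\mathbf{C}{(s\mathbf{I}-\mathbf{A})}^{-1}\mathbf{B}+\mathbf{D}=\mathbf{G}(s)$.
\end{examplesection}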
-% \begin{examplesection}[Example of Gilbert's Realization] -% Given the TF -% \begin{equation*} -% \mathbf{G}(s) = \begin{bmatrix} -% 1 & \frac{1}{s+1} \\ -% 0 & 1 -% \end{bmatrix} -% \end{equation*} -% The LCM denominator $d(s)$ is -% \begin{equation*} -% d(s) = s+1 -% \end{equation*} -% and the feed-through term is -% \begin{equation*} -% \mathbf{D} = \mathbf{I} -% \end{equation*} -% -% \end{examplesection} - -% \subsection{Signal Amplification} - \subsection{Singular Value Decomposition (SVD)} Any(!) matrix $\mathbf{A}\in \mathbb{C}^{m\times n}$ can be decomposed as @@ -388,9 +380,9 @@ \subsection{Singular Value Decomposition (SVD)} 0 & \sigma_2 & 0 \end{bmatrix}}_{\textsf{Scaling \& Dimensions}} \underbrace{\begin{bmatrix} - \text{---} & \mathbf{v_1} & \text{---} \\ - \text{---} & \mathbf{v_2} & \text{---} \\ - \text{---} & \mathbf{v_3} & \text{---} + \text{---} & \mathbf{v_1}^H & \text{---} \\ + \text{---} & \mathbf{v_2}^H & \text{---} \\ + \text{---} & \mathbf{v_3}^H & \text{---} \end{bmatrix}}_{\textsf{Rotation}} \end{equation*} with eigenvalues @@ -404,7 +396,8 @@ \subsection{Singular Value Decomposition (SVD)} \begin{itemize} \item See Appendix:~\ref{app:mat_prop} for detailed understanding. - \item $\mathbf{U},\:\mathbf{V}$ are unitary i.e. $\mathbf{U}^H \mathbf{U}=\mathbf{U}\mathbf{U}^H=\mathbf{I}$ + \item $\mathbf{U},\:\mathbf{V}$ are unitary i.e. $\mathbf{U}^H \mathbf{U}=\mathbf{U}\mathbf{U}^H=\mathbf{I}$. + \item For real $\mathbf{A}$, $\mathbf{A}^H \mathbf{A}$ and $\mathbf{A} \mathbf{A}^H$ are \textbf{symmetric}. \item Unitary implies positive semi-definite and real, non-negative eigenvalues. \item (Upper- and lower-) diagonal matrices have their singular values (or eigenvalues) on the main diagonal. \end{itemize} @@ -433,18 +426,22 @@ \subsubsection{Procedure} \mathbf{0} & \cdots & \mathbf{0} \end{bmatrix} \end{align*} - \item calculate the eigenvectors of $\mathbf{A}^{\mathsf{H}}\mathbf{A}$. Use the same order as before and normalize the vectors if necessary. + \item calculate the eigenvectors of $\mathbf{A}^{\mathsf{H}}\mathbf{A}$ (or start with 4.\ if $m not assumed for this example: If you remove them (=0), you're getting the the same F,K/L as in the LQR/LQE chapters (both original ones and h2 synthesis) - \subsection[H-infinity Synthesis]{$\mathcal{H}_\infty$ Synthesis} $\mathcal{H}_\infty$ minimizes the worst-case input-output gain (singular values). @@ -171,7 +185,7 @@ \subsubsection{LQG} \item The expected covariances of the disturbance and sensor noise \end{enumerate} i.e.\ \textbf{optimal} w.r.t.\ time-domain specifications. \item $\mathcal{H}_2$ specifications are given in \textbf{time domain}: difficult to handle frequency-domain specifications. - \item For $\mathcal{H}_2$ design one could use a ``mixed sensitivity'' approach, and further augment the plant $P$ with frequency-dependent weigthing functions. + \item For $\mathcal{H}_2$ design one could use a ``mixed sensitivity'' approach, and further augment the plant $P$ with frequency-dependent weighting functions. \item $\mathcal{H}_\infty$ provides a more direct way to handle \textbf{frequency-domain specifications.} i.e.\ it allows to achieve a desired level of robustness to disturbances and noise (``Bode obstacles''). \end{itemize} @@ -204,7 +218,7 @@ \subsubsection{LQG} \begin{equation*} \|\mathbf{z}\|_{\mathcal{L}_2}^2-\gamma^2\|\mathbf{w}\|_{\mathcal{L}_2}^2 < 0 \end{equation*} -where we search for the smallest $\gamma$ such that the controller can achieve negative cost. 
We \textbf{need} $\boldsymbol{\gamma}\mathbf{\le1}$ for a stabilizing controller as otherways for larger gamma one could easily achieve negative cost even though the energy of the disturbance gets not damped at all! The formula can be understood as +where we search for the smallest $\gamma$ such that the controller can achieve negative cost. We \textbf{need} $\boldsymbol{\gamma}\mathbf{\le1}$ for a stabilizing controller as otherwise for larger gamma one could easily achieve negative cost even though the energy of the disturbance gets not damped at all! The formula can be understood as \begin{itemize} \item $\mathbf{z}$ is the performance output given some disturbance $\mathbf{w}$ \item We want to damp $\mathbf{z}$ given $\mathbf{w}$ (remember the definition of the induced $\mathcal{L}_2$ norm above) @@ -222,16 +236,16 @@ \subsubsection{LQG} \item $\begin{bmatrix}\mathbf{A}-j\omega \mathbf{I}&\mathbf{B}_w\end{bmatrix},\begin{bmatrix}\mathbf{A}^{\mathsf{H}}-j\omega \mathbf{I}&\mathbf{C}_z^{\mathsf{H}}\end{bmatrix}$ must have full row rank \end{enumerate} \newpar{} -A controller $\mathbf{K}$ fulfilling the cost inequality exists, only if +A controller $\mathbf{K}$ fulfilling the cost inequality exists, only if \begin{enumerate} \item The following ARE has a solution for $\mathbf{X}_{\infty}$ - \begin{equation*} - \mathbf{A}^{\mathsf{H}}\mathbf{X}_{\infty}+\mathbf{X}_{\infty}\mathbf{A}+\mathbf{C}_{z}^{\mathsf{H}}\mathbf{C}_{z}=\mathbf{X}_{\infty}(\mathbf{B}_{u}\mathbf{B}_{u}^{\mathsf{H}}-\gamma^{-2}\mathbf{B}_{w}\mathbf{B}_{w}^{\mathsf{H}})\mathbf{X}_{\infty} - \end{equation*} + \begin{equation*} + \mathbf{A}^{\mathsf{H}}\mathbf{X}_{\infty}+\mathbf{X}_{\infty}\mathbf{A}+\mathbf{C}_{z}^{\mathsf{H}}\mathbf{C}_{z}=\mathbf{X}_{\infty}(\mathbf{B}_{u}\mathbf{B}_{u}^{\mathsf{H}}-\gamma^{-2}\mathbf{B}_{w}\mathbf{B}_{w}^{\mathsf{H}})\mathbf{X}_{\infty} + \end{equation*} \item The following ARE has a solution for $\mathbf{Y}_{\infty}$ - \begin{equation*} - \mathbf{A}\mathbf{Y}_\infty + \mathbf{Y}_\infty \mathbf{A}^{\mathsf{H}} + \mathbf{B}_w^{\mathsf{H}} \mathbf{B}_w = \mathbf{Y}_\infty (\mathbf{C}_y \mathbf{C}_y^{\mathsf{H}} - \gamma^{-2} \mathbf{C}_z \mathbf{C}_z^{\mathsf{H}}) \mathbf{Y}_\infty - \end{equation*} + \begin{equation*} + \mathbf{A}\mathbf{Y}_\infty + \mathbf{Y}_\infty \mathbf{A}^{\mathsf{H}} + \mathbf{B}_w^{\mathsf{H}} \mathbf{B}_w = \mathbf{Y}_\infty (\mathbf{C}_y \mathbf{C}_y^{\mathsf{H}} - \gamma^{-2} \mathbf{C}_z \mathbf{C}_z^{\mathsf{H}}) \mathbf{Y}_\infty + \end{equation*} \item The matrix $\gamma^2 \mathbf{I} - \mathbf{Y}_{\infty} \mathbf{X}_{\infty}$ is positive definite \end{enumerate} @@ -250,14 +264,14 @@ \subsubsection{LQG} \begin{itemize} \item The conditions are only fulfilled if $\gamma\le 1$ \item For $\gamma>1$ use relaxed weights - \item The final controller gains are - \begin{equation*} - \mathbf{F}_u=-\mathbf{B}_u^{\mathsf{H}}\mathbf{X}_\infty,\quad \mathbf{F}_w=\frac1{\gamma^2}\mathbf{B}_w^{\mathsf{H}}\mathbf{X}_\infty - \end{equation*} - \item The final observer gain is - \begin{equation*} - \mathbf{L}=-{(\mathbf{I}-\gamma^{-2}\mathbf{Y}_\infty \mathbf{X}_\infty)}^{-1}\mathbf{Y}_\infty \mathbf{C}_y^{\mathsf{H}} - \end{equation*} + \item The final controller gains are + \begin{equation*} + \mathbf{F}_u=-\mathbf{B}_u^{\mathsf{H}}\mathbf{X}_\infty,\quad \mathbf{F}_w=\frac1{\gamma^2}\mathbf{B}_w^{\mathsf{H}}\mathbf{X}_\infty + \end{equation*} + \item The final observer gain is + \begin{equation*} + \mathbf{L}=-{(\mathbf{I}-\gamma^{-2}\mathbf{Y}_\infty 
\mathbf{X}_\infty)}^{-1}\mathbf{Y}_\infty \mathbf{C}_y^{\mathsf{H}} + \end{equation*} \end{itemize} \newpar{} diff --git a/src/sections/modal_decomposition.tex b/src/sections/modal_decomposition.tex index efdf9f2..cf4cf08 100644 --- a/src/sections/modal_decomposition.tex +++ b/src/sections/modal_decomposition.tex @@ -77,13 +77,25 @@ \subsubsection{Homogeneous Response} For a given initial condition $x(0)=x_0$ the homogeneous response can be computed as follows:\\ \ptitle{In Modal Coordinates} \begin{align*} - \tilde{\mathbf{x}}(t) & =e^{\tilde{\mathbf{A}}t}\tilde{\mathbf{x}}(0) \\ - \tilde{x}_i(t) & =e^{\lambda_{i}t}\tilde{x}_i(0) + \tilde{\mathbf{x}}(t) & = e^{\tilde{\mathbf{A}}t}\tilde{\mathbf{x}}(0) \\ + \tilde{x}_i(t) & = e^{\lambda_{i}t}\tilde{x}_i(0) \end{align*} -which means that each mode evolves independently (of other modes) over time.\\ +which means that +\begin{itemize} + \item each mode evolves independently (of other modes) over time. + \item $\tilde{\mathbf{x}}(t)$ is obtained elementwise from $\tilde{x}_i(t)$. +\end{itemize} + +\newpar{} \ptitle{In Standard Coordinates} + +The modes in standard coordinates are given by +\begin{equation*} + \mathbf{m}_i(t) = e^{\lambda_{i}t}\tilde{x}_i(0)\mathbf{v}_i +\end{equation*} +so that \begin{equation*} - \mathbf{x}(t)=\sum_{i=1}^{n}e^{\lambda_{i}t}\tilde{x}_i(0)\mathbf{v}_i + \mathbf{x}(t)=\sum_{i=1}^{n} \mathbf{m}_i(t) = \sum_{i=1}^{n}e^{\lambda_{i}t}\tilde{x}_i(0)\mathbf{v}_i \end{equation*} because for an initial condition $x_0 = v_i$ the time response is given by \begin{equation*} @@ -150,5 +162,5 @@ \subsubsection{Homogeneous Response} \cos(\omega t) \\ \textcolor{red}{-\omega\sin(\omega t)} \end{bmatrix} - \end{align*} + \end{align*} \end{examplesection} diff --git a/src/sections/modern_controller_synthesis.tex b/src/sections/modern_controller_synthesis.tex index 4be53c7..1fe1fbf 100644 --- a/src/sections/modern_controller_synthesis.tex +++ b/src/sections/modern_controller_synthesis.tex @@ -170,7 +170,7 @@ \subsection{State Space Representation} As $u(s)=K(s)y(s)$ the closed loop TF from the exogenous inputs $w$ to the performance outputs $z$ is given by the \textit{Linear Fractional Transformation}: \begin{equation*} - F(s) = G_{zw}(s) + G_{zu}(s)K(s){(I-G_{yu}(s))}^{-1}G_{yw}(s) + F(s) = G_{zw}(s) + G_{zu}(s)K(s){(I-G_{yu}(s) K(s))}^{-1}G_{yw}(s) \end{equation*} \textbf{MATLAB}: \texttt{F = lft(G, K)} @@ -284,9 +284,7 @@ \subsubsection{Controller Synthesis} \end{examplesection} \subsection{Youla's Q Parameterization} -% TODO: -%-Structure of this section could be optimized -%-Remark on SISO/MIMO + One can get all stabilizing controllers for a given plant as a function of a single stable transfer function Q(s). This is called the Youla parameterization (Q-parameterization). \begin{itemize} \item Extension of a full state feedback controller (including observer) with a system \textcolor{purple}{$Q$} that takes the innovation as an input. diff --git a/src/sections/mpc.tex b/src/sections/mpc.tex index 0652411..3186d51 100644 --- a/src/sections/mpc.tex +++ b/src/sections/mpc.tex @@ -58,7 +58,7 @@ \subsection{Terminal Cost}\label{mpc_terminal_cost} \end{equation*} If we ensure that the terminal cost of the next step $V(f(\mathbf{x,u}))$ is less or equal to the current terminal cost $V(\mathbf{x})$ we get a stabilizing control law. -However, the MPC control law will have better performance -- if not optimal, i.e. 
% TODO: Maybe explain this in more detail +However, the MPC control law will have better performance -- if not optimal, i.e. \begin{equation*} J^*_\infty(\mathbf{x}) \leq J^*_H(\mathbf{x}) \leq J^*_{CLF}(\mathbf{x}) \end{equation*} @@ -104,8 +104,6 @@ \subsubsection{Barrier Interior-Point Method} \newpar{} The problem is then solved iteratively using gradient descent or Newton method for $t=\mu^k t_0$ with $t_0>0, \mu > 1$ where $k$ is the iteration variable. The solution of the pervious step is used as a starting point for the next step. -% TODO: Add graphical example like the one drawn on the blackboard. - \newpar{} \ptitle{Remarks:} \begin{itemize} diff --git a/src/sections/nonlinear_systems.tex b/src/sections/nonlinear_systems.tex index 5464222..c45b4f4 100644 --- a/src/sections/nonlinear_systems.tex +++ b/src/sections/nonlinear_systems.tex @@ -100,7 +100,6 @@ \subsubsection{Lyapunov Functions} \begin{itemize} \item If there could be multiple equilibrium points one needs to use LaSalle. \item $V(\mathbf{x})=0$ must hold in any equilibrium point, even if there are more then one. - %TODO:: must the "only equilibrium point" from above be (0,...0)? -> I think not -> if multiple equilibria are possible, they can't all be 0 -> doesn't have to be 0 \item If one has more than one state vector, then \begin{equation*} \dot{V}=\sum_{i} \frac{\partial V(\mathbf{x}_i)}{\partial \mathbf{x}_i} \frac{\partial \mathbf{x}_i(t)}{\partial t} @@ -183,8 +182,8 @@ \subsection{Control Lyapunov Functions} \newpar{} A CLF satisfies \noindent\begin{align*} - V(\mathbf{x}) & \geq 0 & & \text{positive definite} \\ - V(\mathbf{x}) & = 0 \Leftrightarrow \mathbf{x} = 0 & & \text{radially unbounded} \\ + V(\mathbf{x}) & \geq 0 & & \text{positive definite} \\ + V(\mathbf{x}) & = 0 \Leftrightarrow \mathbf{x} = 0 & & \text{radially unbounded} \\ \frac{d}{dt} V(\mathbf{x}, \tilde{\mathbf{u}}(\mathbf{x})) & = \frac{\partial V(\mathbf{x})}{\partial \mathbf{x}} f(\mathbf{x}, \tilde{\mathbf{u}}(\mathbf{x})) \leq 0 & & \forall \mathbf{x}\neq 0 \end{align*} @@ -304,7 +303,7 @@ \subsubsection{Backstepping Control} Using $\dot{\mathbf{u}}_0=\dot{\mathbf{z}}-\dot{\mathbf{e}}$ one finally finds the stabilizing control law as \begin{align*} u_1(\mathbf{x},\mathbf{z}) = \frac{1}{g_1(\mathbf{x},\mathbf{z})} & \left(\frac{\partial \mathbf{u}_0(\mathbf{x})}{\partial \mathbf{x}}\left(f_0(\mathbf{x})+g_0(\mathbf{x})\mathbf{z}\right)-f_1(\mathbf{x},\mathbf{z}) \right. \\ - & \left. -\frac{\partial V_0(\mathbf{x})}{\partial \mathbf{x}}g_0(\mathbf{x})-k_1(\mathbf{z}-\mathbf{u}_0(\mathbf{x}))\right) + & \left. 
-\frac{\partial V_0(\mathbf{x})}{\partial \mathbf{x}}g_0(\mathbf{x})-k_1(\mathbf{z}-\mathbf{u}_0(\mathbf{x}))\right) \end{align*} \newpar{} @@ -323,9 +322,9 @@ \subsubsection{Backstepping Control} \paragraph{Recursive Backstepping} The same approach can be used recursively as \begin{align*} - \dot{\mathbf{x}} & =\quad f_{0}(\mathbf{x})+g_{0}(\mathbf{x})\mathbf{z}_{1}, \\ - \dot{\mathbf{z}}_{1} & =\quad f_{1}(\mathbf{x},\mathbf{z}_{1})+g_{1}(\mathbf{x},\mathbf{z}_{1})\mathbf{z}_{2}, \\ - & \vdots \\ + \dot{\mathbf{x}} & =\quad f_{0}(\mathbf{x})+g_{0}(\mathbf{x})\mathbf{z}_{1}, \\ + \dot{\mathbf{z}}_{1} & =\quad f_{1}(\mathbf{x},\mathbf{z}_{1})+g_{1}(\mathbf{x},\mathbf{z}_{1})\mathbf{z}_{2}, \\ + & \vdots \\ \dot{\mathbf{z}}_{m} & =\quad f_{m}(\mathbf{x},\mathbf{z}_{1},\ldots,\mathbf{z}_{m})+g_{1}(\mathbf{x},\mathbf{z}_{m})\mathbf{u} \end{align*} diff --git a/src/sections/observers.tex b/src/sections/observers.tex index 43face6..c8ccc6e 100644 --- a/src/sections/observers.tex +++ b/src/sections/observers.tex @@ -71,19 +71,19 @@ \subsubsection{Observer Pole Placement} $\mathbf{L}={\left[\ell_0,\cdots,\ell_{n-1}\right]}^{\mathsf{T}}$ can be obtained by comparing the eigenvalues of the observer with the desired ones: \noindent\begin{equation*} - \det\bigl(\lambda \mathbf{I} -(\mathbf{A}-\mathbf{LC})\bigr) \overset{!}{=} \varphi_{\mathrm{cl}}(\lambda) + \det\bigl(\lambda \mathbf{I} -(\mathbf{A}-\mathbf{LC})\bigr) \overset{!}{=} \varphi_{\mathrm{cl,des}}(\lambda) \end{equation*} \ptitle{Ackermann Observer Design} Similarly to state feedback we get \begin{align*} - \mathbf{L} & =\varphi_{\mathrm{cl}}(\mathbf{A})\mathbf{O}^{-1}\begin{bmatrix} - 0, & \ldots, & 0, & 1 - \end{bmatrix}^{\mathsf{T}} \\ - \varphi_{\mathrm{cl}}(s) & =s^n+\alpha_{n-1}s^{n-1}+\ldots+\alpha_0=(s-\lambda_1)\ldots(s-\lambda_n) \\ - \varphi_{\mathrm{cl}}(\mathbf{A}) & =\mathbf{A}^n+\alpha_{n-1}\mathbf{A}^{n-1}+\ldots+\alpha_0 \mathbf{I} \\ - & = (\mathbf{A}-\lambda_1 \mathbf{I})\ldots(\mathbf{A}-\lambda_n \mathbf{I}) + \mathbf{L} & =\varphi_{\mathrm{cl,des}}(\mathbf{A})\mathbf{O}^{-1}\begin{bmatrix} + 0, & \ldots, & 0, & 1 + \end{bmatrix}^{\mathsf{T}} \\ + \varphi_{\mathrm{cl,des}}(s) & =s^n+\alpha_{n-1}s^{n-1}+\ldots+\alpha_0=(s-\lambda_1)\ldots(s-\lambda_n) \\ + \varphi_{\mathrm{cl,des}}(\mathbf{A}) & =\mathbf{A}^n+\alpha_{n-1}\mathbf{A}^{n-1}+\ldots+\alpha_0 \mathbf{I} \\ + & = (\mathbf{A}-\lambda_1 \mathbf{I})\ldots(\mathbf{A}-\lambda_n \mathbf{I}) \end{align*} \textbf{Remarks:} @@ -183,25 +183,26 @@ \subsubsection{Optimal LQE Design} \end{equation*} is minimized by solving the Riccati equation (\textit{ARE}) \begin{equation*} - \mathbf{AY}+\mathbf{YA}^{\mathsf{T}}-\mathbf{YC}^{\mathsf{T}} \mathbf{R}^{-1}\mathbf{CY}+\mathbf{Q}=0 + \mathbf{AY}+\mathbf{YA}^{\mathsf{T}}-\mathbf{YC}^{\mathsf{T}} \mathbf{R}^{-1}\mathbf{CY}+\mathbf{Q}=\mathbf{0} \end{equation*} for the \textbf{positive definite} matrix $\mathbf{Y}$ and choosing the \textit{optimal estimation gain} $\mathbf{L}$ as \begin{equation*} - \mathbf{L}=\mathbf{R}^{-1}\mathbf{CY} + \mathbf{L}^{\mathsf{T}}=\mathbf{R}^{-1}\mathbf{CY} \end{equation*} \ptitle{Remarks}: \begin{itemize} - \item the system has to be detectable (check for observability) and ($\mathbf{A,Q}$) stabilizable (check for reachability)\newline + \item the system has to be detectable (check for observability) and ($\mathbf{A,Q}$) stabilizable (check for reachability, treat $\mathbf{Q}$ like $\mathbf{B}$)\newline (e.g. 
$n=2: \mathbf{\mathcal{R}}= \left[\mathbf{Q}\quad \mathbf{AQ}\right]$) - \item $\mathbf{Y}$ is \textit{symmetric}, \textit{positive definite} ($\mathrm{Re}(\lambda_i) > 0, \mathrm{Im}(\lambda_i) = 0$) and has to fulfil the \textit{Sylvester criterion} e.g.\ for $n=2$: + \item $\mathbf{Y}$ is \textit{real}, \textit{symmetric}, \textit{positive definite} ($\mathrm{Re}(\lambda_i) > 0, \mathrm{Im}(\lambda_i) = 0$) and has to fulfil the \textit{Sylvester criterion} e.g.\ for $n=2$: \noindent\begin{equation*} \mathbf{Y}=\begin{bmatrix} a & b \\ b & d \end{bmatrix}; \quad a>0, \quad ad-b^2>0 \end{equation*} + \item for symmetric $\mathbf{Y}$ one has that $\mathbf{Y}\mathbf{A}^{\mathsf{T}}={(\mathbf{AY})}^{\mathsf{T}}$ \item as expected from duality, $\mathbf{L}$ is the transpose of $\mathbf{K}$, obtained for the pair $(\mathbf{A}^{\mathsf{T}}, \mathbf{C}^{\mathsf{T}})$, and for weight matrices $\mathbf{Q}$ and $\mathbf{R}$. \item in practice, we can measure the noise to get a first estimate of $\mathbf{Q}$, $\mathbf{R}$ \item as a guideline, one should make the innovations $\mathbf{C}\boldsymbol{\eta}$ as white as possible diff --git a/src/sections/pole_placement.tex b/src/sections/pole_placement.tex index 763d0ec..4ef4c97 100644 --- a/src/sections/pole_placement.tex +++ b/src/sections/pole_placement.tex @@ -48,35 +48,59 @@ \subsubsection{Reachable Canonical Form} \end{align*} \subsubsection{General Case} -If the system is \textbf{controllable} but not in reachable canonical form, the following steps have to be applied +If the system is \textbf{controllable} but not in reachable canonical form, the following steps have to be applied to transform the system into reachable canonical form $\tilde{\mathbf{A}},\tilde{\mathbf{B}},\tilde{\mathbf{C}},\tilde{\mathbf{D}}$. \begin{enumerate} - \item Similarity transform of $\mathbf{A,B,C,D}$ into reachable canonical form $\mathbf{A',B',C',D'}$ with transformation matrix $\mathbf{T}$: + \item Calculate $\tilde{\mathbf{A}}$ by comparing the characteristic polynomial of $\mathbf{A}$ with the one of the parametric reachable canonical form $\tilde{\mathbf{A}}$. + \item Find transformation matrix $\mathbf{T}$ (using $\tilde{\mathbf{A}}$ from 1.\ and the known form for $\tilde{\mathbf{B}}$ to calculate $\tilde{\mathbf{R}}$): \noindent\begin{align*} - \mathbf{R}' & =\begin{bmatrix} - \mathbf{B}' & \mathbf{A}'\mathbf{B}' & \ldots & {(\mathbf{A}')}^{n-1}\mathbf{B}' - \end{bmatrix} \\ - & =\mathbf{TR} =\begin{bmatrix} - 0 & 0 & \dots & 1 \\ - \vdots & & \ddots & \\ - 0 & 1 & -a_{n-1} & \dots \\ - 1 & -a_{n-1} & a_{n-1}^2 - a_{n-2} & \dots \\ - \end{bmatrix} \\ - \mathbf{T} & = \mathbf{R'R}^{-1} + \tilde{\mathbf{R}} & =\begin{bmatrix} + \tilde{\mathbf{B}} & \tilde{\mathbf{A}}\tilde{\mathbf{B}} & \ldots & {(\tilde{\mathbf{A}})}^{n-1}\tilde{\mathbf{B}} + \end{bmatrix} \\ + & =\mathbf{TR} =\begin{bmatrix} + 0 & 0 & \dots & 1 \\ + \vdots & & \ddots & \\ + 0 & 1 & -a_{n-1} & \dots \\ + 1 & -a_{n-1} & a_{n-1}^2 - a_{n-2} & \dots \\ + \end{bmatrix} \\ + \mathbf{T} & = \tilde{\mathbf{R}}\mathbf{R}^{-1} \end{align*} - with $\mathbf{R}'$ the reachability matrix of the transformed system. Note that there are 2 different ways to calculate $\mathbf{R}'$ + with $\tilde{\mathbf{R}}$ the reachability matrix of the transformed system and $a_i$ are the coefficients of $\tilde{\mathbf{A}}$ + \item Calculate the $\tilde{\mathbf{C}}$ matrix. 
\item Apply method for reachable canonical form - \item Transform $\mathbf{K}'$ back to the original system: - \noindent\begin{align*} - \mathbf{K}^{\prime} & =\left[\alpha_{0}-a_{0},\quad\alpha_{1}-a_{1},\quad\ldots,\quad\alpha_{n-1}-a_{n-1}\right] \\ - \mathbf{K} & = \mathbf{K'RR}^{-1} - \end{align*} + \begin{equation*} + \tilde{\mathbf{K}} =\left[\alpha_{0}-a_{0},\quad\alpha_{1}-a_{1},\quad\ldots,\quad\alpha_{n-1}-a_{n-1}\right] + \end{equation*} + \item Transform $\tilde{\mathbf{K}}$ back to the original system: + \noindent\begin{equation*} + \mathbf{K} = \tilde{\mathbf{K}}\tilde{\mathbf{R}}\mathbf{R}^{-1} + \end{equation*} which is possible if $\mathbf{R}$ is invertible (corresponds to controllability). \end{enumerate} -\textbf{Remarks}: -\begin{itemize} - \item $\mathbf{A'},\mathbf{A}$ share their eigenvalues, therefore a comparison of coefficients can be used. -\end{itemize} +\newpar{} +\ptitle{Reminder}\label{RCF} + +The reachable canonical form is given by + +\begin{equation*} + G(s)=\frac{b_{n-1}s^{n-1}+b_{n-2}s^{n-2}+\cdots+b_0}{s^n+a_{n-1}s^{n-1}+\cdots+a_0}+d\\ +\end{equation*} +and +\begin{align*} + \mathbf{A}' & =\begin{bmatrix} + 0 & 1 & 0 & 0 & \cdots & 0 \\ + 0 & 0 & 1 & 0 & \cdots & 0 \\ + \vdots & & & \ddots & & 1 \\ + -a_0 & -a_1 & & \cdots & & -a_{n-1} + \end{bmatrix}, & \mathbf{B}' =\begin{bmatrix} + 0 \\ + 0 \\ + \vdots \\ + 1\end{bmatrix} \\ + \mathbf{C}' & =\begin{bmatrix} + b_0 & b_1 & \cdots & b_{n-1} + \end{bmatrix}, & \mathbf{D}' =[d] +\end{align*} \paragraph{Unreachable/Uncontrollable Modes} Controllability (or reachability) is a \textbf{necessary and sufficient condition} for \textbf{arbitrary} pole placement. @@ -97,13 +121,13 @@ \subsubsection{Ackermann's Formula} Assuming that the system is \textbf{controllable}, Ackermann's formula can be used to calculate $\mathbf{K}$ for \textbf{both} CT and DT systems: \noindent\begin{align*} - \mathbf{K} & =\begin{bmatrix} - 0, & \ldots, & 0, & 1 - \end{bmatrix} - \mathbf{R}^{-1}\varphi_{cl}(\mathbf{A}) \\ - \varphi_{cl}(s) & =s^n+\alpha_{n-1}s^{n-1}+\ldots+\alpha_0=(s-\lambda_1)\ldots(s-\lambda_n) \\ - \varphi_{cl}(\mathbf{A}) & =\mathbf{A}^n+\alpha_{n-1}\mathbf{A}^{n-1}+\ldots+\alpha_0 \mathbf{I} \\ - & = (\mathbf{A}-\lambda_1 \mathbf{I})\ldots(\mathbf{A}-\lambda_n \mathbf{I}) + \mathbf{K} & =\begin{bmatrix} + 0, & \ldots, & 0, & 1 + \end{bmatrix} + \mathbf{R}^{-1}\varphi_{cl,des}(\mathbf{A}) \\ + \varphi_{cl,des}(s) & =s^n+\alpha_{n-1}s^{n-1}+\ldots+\alpha_0=(s-\lambda_1)\ldots(s-\lambda_n) \\ + \varphi_{cl,des}(\mathbf{A}) & =\mathbf{A}^n+\alpha_{n-1}\mathbf{A}^{n-1}+\ldots+\alpha_0 \mathbf{I} \\ + & = (\mathbf{A}-\lambda_1 \mathbf{I})\ldots(\mathbf{A}-\lambda_n \mathbf{I}) \end{align*} \newpar{} @@ -115,6 +139,7 @@ \subsubsection{Ackermann's Formula} \end{itemize} \subsubsection{Reference Scaling} +% TODO: This assumes D = 0, right?
To ensure that the closed-loop systems follows unit steps with zero steady state error, the scaling vector $\mathbf{S}$ has to be chosen accordingly \noindent\begin{align*} G_{yr}^{cl}(s) & = G_{y\leftarrow r}^{cl}(s) =\mathbf{C}{(s\mathbf{I}-\mathbf{A}+\mathbf{BK})}^{-1}\mathbf{B}\bar{N}r, & \bar{N}=\mathbf{KS} \\ diff --git a/src/sections/reachability_observability.tex b/src/sections/reachability_observability.tex index ac6a647..054308e 100644 --- a/src/sections/reachability_observability.tex +++ b/src/sections/reachability_observability.tex @@ -22,7 +22,7 @@ \subsection{Reachability/Controllability} Mathematically expressed, a system is reachable if and only if the reachability matrix $\mathbf{R}$ has \textbf{full row rank} $n$. \begin{align*} \text{rank}(\mathbf{R}) & = n \\ - \mathbf{x} & \in \mathrm{Range}(\mathbf{R}) + \mathbf{x} & \in \mathrm{Range}(\mathbf{R}) \end{align*} \newpar{} @@ -46,7 +46,7 @@ \subsubsection{DT Systems: Controllability} \begin{itemize} \item A DT LTI system is controllable if, for any initial condition $\mathbf{x}_0$, there exists a control input that brings the state $x$ to $0$ in finite time (Note: For Reachability one has the ``opposite'' condition). \item Reachability always implies controllability and uncontrollable systems are never reachable - \item Controllability only implies reachability if $\mathbf{A}_d$ is invertible + \item Controllability only implies reachability iff $\mathbf{A}_d$ is invertible \item An unreachable DT system with non-invertible $\mathbf{A}_d$ could be controllable (Eigenvalues at 0). E.g.: \begin{itemize} \item $\mathbf{x}[k+1] = 0\mathbf{x}[k] + 0\mathbf{u}[k]$ is controllable (state goes to $0$) but unreachable as $\det(\mathbf{R})=0$. @@ -93,26 +93,14 @@ \subsection{Modal View: Stabilizability and Detectability} \ptitle{Stabilizability} \begin{itemize} \item A system is stabilizable if all \textbf{unstable modes are reachable}. - \begin{itemize} - \item I.e.\ it must be possible to bring all unstable modal components from $\mathbf{0}$ to any desired state in finite time. - \end{itemize} \item Reachability always implies stabilizability - \begin{itemize} - \item A stabilizable system can be unreachable: E.g.\ if stable modes can't be influenced from the input. - \end{itemize} \end{itemize} \newpar{} \ptitle{Detectability} \begin{itemize} \item A system is detectable if all \textbf{unstable modes are observable}. - \begin{itemize} - \item I.e.\ unstable modal behavior must be visible at the output - \end{itemize} \item Observability always implies detectability - \begin{itemize} - \item A detectable system can be unobservable: E.g.\ if stable modes don't influence the output. - \end{itemize} \end{itemize} \subsection{Kalman Decomposition} @@ -138,8 +126,8 @@ \subsection{Kalman Decomposition} \end{bmatrix} \mathbf{u} \\ \mathbf{y} & = \begin{bmatrix} - 0 & \mathbf{C}_{ro} & 0 & \mathbf{C}_{r\bar{o}} - \end{bmatrix} + 0 & \mathbf{C}_{ro} & 0 & \mathbf{C}_{\bar{r}o} + \end{bmatrix}\mathbf{x} + \mathbf{Du} \end{align*} @@ -161,7 +149,7 @@ \subsection{Kalman Decomposition} \mathbf{y} & = \begin{bmatrix} 0 & \mathbf{C}_{ro} & 0 & \mathbf{C}_{r\bar{o}} \end{bmatrix} - + \mathbf{Du} + \mathbf{x} + \mathbf{Du} \end{align*} \newpar{} @@ -176,5 +164,5 @@ \subsection{Kalman Decomposition} \ptitle{Remarks} \begin{itemize} \item In the transfer function $u \rightarrow y$ only the modes corresponding to the reachable and observable modes will appear (others will be cancelled by a zero). 
- \item A minimal realization of a transfer function is a state-space model that is both reachable and observable. -\end{itemize} \ No newline at end of file + \item A realization is minimal \textbf{iff} it is reachable and observable. -\end{itemize} +\end{itemize} diff --git a/src/sections/references.tex b/src/sections/references.tex deleted file mode 100644 index e74827c..0000000 --- a/src/sections/references.tex +++ /dev/null @@ -1,3 +0,0 @@ -\section{References} - -\printbibliography[heading=none]{} diff --git a/src/sections/stab_and_perf_robustness.tex b/src/sections/stab_and_perf_robustness.tex index c2e55c5..5e85cfa 100644 --- a/src/sections/stab_and_perf_robustness.tex +++ b/src/sections/stab_and_perf_robustness.tex @@ -9,6 +9,9 @@ \subsubsection{Sufficient Condition} \begin{equation*} \left\|\mathbf{G_1}\right\|_{\mathcal{H}_\infty}\cdot\left\|\mathbf{G_2}\right\|_{\mathcal{H}_\infty}<1 \end{equation*} +\begin{center} + \includegraphics[width = 0.5\linewidth]{SGT_interconn.png} +\end{center} \ptitle{Remarks} \begin{itemize} @@ -167,7 +170,7 @@ \subsection{Robust Disturbance Rejection} \item A \textbf{transfer function matrix} describing all uncertainty input-output relations \end{itemize} We model an uncertainty block by -\begin{equation*} +\begin{equation*}\label{diag_unc_block} \begin{bmatrix} w_1 \\ w_2 \end{bmatrix} @@ -206,18 +209,18 @@ \subsubsection{Robustness Assessment} \newpar{} \ptitle{Remarks} \begin{itemize} - \item Note that we ignored the diagonal structure of $\boldsymbol{\Delta}$ which makes our assumptions too conservative (see SSV for less conservatively). + \item Note that we ignored the diagonal structure of $\boldsymbol{\Delta}$ which makes our assumptions too conservative (see SSV for less conservatism). \item Also note that $\mathbf{M}$ has rank 1. In this case one can calculate the SSV exactly (see below). \end{itemize} \subsection{Structured Singular Value (SSV)} The condition from the \textit{unstructured SGT} is a conservative assumption as the SGT could be applied to an arbitrary $\boldsymbol{\Delta}$. -If $\boldsymbol{\Delta}$ has \textbf{block-diagonal structure}, less conservative robustness conditions can be applied. +For example, as the $\boldsymbol{\Delta}$ from~\ref{diag_unc_block} has block-diagonal structure, less conservative robustness conditions could be applied. \newpar{} \ptitle{Definition of SSV} -The SSV is defined with respect to a \textbf{class of perturbations} $\mathbb{D}$ as the smallest $\sigma_{\max}$ making $\mathbf{M}$ singular: +The SSV is defined with respect to a \textbf{class of perturbations} $\mathbb{D}$ as the inverse of the smallest perturbation size $\sigma_{\max}(\boldsymbol{\Delta})$ that makes $\mathbf{I}-\mathbf{M}\boldsymbol{\Delta}$ singular: \begin{equation*} \mu(\mathbf{M}):=\frac1{\inf\{\sigma_{\max}(\boldsymbol{\Delta}):\det(1-M\boldsymbol{\Delta})=0\}},\quad\boldsymbol{\Delta}\in\mathbb{D} \end{equation*}
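As a quick sanity check on this definition (an illustrative addition, not from the lecture slides): in the scalar case the infimum can be evaluated by hand, and for the block-diagonal complex perturbation classes $\mathbb{D}$ used here the SSV always lies between the spectral radius and the largest singular value.

% Illustrative sanity check; not part of the original summary.
For a scalar $M=m\neq 0$ and a scalar perturbation $\delta\in\mathbb{C}$, $\det(1-m\delta)=0$ forces $\delta=1/m$, so the smallest destabilizing perturbation has size $1/|m|$ and
\begin{equation*}
    \mu(m)=\frac{1}{\inf\{|\delta|:1-m\delta=0\}}=|m|=\sigma_{\max}(m),
\end{equation*}
which recovers the unstructured small-gain condition. More generally, restricting $\boldsymbol{\Delta}$ to a block-diagonal class can only tighten the unstructured bound, giving (with $\rho$ the spectral radius)
\begin{equation*}
    \rho(\mathbf{M})\;\le\;\mu(\mathbf{M})\;\le\;\sigma_{\max}(\mathbf{M}).
\end{equation*}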