%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % Scientific Word Wrap/Unwrap Version 2.5 % % Scientific Word Wrap/Unwrap Version 3.0 % % % % If you are separating the files in this message by hand, you will % % need to identify the file type and place it in the appropriate % % directory. The possible types are: Document, DocAssoc, Other, % % Macro, Style, Graphic, PastedPict, and PlotPict. Extract files % % tagged as Document, DocAssoc, or Other into your TeX source file % % directory. Macro files go into your TeX macros directory. Style % % files are used by Scientific Word and do not need to be extracted. % % Graphic, PastedPict, and PlotPict files should be placed in a % % graphics directory. % % % % Graphic files need to be converted from the text format (this is % % done for e-mail compatability) to the original 8-bit binary format. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % Files included: % % % % "/document/lec_4_17_00.tex", Document, 56796, 4/6/2000, 17:47:46, ""% % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%% Start /document/lec_4_17_00.tex %%%%%%%%%%%%%%%%%%% %\newtheorem{theorem}{Theorem} %\newtheorem{axiom}[theorem]{Axiom} %\newtheorem{conjecture}[theorem]{Conjecture} %\newtheorem{corollary}[theorem]{Corollary} %\newtheorem{definition}[theorem]{Definition} %\newtheorem{example}[theorem]{Example} %\newtheorem{exercise}[theorem]{Exercise} %\newtheorem{lemma}[theorem]{Lemma} %\newtheorem{proposition}[theorem]{Proposition} %\newtheorem{remark}[theorem]{Remark} \documentclass{article} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \usepackage{graphicx} \usepackage{amsmath} %TCIDATA{OutputFilter=Latex.dll} %TCIDATA{Created=Thursday, May 06, 1999 09:11:45} %TCIDATA{LastRevised=Thursday, April 06, 2000 13:47:45} %TCIDATA{} %TCIDATA{Language=American English} 
%TCIDATA{CSTFile=webmath.cst} %TCIDATA{PageSetup=72,72,72,72,0} %TCIDATA{AllPages= %F=36,\PARA{038

%\hfill \thepage} %}
\newtheorem{theorem}{Theorem}
\newtheorem{acknowledgement}[theorem]{Acknowledgement}
\newtheorem{algorithm}[theorem]{Algorithm}
\newtheorem{case}[theorem]{Case}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{conclusion}[theorem]{Conclusion}
\newtheorem{condition}[theorem]{Condition}
\newtheorem{criterion}[theorem]{Criterion}
\newtheorem{notation}[theorem]{Notation}
\newtheorem{problem}[theorem]{Problem}
\newtheorem{solution}[theorem]{Solution}
\newtheorem{summary}[theorem]{Summary}
\newenvironment{proof}[1][Proof]{\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\input{tcilatex}

\begin{document}

\section{\protect\vspace{1pt}Ma 116 Lecture 4/17/00}

\vspace{1pt}

\section{Matrices}

\subsection{The \protect\vspace{1pt}Definition of a Matrix}

An $m\times n$ \emph{matrix} is a rectangular array of quantities arranged
in $m$ rows and $n$ columns. (We say the matrix is of order $m$ by $n$.)

\vspace{1pt}

Notation: Let $a_{ij}$, $1\leq i\leq m$, $1\leq j\leq n$, be $mn$
quantities. Then the matrix associated with these $a_{ij}$'s is denoted by

\vspace{1pt}

\begin{center}
$A=\left[ a_{ij}\right] _{m\times n}=\left[
\begin{array}{rrrrrrr}
a_{11} & a_{12} & . & . & . & . & a_{1n} \\
a_{21} & a_{22} & . & . & . & . & a_{2n} \\
a_{31} & a_{32} & a_{33} & . & . & . & a_{3n} \\
. & . & . & . & . & . & . \\
. & . & . & . & . & . & . \\
a_{m1} & a_{m2} & . & . & . & . & a_{mn}%
\end{array}
\right] $
\end{center}

The quantities $a_{ij}$ are called the \emph{elements} of the matrix $A$.

Definition. Two matrices $A=\left[ a_{ij}\right] $ and $B=\left[ b_{ij}%
\right] $ are said to be \emph{equal} $\Leftrightarrow $\ they contain the
same number of rows and columns and $a_{ij}=b_{ij}$ (\textit{for all} $i,j$).

\vspace{1pt}

\subsection{Special Matrices}

\vspace{1pt}There are some special matrices which should be introduced.

\vspace{1pt}

If $n=1$ $\Longrightarrow A=\left[ a_{i1}\right] _{m\times 1}=\left[
\begin{array}{l}
a_{11} \\
a_{21} \\
. \\
. 
\\ a_{m1}% \end{array} \right] .\qquad $ This is called a column matrix. If $m=1\Longrightarrow A=\left[ a_{1n}\right] _{1\times n}=$ $\left[ a_{11},a_{12},...,a_{1n}\right] .$ This is called a row matrix. \vspace{1pt} Both column and row matrices are referred to as \emph{vectors}. \vspace{1pt} \vspace{1pt}When $m=n,$ the we have a square matrix \ $\left[ \begin{array}{llll} a_{11} & . & . & a_{1n} \\ . & . & . & . \\ . & . & . & . \\ a_{n1} & . & . & a_{nn}% \end{array} \right] $ \vspace{1pt} The matrix $A$ with every element zero is called the \emph{zero} matrix. We will write $A=0$. The identity matrix may be defined as follows: let \ $\delta _{ij}=\left\{ \begin{array}{c} 0\qquad i\neq j \\ 1\text{ \ \ \ \ \ }i=j% \end{array} \right. $ The square matrix $I=\left[ \delta _{ij}\right] _{n\times n}$ is known as the \emph{identity} matrix. \vspace{1pt} \begin{center} \vspace{1pt}% \begin{equation*} I=\left[ \begin{array}{llll} 1 & 0 & . & 0 \\ 0 & 1 & 0 & 0 \\ . & & & 0 \\ 0 & 0 & . & 1% \end{array}% \right] \end{equation*} \vspace{1pt} \end{center} Thus the identity matrix is the matrix with $1^{\prime }s$ along its diagonal and $0^{\prime }s$ everywhere else. \vspace{1pt} \subsection{Operations on Matrices} \vspace{1pt} \paragraph{Addition:} \vspace{1pt} Let $A=\left[ a_{ij}\right] _{m\times n}$ and $B=\left[ b_{ij}\right] _{m\times n}.$ Then \begin{center} \qquad \qquad\ \ \ \begin{equation*} A+B=\left[ a_{ij}+b_{ij}\right] _{m\times n} \end{equation*} \end{center} \vspace{1pt} Thus $A+B$ is a matrix of order $m\times n$ whose $i,j$ entry is $% a_{ij}+b_{ij}$. 
\vspace{1pt} \paragraph{Example:} $A=\left[ \begin{array}{lll} 1 & -2 & 3 \\ 0 & -1 & 6% \end{array} \right] \qquad B=\left[ \begin{array}{lll} 6 & 4 & 7 \\ -1 & -2 & -6% \end{array} \right] $ \vspace{1pt} \qquad \qquad \begin{equation*} A+B=\left[ \begin{array}{lll} 1+6 & -2+4 & 3+7 \\ 0-1 & -1-2 & 6-6% \end{array}% \right] =\left[ \begin{array}{lll} 7 & 2 & 10 \\ -1 & -3 & 0% \end{array}% \right] \end{equation*} \paragraph{Example:} We can use SNB to add matrices. Thus \vspace{1pt} $\left[ \begin{array}{lll} 1 & -2 & 3 \\ 0 & -1 & 6% \end{array} \right] +\left[ \begin{array}{lll} 6 & 4 & 7 \\ -1 & -2 & -6% \end{array} \right] =\allowbreak \left[ \begin{array}{ccc} 7 & 2 & 10 \\ -1 & -3 & 0% \end{array} \right] $ \vspace{1pt} \subsubsection{Subtraction:} $\vspace{1pt}$ Let $A=\left[ a_{ij}\right] _{m\times n}$ and $B=\left[ b_{ij}\right] _{m\times n}.$ Then $A-B$ is the matrix defined by \qquad \qquad\ \ \ \ \ \ \ \ \ \begin{center} $A-B=\left[ a_{ij}-b_{ij}\right] _{m\times n}$ \end{center} \vspace{1pt} \vspace{1pt}Note: One can add and subtract only matrices of the same order. Such matrices are called \emph{conformable}. \vspace{1pt} \subsection{Scalar Multiplication} \vspace{1pt} Let $k$ be a scalar and $A$ a matrix of real numbers of order $m\times n$. Then $\vspace{1pt}$ \begin{center} \begin{equation*} kA=\left[ k\cdot a_{ij}\right] _{m\times n} \end{equation*} \end{center} \vspace{1pt} Example: \begin{equation*} 5\left[ \begin{array}{cccc} -1 & 0 & 5 & 7 \\ 2 & -8 & 4 & 22 \\ -7 & 1 & 0 & 6 \\ 8 & 3 & -3 & 4% \end{array}% \right] =\allowbreak \left[ \begin{array}{cccc} -5 & 0 & 25 & 35 \\ 10 & -40 & 20 & 110 \\ -35 & 5 & 0 & 30 \\ 40 & 15 & -15 & 20% \end{array}% \right] \end{equation*} \subsection{Some Properties of Addition and Scalar Multiplication} We now list the basic properties of vector addition and scalar multiplication. 
\paragraph{Theorem} Let $A,$ $B$ and $C$ be conformable $m\times n$ matrices whose entries are real numbers, and $k$ and $p$ arbitrary scalars. Then $1.$ $\ A+B=B+A$. $2.$ \ $A+\left( B+C\right) =\left( A+B\right) +C$ $3.$ \ There is an $m\times n$ matrix $0$ such that $0+A=A$ for each $A.$ $4.$ \ For each $A$ there is an $m\times n$ matrix $-A$ such that $A+\left( -A\right) =0.$ $5.$ \ $k\left( A+B\right) =kA+kB$ $6.$ \ $\left( k+p\right) A=kA+pA$ $7.$ \ $\left( kp\right) A=k\left( pA\right) .$ \paragraph{Proof:} $\left( 1\right) $ $\ A+B=\left[ a_{ij}\right] _{m\times n}+\left[ b_{ij}% \right] _{m\times n}=\left[ a_{ij}+b_{ij}\right] _{m\times n}$ \qquad \qquad $\qquad \qquad \qquad $ $\qquad \qquad \qquad \qquad \qquad =\left[ b_{ij}+a_{ij}\right] _{m\times n} $ \qquad \qquad \qquad commutativity of real numbers. \vspace{1pt} \qquad \qquad \qquad $\qquad \qquad =B+A$ $\left( 4\right) $ \ Note that $\left( -1\right) A=\left[ -a_{ij}\right] _{m\times n}\qquad \Longrightarrow A+\left( -1\right) A=0_{m\times n}$ \vspace{1pt} Remark: We denote $\left( -1\right) A$ by $-A$. \vspace{1pt} \subsection{The Transpose of a Matrix} If $A$ is an $m\times n$ matrix, the \emph{transpose} of $A$, denoted $A^{T}$% , is the $n\times m$ matrix whose entry $a_{st}$ is the same as the entry $% a_{ts}$ in the matrix $A$. Thus one gets the transpose of $A$ by interchanging the rows and the columns of $A.$ \vspace{1pt} \paragraph{Example:} $\left[ \begin{array}{ccc} 1 & 0 & -1 \\ 2 & 3 & -2 \\ 4 & 10 & 9% \end{array} \right] ^{T}=\allowbreak \left[ \begin{array}{ccc} 1 & 2 & 4 \\ 0 & 3 & 10 \\ -1 & -2 & 9% \end{array} \right] $ We note the following: \begin{itemize} \item $\left( A^{T}\right) ^{T}=A$. \item $\left( A+B\right) ^{T}=A^{T}+B^{T}$. \item For any scalar $r$, $\left( rA\right) ^{T}=rA^{T}$. \item If $A$ is a diagonal matrix, then $A=A^{T}$. \item A square matrix is said to be \emph{symmetric} if $A^{T}=A$ and \emph{% skew-symmetric} if $A^{T}=-A$. 
\end{itemize} \paragraph{\protect\vspace{1pt}Example:} $\left[ \begin{array}{ccc} 1 & 2 & 3 \\ 2 & -1 & 4 \\ 3 & 4 & 0% \end{array} \right] $ is symmetric and $\left[ \begin{array}{ccc} 0 & 2 & 3 \\ -2 & 0 & 4 \\ -3 & -4 & 0% \end{array} \right] $ is skew-symmetric. \subsubsection{\protect\vspace{1pt}} \subsection{Multiplication of Matrices} \qquad \qquad Consider a system of $m$ equations in $n$ unknowns \qquad \qquad \begin{center} \begin{eqnarray*} a_{11}x_{1}+a_{12}x_{2}+\cdots +a_{1n}x_{n} &=&d_{1} \\ a_{21}x_{1}+a_{22}x_{2}+\cdots +a_{2n}x_{n} &=&d_{2} \\ \cdots &=&\cdots \qquad \\ \cdots &=&\cdots \\ \ddots &=&\ddots \\ a_{m1}x_{1}+a_{m2}x_{2}+\cdots +a_{mn}x_{n} &=&d_{m} \end{eqnarray*} \vspace{1pt} \end{center} Here the $a_{ij}$ and $d_{i}$ are given scalars and the $x_{j}$ are the unknowns. \begin{center} \qquad \qquad \qquad \qquad \end{center} If we let $A=\left[ a_{ij}\right] _{m\times n}$\ , \vspace{1pt} \begin{center} $X=\left[ \begin{array}{l} x_{1} \\ \vdots \\ \vdots \\ x_{n}% \end{array} \right] _{n\times 1},\qquad $and$\qquad D=\left[ \begin{array}{l} d_{1} \\ \vdots \\ \vdots \\ d_{m}% \end{array} \right] _{m\times 1}$ \end{center} \vspace{1pt} \qquad \qquad \qquad \qquad $\qquad \qquad \qquad \qquad \qquad $ Then it is natural to write $AX=D$ to represent the system above. Hence we want \vspace{1pt} \begin{center} $\left[ \begin{array}{lllll} a_{11} & \cdots & \cdots & \cdots & a_{1m} \\ a_{21} & \cdots & \cdots & \cdots & a_{2m} \\ \cdots & \cdots & \cdots & \ddots & \vdots \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ a_{m1} & \cdots & \cdots & \cdots & a_{mn}% \end{array} \right] \left[ \begin{array}{l} x_{1} \\ x_{2} \\ \vdots \\ \vdots \\ x_{n}% \end{array} \right] =\left[ \begin{array}{l} d_{1} \\ d_{2} \\ \vdots \\ \vdots \\ d_{m}% \end{array} \right] $ \end{center} \vspace{1pt} to be the same as our system above. 
$\Longrightarrow $ that multiplication should be defined by \vspace{1pt} \begin{center} \qquad \qquad $d_{i}=\sum\limits_{j=1}^{n}a_{ij}x_{j}$ \end{center} Notice that $A$ is $m\times n$ and $X$ is $n\times 1$ and $D$ is $m\times 1$% . Thus to multiply two matrices we must have the number of columns of the first matrix equal to the number of rows of the second matrix. To multiply two matrices $A$ and $B$ together, where $B$ is not a column matrix, we extrapolate as follows: \vspace{1pt} \vspace{1pt} \begin{center} $\left[ \begin{array}{lllll} a_{11} & \cdots & \cdots & \cdots & a_{1m} \\ a_{21} & \cdots & \cdots & \cdots & a_{2m} \\ \cdots & \cdots & \cdots & \ddots & \vdots \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ a_{m1} & \cdots & \cdots & \cdots & a_{mn}% \end{array} \right] _{m\times n}\left[ \begin{array}{lllll} b_{11} & \cdots & \cdots & \cdots & b_{1p} \\ b_{21} & \cdots & \cdots & \cdots & b_{2p} \\ \cdots & \cdots & \cdots & \ddots & \vdots \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ b_{n1} & \cdots & \cdots & \cdots & b_{np}% \end{array} \right] _{n\times p}=\left[ \begin{array}{lllll} c_{11} & \cdots & \cdots & \cdots & c_{1p} \\ c_{21} & \cdots & \cdots & \cdots & c_{2p} \\ \cdots & \cdots & \cdots & \ddots & \vdots \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ c_{m1} & \cdots & \cdots & \cdots & c_{mp}% \end{array} \right] _{m\times p}$ \vspace{1pt} \end{center} We see that \vspace{1pt} \begin{equation*} c_{11}=\sum\limits_{k=1}^{n}a_{1k}b_{k1} \end{equation*} \vspace{1pt}and in general that \begin{center} \begin{equation*} c_{ij}=\sum\limits_{k=1}^{n}a_{ik}b_{kj} \end{equation*} \end{center} \vspace{1pt} Definition. Let $A=\left[ a_{ij}\right] _{m\times n}$ and $B=\left[ b_{ij}% \right] _{n\times p}$ be matrices. 
Then $A$ $B$ is the $m\times p$ matrix $% C, $ where \vspace{1pt} \begin{center} \vspace{1pt}\qquad \qquad \begin{equation*} C=\left[ c_{ij}\right] _{m\times p}=\left[ \sum\limits_{k=1}^{n}a_{ik}b_{kj}% \right] _{m\times p} \end{equation*} \end{center} \vspace{1pt} Remark. $A$ $B\neq B$ $\,A$ necessarily. \vspace{1pt} In fact $B$ $A$ need not be defined. For example if $A$ is $2\times 3$ and \ $B$ is $3\times 4$, then $A$ $B$ will be $2\times 4$ whereas $B$ $A$ is not defined. \vspace{1pt} \vspace{1pt} \paragraph{Example:} $\left[ \begin{array}{lll} 1 & -1 & 0 \\ 4 & 1 & -1% \end{array} \right] _{2\times 3}\times \left[ \begin{array}{ll} 3 & 4 \\ -1 & -5 \\ 1 & 2% \end{array} \right] _{3\times 2}=\left[ \begin{array}{cc} \left( 1\right) \left( 3\right) +(-1)\left( -1\right) +\left( 0\right) \left( 1\right) & \left( 1\right) \left( 4\right) +\left( -1\right) \left( -5\right) +\left( 0\right) \left( 2\right) \\ \left( 4\right) \left( 3\right) +\left( 1\right) \left( -1\right) +\left( -1\right) \left( 1\right) & \left( 4\right) \left( 4\right) +\left( 1\right) \left( -5\right) +\left( -1\right) \left( 2\right)% \end{array} \right] _{2\times 2}$ $\qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad =% \left[ \begin{array}{ll} 4 & 9 \\ 10 & 9% \end{array} \right] _{2\times 2}$ \vspace{1pt} \qquad \qquad \qquad $\qquad \qquad \qquad $ \vspace{1pt} $\left[ \begin{array}{ll} 3 & 4 \\ -1 & -5 \\ 1 & 2% \end{array} \right] _{3\times 2}\times \left[ \begin{array}{lll} 1 & -1 & 0 \\ 4 & 1 & -1% \end{array} \right] _{2\times 3}=\left[ \begin{array}{lll} 19 & 1 & -4 \\ -21 & -4 & 5 \\ 9 & 1 & -2% \end{array} \right] _{3\times 3}$ \vspace{1pt} Note that using Evaluate in SNB gives the same result. 
\begin{center} \vspace{1pt} $\left[ \begin{array}{ll} 3 & 4 \\ -1 & -5 \\ 1 & 2% \end{array} \right] \left[ \begin{array}{lll} 1 & -1 & 0 \\ 4 & 1 & -1% \end{array} \right] =\allowbreak \left[ \begin{array}{ccc} 19 & 1 & -4 \\ -21 & -4 & 5 \\ 9 & 1 & -2% \end{array} \right] $ \end{center} \qquad \qquad \qquad $\qquad \qquad \qquad \qquad $ The following occur often for matrices. \vspace{1pt} \begin{enumerate} \item $A$ $B\neq B$ $A$ \end{enumerate} \paragraph{Example:} $\left[ \begin{array}{cc} 1 & 0 \\ 0 & 0% \end{array} \right] \left[ \begin{array}{cc} 0 & 1 \\ 0 & 0% \end{array} \right] =\left[ \begin{array}{cc} 0 & 1 \\ 0 & 0% \end{array} \right] \neq \left[ \begin{array}{cc} 0 & 1 \\ 0 & 0% \end{array} \right] \left[ \begin{array}{cc} 1 & 0 \\ 0 & 0% \end{array} \right] $ $=$ $\left[ \begin{array}{cc} 0 & 0 \\ 0 & 0% \end{array} \right] $ \begin{enumerate} \item[2.] $A$ $B=0$ but neither $A=0$ or $B=0$ \end{enumerate} \paragraph{Example:} $\left[ \begin{array}{cc} 0 & 1 \\ 0 & 0% \end{array} \right] \left[ \begin{array}{cc} 1 & 0 \\ 0 & 0% \end{array} \right] $ $=$ $\left[ \begin{array}{cc} 0 & 0 \\ 0 & 0% \end{array} \right] $ \begin{enumerate} \item[3.] $A$ $B=A$ $C$\ \ but $B\neq C$ \end{enumerate} \vspace{1pt} $\left[ \begin{array}{cc} 0 & 1 \\ 0 & 0% \end{array} \right] \left[ \begin{array}{cc} 1 & 0 \\ 0 & 0% \end{array} \right] =\left[ \begin{array}{cc} 0 & 1 \\ 0 & 0% \end{array} \right] \left[ \begin{array}{cc} 5 & 0 \\ 0 & 0% \end{array} \right] =\left[ \begin{array}{cc} 0 & 0 \\ 0 & 0% \end{array} \right] $ \vspace{1pt} \paragraph{Theorem} \vspace{1pt}Assume that $k$ is an arbitrary scalar, and that $A,$ $B,$ $C$ and $I$ are matrices of sizes such that the indicated operations can be performed. 
Then \vspace{1pt} $1.$ $IA=A,\qquad BI=B$ \vspace{1pt} $2.$ $A\left( BC\right) =\left( AB\right) C$ \vspace{1pt} $3.$ \ $A\left( B+C\right) =AB+AC,\qquad A\left( B-C\right) =AB-AC$ \vspace{1pt} $4.$ \ $\left( B+C\right) A=BA+CA,\qquad \left( B-C\right) A=BA-CA$ \vspace{1pt} $5.$ \ $k\left( AB\right) =\left( kA\right) B=A\left( kB\right) $ \vspace{1pt} $6.$ \ $\left( AB\right) ^{T}=B^{T}A^{T}.$ \vspace{1pt} \paragraph{Proof of 6} \vspace{1pt} Let $A=\left[ a_{ij}\right] _{m\times n}$ and $B=\left[ b_{ij}\right] _{n\times p}$. Then $A^{T}=\left[ a_{ij}^{\prime }\right] _{n\times m}$ and $% B^{T}=\left[ b_{ij}^{\prime }\right] _{p\times n},$ where $a_{ij}^{\prime }=a_{ji}$ and $b_{ij}^{\prime }=b_{ji}.$ The $\left( i,j\right) $ entry of $% B^{T}A^{T}$ is \vspace{1pt} \begin{equation*} \sum_{k=1}^{n}b_{ik}^{\prime }a_{kj}^{\prime }=\sum_{k=1}^{n}b_{ki}a_{jk}=\sum_{k=1}^{n}a_{jk}b_{ki} \end{equation*} This last term is the $\left( j,i\right) $ entry of $AB$ which means that it is the $\left( i,j\right) $ entry of $\left( AB\right) ^{T}.$ \vspace{1pt} \subsection{Operations with the Identity Matrix} \vspace{1pt} Consider the $3\times 3$ identity matrix \begin{equation*} I=\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1% \end{array} \right] \end{equation*} \vspace{1pt} and the matrix \begin{equation*} A=\left[ \begin{array}{cccc} 4 & 1 & 2 & 1 \\ 3 & 0 & 1 & 6 \\ 5 & 7 & 9 & 8% \end{array} \right] \end{equation*} \vspace{1pt} Note the following: \vspace{1pt} \begin{center} $IA=\allowbreak \left[ \begin{array}{cccc} 4 & 1 & 2 & 1 \\ 3 & 0 & 1 & 6 \\ 5 & 7 & 9 & 8% \end{array} \right] $ \vspace{1pt} $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & 0 & 1 \\ 0 & 1 & 0% \end{array} \right] \left[ \begin{array}{cccc} 4 & 1 & 2 & 1 \\ 3 & 0 & 1 & 6 \\ 5 & 7 & 9 & 8% \end{array} \right] =\allowbreak \left[ \begin{array}{cccc} 4 & 1 & 2 & 1 \\ 5 & 7 & 9 & 8 \\ 3 & 0 & 1 & 6% \end{array} \right] \qquad $interchanged rows $2$ and $3$ \vspace{1pt} $\left[ \begin{array}{ccc} 
2 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{cccc} 4 & 1 & 2 & 1 \\ 3 & 0 & 1 & 6 \\ 5 & 7 & 9 & 8% \end{array} \right] =\allowbreak \left[ \begin{array}{cccc} 8 & 2 & 4 & 2 \\ 3 & 0 & 1 & 6 \\ 5 & 7 & 9 & 8% \end{array} \right] \qquad $multiplied row $1$ by $2$ \vspace{1pt} $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & 1 & 0 \\ -\frac{5}{4} & 0 & 1% \end{array} \right] \left[ \begin{array}{cccc} 4 & 1 & 2 & 1 \\ 3 & 0 & 1 & 6 \\ 5 & 7 & 9 & 8% \end{array} \right] =\allowbreak \left[ \begin{array}{cccc} 4 & 1 & 2 & 1 \\ 3 & 0 & 1 & 6 \\ 0 & \frac{23}{4} & \frac{13}{2} & \frac{27}{4}% \end{array} \right] \qquad $got a $0$ in row $3$ first column \vspace{1pt} \end{center} A matrix that is obtained from the identity matrix by interchanging two rows or by multiplying one row of the identity matrix by a nonzero constant is called an \emph{elementary matrix.} \subsection{\protect\vspace{1pt}Systems of Equations: Elimination Using Matrices I} \paragraph{\protect\vspace{1pt}Example:} Solve the system \begin{eqnarray*} x_{1}+x_{2}+2x_{3}+x_{4} &=&5 \\ 2x_{1}+3x_{2}-x_{3}-2x_{4} &=&2 \\ 4x_{1}+5x_{2}+2x_{3} &=&7 \end{eqnarray*} \vspace{1pt} \begin{center} $ \begin{array}{c} x_{1}+x_{2}+2x_{3}+x_{4}=5 \\ 2x_{1}+3x_{2}-x_{3}-2x_{4}=2 \\ 4x_{1}+5x_{2}+2x_{3}=7% \end{array} \qquad \left[ \begin{array}{lllll} 1 & 1 & 2 & 1 & 5 \\ 2 & 3 & -1 & -2 & 2 \\ 4 & 5 & 2 & 0 & 7% \end{array} \right] $ \vspace{1pt} \end{center} The matrix on the right that we have associated with the given system is called the \emph{augmented matrix }of the system.\emph{\ }The matrix \vspace{1pt} \begin{center} $A=\left[ \begin{array}{llll} 1 & 1 & 2 & 1 \\ 2 & 3 & -1 & -2 \\ 4 & 5 & 2 & 0% \end{array} \right] $ \end{center} \vspace{1pt}is called the \emph{coefficient matrix }of the system, and $C=% \left[ \begin{array}{l} 5 \\ 2 \\ 7% \end{array} \right] $ is called the \emph{constant matrix (vector)} of the system. 
It is clear that we can rewrite our system as

\begin{equation*}
AX=C
\end{equation*}

\vspace{1pt}

where $X=\left[
\begin{array}{c}
x_{1} \\
x_{2} \\
x_{3} \\
x_{4}%
\end{array}
\right] .$

\begin{center}
\vspace{1pt}

$
\begin{array}{c}
x_{1}+x_{2}+2x_{3}+x_{4}=5 \\
0+x_{2}-5x_{3}-4x_{4}=-8 \\
0+x_{2}-6x_{3}-4x_{4}=-13%
\end{array}
\longleftrightarrow \left[
\begin{array}{lllll}
1 & 1 & 2 & 1 & 5 \\
0 & 1 & -5 & -4 & -8 \\
0 & 1 & -6 & -4 & -13%
\end{array}
\right] $
\end{center}

$\qquad \qquad $

\begin{center}
$
\begin{array}{c}
x_{1}+0x_{2}+7x_{3}+5x_{4}=13 \\
0+x_{2}-5x_{3}-4x_{4}=-8 \\
0+0-x_{3}=-5%
\end{array}
\longleftrightarrow \left[
\begin{array}{lllll}
1 & 0 & 7 & 5 & 13 \\
0 & 1 & -5 & -4 & -8 \\
0 & 0 & -1 & 0 & -5%
\end{array}
\right] $
\end{center}

\vspace{1pt}

\begin{center}
\qquad $
\begin{array}{c}
x_{1}+0x_{2}+0x_{3}+5x_{4}=-22 \\
0+x_{2}+0x_{3}-4x_{4}=17 \\
0+0+x_{3}=5%
\end{array}
\longleftrightarrow \left[
\begin{array}{lllll}
1 & 0 & 0 & 5 & -22 \\
0 & 1 & 0 & -4 & 17 \\
0 & 0 & 1 & 0 & 5%
\end{array}
\right] $
\end{center}

$\qquad $

Thus $x_{3}=5,\qquad x_{1}+5x_{4}=-22,\qquad x_{2}-4x_{4}=17$

or

$x_{3}=5,\qquad x_{4}=t\qquad x_{1}=-5t-22\qquad x_{2}=4t+17$

\vspace{1pt}

Note that we have an infinite number of solutions. However, if our
operations had led to

\begin{center}
\vspace{1pt}$\left[
\begin{array}{lllll}
1 & 0 & 0 & 5 & -22 \\
0 & 1 & 0 & -4 & 17 \\
\mathtt{0} & \mathtt{0} & \mathtt{0} & \mathtt{0} & \mathtt{5}%
\end{array}
\right] ,$

\vspace{1pt}
\end{center}

then there would not be any solution to the system, since the last row of
the matrix would imply

\vspace{1pt}

\begin{equation*}
0x_{1}+0x_{2}+0x_{3}+0x_{4}=5
\end{equation*}

\vspace{1pt}

which is clearly impossible. 
\vspace{1pt} Definition: Systems of linear equations that have no solution are called \emph{inconsistent systems}; systems that have at least one solution are said to be \emph{consistent.} \vspace{1pt} \subsection{Systems of Equations: Elimination Using Matrices II} Note that the operations that were done on the matrix \begin{center} $\left[ \begin{array}{lllll} 1 & 1 & 2 & 1 & 5 \\ 2 & 3 & -1 & -2 & 2 \\ 4 & 5 & 2 & 0 & 7% \end{array} \right] $ \vspace{1pt} \end{center} associated with the system \begin{center} \vspace{1pt}$ \begin{array}{c} x_{1}+x_{2}+2x_{3}+x_{4}=5 \\ 2x_{1}+3x_{2}-x_{3}-2x_{4}=2 \\ 4x_{1}+5x_{2}+2x_{3}=7% \end{array} $ \end{center} that was considered in the previous section are equivalent to multiplying the matrix by a ``modified'' $3\times 3$ identity matrix, i.e., an appropriate elementary matrix. Thus \vspace{1pt} \paragraph{Example:} $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ -2 & 1 & 0 \\ 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{lllll} 1 & 1 & 2 & 1 & 5 \\ 2 & 3 & -1 & -2 & 2 \\ 4 & 5 & 2 & 0 & 7% \end{array} \right] =\allowbreak \left[ \begin{array}{ccccc} 1 & 1 & 2 & 1 & 5 \\ 0 & 1 & -5 & -4 & -8 \\ 4 & 5 & 2 & 0 & 7% \end{array} \right] $ \vspace{1pt} $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & 1 & 0 \\ -4 & 0 & 1% \end{array} \right] \left[ \begin{array}{ccccc} 1 & 1 & 2 & 1 & 5 \\ 0 & 1 & -5 & -4 & -8 \\ 4 & 5 & 2 & 0 & 7% \end{array} \right] =\allowbreak \left[ \begin{array}{ccccc} 1 & 1 & 2 & 1 & 5 \\ 0 & 1 & -5 & -4 & -8 \\ 0 & 1 & -6 & -4 & -13% \end{array} \right] $ \vspace{1pt} Note also that $\left[ \begin{array}{ccc} 0 & 0 & 1 \\ 0 & 1 & 0 \\ 1 & 0 & 0% \end{array} \right] \left[ \begin{array}{lllll} 1 & 1 & 2 & 1 & 5 \\ 2 & 3 & -1 & -2 & 2 \\ 4 & 5 & 2 & 0 & 7% \end{array} \right] =\allowbreak \left[ \begin{array}{ccccc} 4 & 5 & 2 & 0 & 7 \\ 2 & 3 & -1 & -2 & 2 \\ 1 & 1 & 2 & 1 & 5% \end{array} \right] ,$ \vspace{1pt} whereas as \vspace{1pt} $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & -2 & 0 \\ 0 & 0 & 1% 
\end{array}
\right] \left[
\begin{array}{lllll}
1 & 1 & 2 & 1 & 5 \\
2 & 3 & -1 & -2 & 2 \\
4 & 5 & 2 & 0 & 7%
\end{array}
\right] =\allowbreak \left[
\begin{array}{ccccc}
1 & 1 & 2 & 1 & 5 \\
-4 & -6 & 2 & 4 & -4 \\
4 & 5 & 2 & 0 & 7%
\end{array}
\right] $

\vspace{1pt}

Thus we see that by multiplying the given matrix by various modifications of
the identity matrix, we can make certain elements in the matrix a $0$,
interchange two rows, and multiply a given row by a scalar.

\subsection{\protect\vspace{1pt}Elementary Row Operations On Matrices I}

\subsubsection{Equivalent Systems}

Two linear systems are \textbf{equivalent }if they have the same solutions.

\paragraph{Three Elementary Operations}

Three basic \emph{elementary }operations are used to transform systems to
equivalent systems. These are:

\begin{enumerate}
\item Interchanging the order of the equations in the system.

\item Multiplying any equation by a nonzero constant.

\item Replacing any equation in the system by its sum with a nonzero
constant multiple of any other equation in the system (elimination step).
\end{enumerate}

\paragraph{Theorem:}

Suppose that an elementary row operation is performed on a system of linear
equations. Then the resulting system has the same set of solutions as the
original, so the two systems are equivalent.

\vspace{1pt}

Operating on the rows of a matrix is equivalent to operating on equations.
The row operations that are allowed are the same as the basic operations on
linear systems of equations:

\begin{enumerate}
\item Interchanging the rows.

\item Multiplying any row by a nonzero constant.

\item Replacing any row by its sum with a nonzero constant multiple of any
other row. (Add a multiple of one row to a different row.)
\end{enumerate} \vspace{1pt} \paragraph{\protect\vspace{1pt}Example:} Find all solutions to the following system of equations \begin{eqnarray*} 3x+4y+z &=&1 \\ 2x+3y\text{ \ \ \ } &=&0 \\ 4x+3y-z &=&-2 \end{eqnarray*} The augmented matrix is \begin{equation*} \left[ \begin{array}{cccc} 3 & 4 & 1 & 1 \\ 2 & 3 & 0 & 0 \\ 4 & 3 & -1 & -2% \end{array} \right] \end{equation*} We could get a $1$ in the first row and first column by multiplying row $1$ $% \left( R_{1}\right) $ by $\frac{1}{3}.$ However, we can get a $1$ in this spot without obtaining fractions by subtracting row $2$ from row $1.$ Doing this we get \vspace{1pt} \begin{equation*} \left[ \begin{array}{cccc} 1 & 1 & 1 & 1 \\ 2 & 3 & 0 & 0 \\ 4 & 3 & -1 & -2% \end{array} \right] \end{equation*} We now use the $1$ that we have in the first row and first column to get zeroes below it. Hence we multiply row $1$ by $-2$ and add it to row $2.$We also multiply row $1$ by $-4$ and add it to row $3.$ we get \begin{equation*} \left[ \begin{array}{cccc} 1 & 1 & 1 & 1 \\ 0 & 1 & -2 & -2 \\ 0 & -1 & -5 & -6% \end{array} \right] \end{equation*} We now use the $1$ in the second row and second column to get zeroes above and below it. We get \vspace{1pt} \begin{equation*} \left[ \begin{array}{cccc} 1 & 0 & 3 & 3 \\ 0 & 1 & -2 & -2 \\ 0 & 0 & -7 & -8% \end{array} \right] \end{equation*} \vspace{1pt} We may now get a $1$ in the third row and third column by multiplying the third row by $-\frac{1}{7}.$ Doing this yields \vspace{1pt} \begin{equation*} \left[ \begin{array}{cccc} 1 & 0 & 3 & 3 \\ 0 & 1 & -2 & -2 \\ 0 & 0 & 1 & \frac{8}{7}% \end{array} \right] \end{equation*} \vspace{1pt} Now we can use the one in the last row to get zeroes for the entries above it. 
Doing this we get \vspace{1pt} \begin{equation*} \left[ \begin{array}{cccc} 1 & 0 & 0 & -\frac{3}{7} \\ 0 & 1 & 0 & \frac{2}{7} \\ 0 & 0 & 1 & \frac{8}{7}% \end{array} \right] \end{equation*} \vspace{1pt} Clearly the solution is $x=-\frac{3}{7},y=\frac{2}{7},z=\frac{8}{7}.$ \subsection{\protect\vspace{1pt}Elementary Row Operations On Matrices II} We may use ``variations'' of the $3\times 3$ identity matrix, i.e. multiplication by matrices, to perform elementary row operations. We do this for the matrix just considered. \paragraph{Example:} We perform the same operations on the matrix $\left[ \begin{array}{cccc} 3 & 4 & 1 & 1 \\ 2 & 3 & 0 & 0 \\ 4 & 3 & -1 & -2% \end{array}% \right] $ that we did using elementary row operations using matrices. $\left[ \begin{array}{ccc} 1 & -1 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1% \end{array}% \right] \left[ \begin{array}{cccc} 3 & 4 & 1 & 1 \\ 2 & 3 & 0 & 0 \\ 4 & 3 & -1 & -2% \end{array}% \right] =\allowbreak \left[ \begin{array}{cccc} 1 & 1 & 1 & 1 \\ 2 & 3 & 0 & 0 \\ 4 & 3 & -1 & -2% \end{array}% \right] \qquad -R_{2}+R_{1}$ $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ -2 & 1 & 0 \\ -4 & 0 & 1% \end{array}% \right] \left[ \begin{array}{cccc} 1 & 1 & 1 & 1 \\ 2 & 3 & 0 & 0 \\ 4 & 3 & -1 & -2% \end{array}% \right] =\allowbreak \left[ \begin{array}{cccc} 1 & 1 & 1 & 1 \\ 0 & 1 & -2 & -2 \\ 0 & -1 & -5 & -6% \end{array}% \right] \qquad -2R_{1}+R_{2}\qquad -4R_{1}+R_{3}$ $\left[ \begin{array}{ccc} 1 & -1 & 0 \\ 0 & 1 & 0 \\ 0 & 1 & 1% \end{array}% \right] \left[ \begin{array}{cccc} 1 & 1 & 1 & 1 \\ 0 & 1 & -2 & -2 \\ 0 & -1 & -5 & -6% \end{array}% \right] =\allowbreak \left[ \begin{array}{cccc} 1 & 0 & 3 & 3 \\ 0 & 1 & -2 & -2 \\ 0 & 0 & -7 & -8% \end{array}% \right] \qquad -R_{2}+R_{1}\qquad R_{2}+R_{3}$ $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & -\frac{1}{7}% \end{array}% \right] \left[ \begin{array}{cccc} 1 & 0 & 3 & 3 \\ 0 & 1 & -2 & -2 \\ 0 & 0 & -7 & -8% \end{array}% \right] =\allowbreak \left[ \begin{array}{cccc} 1 & 0 & 
3 & 3 \\
0 & 1 & -2 & -2 \\
0 & 0 & 1 & \frac{8}{7}%
\end{array}%
\right] \qquad -\dfrac{1}{7}R_{3}$

$\left[
\begin{array}{ccc}
1 & 0 & -3 \\
0 & 1 & 2 \\
0 & 0 & 1%
\end{array}%
\right] \left[
\begin{array}{cccc}
1 & 0 & 3 & 3 \\
0 & 1 & -2 & -2 \\
0 & 0 & 1 & \frac{8}{7}%
\end{array}%
\right] =\allowbreak \left[
\begin{array}{cccc}
1 & 0 & 0 & -\frac{3}{7} \\
0 & 1 & 0 & \frac{2}{7} \\
0 & 0 & 1 & \frac{8}{7}%
\end{array}%
\right] \qquad 2R_{3}+R_{2}\qquad -3R_{3}+R_{1}$

\vspace{1pt}

\subsection{\protect\vspace{1pt}Gaussian Elimination}

\vspace{1pt}

Definition: A matrix is said to be in \emph{row-echelon form} (and will be
called a \emph{row-echelon} \emph{matrix}) if it satisfies the following
three conditions:

\vspace{1pt}

1. All \emph{zero rows }(consisting entirely of zeroes) are at the bottom.

\vspace{1pt}

2. The first nonzero entry from the left in each nonzero row is a $1,$
called the \emph{leading} $1$ for that row.

\vspace{1pt}

3. Each leading $1$ is to the right of all leading $1^{\prime }s$ in the
rows above it.

\vspace{1pt}

\vspace{1pt}

Definition: A row-echelon matrix is said to be in \emph{reduced row-echelon
form }(and will be called a \emph{reduced row-echelon matrix}) if it
satisfies the following condition:

\vspace{1pt}

4. Each leading $1$ is the only nonzero entry in its column.

\vspace{1pt}

\paragraph{Examples}

\vspace{1pt}

The matrices

\vspace{1pt}\qquad \qquad $\left[
\begin{array}{lllll}
0 & 1 & 0 & -3 & 2 \\
0 & 0 & 1 & 4 & -5 \\
0 & 0 & 0 & 0 & 1 \\
0 & 0 & 0 & 0 & 0%
\end{array}
\right] \qquad \left[
\begin{array}{lll}
1 & 9 & 0 \\
0 & 1 & -2 \\
0 & 0 & 1%
\end{array}
\right] \qquad \left[
\begin{array}{ll}
0 & 1 \\
0 & 0 \\
0 & 0 \\
0 & 0%
\end{array}
\right] $

are all in row-echelon form. Only the third is in reduced row-echelon form:
in the first matrix the leading $1$ in the third row has nonzero entries
above it in its column, and in the second matrix the leading $1$ in the
second row has a $9$ above it.

\vspace{1pt}

\paragraph{Example:}

\vspace{1pt}

None of the matrices below is in row-echelon form. 
\vspace{1pt} \ $\left[ \begin{array}{lll} 1 & -3 & 0 \\ 0 & 0 & 0 \\ 0 & 1 & 2% \end{array} \right] $ \ violates $(1)$ \qquad $\left[ \begin{array}{llll} 1 & 0 & 6 & -1 \\ 0 & 0 & 2 & 4 \\ 0 & 0 & 0 & 0% \end{array} \right] $ \ violates $(2)$ \vspace{1pt} \qquad \qquad \qquad \qquad \qquad $\left[ \begin{array}{llll} 0 & 1 & -3 & -7 \\ 0 & 0 & 1 & 6 \\ 0 & 1 & 5 & -2% \end{array} \right] $ \ violates $(3)$ \paragraph{Example:} Insert the appropriate (elementary) matrices corresponding to the indicated row operations that transform \vspace{1pt} \vspace{1pt}\qquad \qquad \qquad \qquad \qquad \qquad \qquad $\left[ \begin{array}{lllll} -1 & -1 & 0 & 2 & -4 \\ 0 & 0 & 1 & -3 & 0 \\ 2 & 1 & 0 & 0 & 0 \\ 2 & 2 & 1 & -7 & 8% \end{array} \right] $ to row-reduced echelon form. \vspace{1pt} Adding $2\times R_{1}$ to $R_{4}$ yields \vspace{1pt}$\left[ \begin{array}{cccc} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 2 & 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{lllll} -1 & -1 & 0 & 2 & -4 \\ 0 & 0 & 1 & -3 & 0 \\ 2 & 1 & 0 & 0 & 0 \\ 2 & 2 & 1 & -7 & 8% \end{array} \right] =\allowbreak \left[ \begin{array}{ccccc} -1 & -1 & 0 & 2 & -4 \\ 0 & 0 & 1 & -3 & 0 \\ 2 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & -3 & 0% \end{array} \right] $ \vspace{1pt} interchanging $R_{2}$ and $R_{3}$ $\allowbreak \left[ \begin{array}{cccc} 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{ccccc} -1 & -1 & 0 & 2 & -4 \\ 0 & 0 & 1 & -3 & 0 \\ 2 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & -3 & 0% \end{array} \right] =\allowbreak \left[ \begin{array}{ccccc} -1 & -1 & 0 & 2 & -4 \\ 2 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 1 & -3 & 0% \end{array} \right] $ $\qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad $ Adding $\left( -1\right) \times R_{3}$ to $R_{4}$ yields $\left[ \begin{array}{cccc} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & -1 & 1% \end{array} \right] \allowbreak \left[ \begin{array}{ccccc} -1 & -1 & 0 & 
2 & -4 \\ 2 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 1 & -3 & 0% \end{array} \right] =\allowbreak \left[ \begin{array}{ccccc} -1 & -1 & 0 & 2 & -4 \\ 2 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 0 & 0 & 0% \end{array} \right] $ \vspace{1pt} Adding $2\times R_{1}$ to $R_{2}$ yields $\left[ \begin{array}{cccc} 1 & 0 & 0 & 0 \\ 2 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{lllll} -1 & -1 & 0 & 2 & -4 \\ 2 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 0 & 0 & 0% \end{array} \right] =\allowbreak \left[ \begin{array}{ccccc} -1 & -1 & 0 & 2 & -4 \\ 0 & -1 & 0 & 4 & -8 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 0 & 0 & 0% \end{array} \right] $ \vspace{1pt}Multiplying $R_{1}$ and $R_{2}$ by $-1$ yields $\left[ \begin{array}{cccc} -1 & 0 & 0 & 0 \\ 0 & -1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{lllll} -1 & -1 & 0 & 2 & -4 \\ 0 & -1 & 0 & 4 & -8 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 0 & 0 & 0% \end{array} \right] =\allowbreak \left[ \begin{array}{ccccc} 1 & 1 & 0 & -2 & 4 \\ 0 & 1 & 0 & -4 & 8 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 0 & 0 & 0% \end{array} \right] $ Eliminating the $1$ in row one column 2 yields $\left[ \begin{array}{cccc} 1 & -1 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{ccccc} 1 & 1 & 0 & -2 & 4 \\ 0 & 1 & 0 & -4 & 8 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 0 & 0 & 0% \end{array} \right] =\allowbreak \left[ \begin{array}{ccccc} 1 & 0 & 0 & 2 & -4 \\ 0 & 1 & 0 & -4 & 8 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 0 & 0 & 0% \end{array} \right] $ \paragraph{\protect\vspace{1pt}Example:} Solve the system \ $AX=C,$ where \vspace{1pt} $A=\left[ \begin{array}{llll} -1 & -1 & 0 & 2 \\ 0 & 0 & 1 & -3 \\ 2 & 1 & 0 & 0 \\ 2 & 2 & 1 & -7% \end{array} \right] ,\qquad X=\left[ \begin{array}{c} x_{1} \\ x_{2} \\ x_{3} \\ x_{3}% \end{array} \right] $ \ and $C=\left[ \begin{array}{l} -4 \\ 0 \\ 0 \\ 8% \end{array} \right] $ \qquad \qquad $\left[ 
\begin{array}{llll} -1 & -1 & 0 & 2 \\ 0 & 0 & 1 & -3 \\ 2 & 1 & 0 & 0 \\ 2 & 2 & 1 & -7% \end{array} \right] \left[ \begin{array}{c} x_{1} \\ x_{2} \\ x_{3} \\ x_{4}% \end{array} \right] =\allowbreak \left[ \begin{array}{c} -x_{1}-x_{2}+2x_{4} \\ x_{3}-3x_{4} \\ 2x_{1}+x_{2} \\ 2x_{1}+2x_{2}+x_{3}-7x_{4}% \end{array} \right] \allowbreak $ Hence the system is \begin{eqnarray*} -x_{1}-x_{2}+2x_{4} &=&-4 \\ x_{3}-3x_{4} &=&0 \\ 2x_{1}+x_{2} &=&0 \\ 2x_{1}+2x_{2}+x_{3}-7x_{4} &=&8 \end{eqnarray*} Solution is: $\left\{ x_{3}=3x_{4},x_{1}=-2x_{4}-4,x_{2}=4x_{4}+8,x_{4}=x_{4}\right\} \allowbreak . $ We see that this is indeed the case from the row-reduced echelon form $\left[ \begin{array}{lllll} -1 & -1 & 0 & 2 & -4 \\ 0 & 0 & 1 & -3 & 0 \\ 2 & 1 & 0 & 0 & 0 \\ 2 & 2 & 1 & -7 & 8% \end{array} \right] $, row echelon form: $\left[ \begin{array}{ccccc} 1 & 0 & 0 & 2 & -4 \\ 0 & 1 & 0 & -4 & 8 \\ 0 & 0 & 1 & -3 & 0 \\ 0 & 0 & 0 & 0 & 0% \end{array} \right] $ \vspace{1pt} Remark: In general, when the augmented matrix of a system has been carried to (reduced) row-echelon form, variables containing a leading $1$ are called \emph{leading variables.} The non-leading variables (if any) end up as parameters in the final solution, and the leading variables are given (by the equations) in terms of these parameters. \vspace{1pt} \paragraph{Theorem} Every matrix can be brought to (reduced) row-echelon form by a series of elementary row operations. \subsection{Gauss Elimination (Gauss Algorithm)} \vspace{1pt} Step 1. If the matrix consists entirely of zeros, stop, - it is \emph{already% } in row-echelon form. \vspace{1pt} Step 2. Otherwise, find the first column from the left containing a nonzero entry (call it $c)$, and move the row containing that entry to the top position. \vspace{1pt} Step 3. Now multiply that row by $\dfrac{1}{c}$ to create a leading $1.$ \vspace{1pt} Step 4. By subtracting multiples of that row from rows below it, make each entry below the leading $1$ zero. 
\vspace{1pt} Step 5. Cover the top row and repeat steps 1-4 on the submatrix consisting of the remaining rows. \vspace{1pt} The process stops when either no rows remain at step 5 or the remaining rows consist of zeros. \vspace{1pt} This procedure produces a matrix that is in row-echelon form. To get a matrix that is in row-reduced echelon form we add one more step. \vspace{1pt} Step 6. Starting with the last nonzero row work upward: For each row introduce zeros above the leading $1$ by adding suitable multiples to the corresponding rows. \vspace{1pt} Note: The entire procedure (steps1-6) is often called \emph{Gauss-Jordan Elimination.} \vspace{1pt} \paragraph{\protect\vspace{1pt}Example:} \vspace{1pt} Solve the system \begin{eqnarray*} x_{1}-3x_{2}+x_{3}-x_{4} &=&-1 \\ -x_{1}+3x_{2}+3x_{4}+x_{5} &=&3 \\ 2x_{1}-6x_{2}+3x_{3}-x_{5} &=&2 \\ -x_{1}+3x_{2}+x_{3}+5x_{4}+x_{5} &=&6 \end{eqnarray*} \vspace{1pt}using the Gauss algorithm and back substitution. The augmented matrix is \begin{center} $\left[ \begin{array}{cccccc} 1 & -3 & 1 & -1 & 0 & -1 \\ -1 & 3 & 0 & 3 & 1 & 3 \\ 2 & -6 & 3 & 0 & -1 & 2 \\ -1 & 3 & 1 & 5 & 1 & 6% \end{array} \right] $, row echelon form: $\left[ \begin{array}{cccccc} 1 & -3 & 0 & -3 & 0 & -4 \\ 0 & 0 & 1 & 2 & 0 & 3 \\ 0 & 0 & 0 & 0 & 1 & -1 \\ 0 & 0 & 0 & 0 & 0 & 0% \end{array} \right] $ \vspace{1pt} \end{center} The corresponding equations are \vspace{1pt} \begin{eqnarray*} x_{1}-3x_{2}-3x_{4} &=&-4 \\ x_{3}+2x_{4} &=&3 \\ x_{5} &=&-1 \end{eqnarray*} \vspace{1pt} Thus \vspace{1pt} \begin{center} $x_{5}=-1,\qquad x_{3}=3-2x_{4},\qquad x_{1}=3x_{2}+3x_{4}-4$ \vspace{1pt} \end{center} Letting $x_{2}=s$ and $x_{4}=t$ we have the infinite set of solutions \vspace{1pt} \begin{center} $x_{1}=3s+3t-4,\qquad x_{3}=3-2t,\qquad x_{5}=-1$ \vspace{1pt} \end{center} Let $\ X_{p}=\left[ \begin{array}{c} -4 \\ 0 \\ 3 \\ 0 \\ -1% \end{array} \right] $, $\ X_{n}=\left[ \begin{array}{c} 3s+3t \\ s \\ -2t \\ t \\ 0% \end{array} \right] $ , and$\ A=\left[ 
\begin{array}{ccccc} 1 & -3 & 1 & -1 & 0 \\ -1 & 3 & 0 & 3 & 1 \\ 2 & -6 & 3 & 0 & -1 \\ -1 & 3 & 1 & 5 & 1% \end{array} \right] .$ \vspace{1pt} Then $AX_{n}=\allowbreak \left[ \begin{array}{c} 0 \\ 0 \\ 0 \\ 0% \end{array} \right] $ and $AX_{p}=\allowbreak \left[ \begin{array}{c} -1 \\ 3 \\ 2 \\ 6% \end{array} \right] =b.$ Thus the solution of our system can be written as $% X=X_{n}+X_{p} $ where $X_{n}$ is a solution of the homogeneous system of equations $AX=0$ and $X_{p}$ is a solution of the non-homogeneous system $% AX=b.$ \vspace{1pt} \subsection{Inverse of a Matrix} \vspace{1pt}Definition: If $A$ is a square $n\times n$, a matrix $B$ is called the \emph{inverse} of $A$ if and only if \vspace{1pt} \begin{center} $AB=I$ \ \ and $\ \ \ BA=I.$ \vspace{1pt} \end{center} \vspace{1pt}A matrix $A$ that has an inverse is called an \emph{invertible or nonsingular matrix.} \paragraph{Example} \vspace{1pt}Show that the matrix $B=\left[ \begin{array}{cc} -1 & 1 \\ 1 & 0% \end{array} \right] $ is an inverse of $A=\left[ \begin{array}{cc} 0 & 1 \\ 1 & 1% \end{array} \right] .$ \vspace{1pt} $BA=\left[ \begin{array}{cc} -1 & 1 \\ 1 & 0% \end{array} \right] \left[ \begin{array}{cc} 0 & 1 \\ 1 & 1% \end{array} \right] =\allowbreak \left[ \begin{array}{cc} 1 & 0 \\ 0 & 1% \end{array} \right] $ and $AB=\left[ \begin{array}{cc} 0 & 1 \\ 1 & 1% \end{array} \right] \left[ \begin{array}{cc} -1 & 1 \\ 1 & 0% \end{array} \right] =\allowbreak \left[ \begin{array}{cc} 1 & 0 \\ 0 & 1% \end{array} \right] ,$ so $B$ is indeed an inverse of $A.$ \vspace{1pt} \paragraph{Example} The matrix $A=\left[ \begin{array}{cc} 0 & 0 \\ 1 & 1% \end{array} \right] $ is not invertible. 
For if $B=\left[ \begin{array}{cc} b_{11} & b_{12} \\ b_{21} & b_{22}% \end{array} \right] $ is any $2\times 2$ matrix, then \vspace{1pt} $\left[ \begin{array}{cc} 0 & 0 \\ 1 & 1% \end{array} \right] \left[ \begin{array}{cc} b_{11} & b_{12} \\ b_{21} & b_{22}% \end{array} \right] =\allowbreak \left[ \begin{array}{cc} 0 & 0 \\ b_{11}+b_{21} & b_{12}+b_{22}% \end{array} \right] \neq \left[ \begin{array}{cc} 1 & 0 \\ 0 & 1% \end{array} \right] =I.$ \vspace{1pt} \paragraph{Theorem} If $B$ and $C$ are both inverses of $A,$ then $B=C.$ \vspace{1pt} \paragraph{Proof:} Since $B$ and $C$ are both inverses of $A,$ \ $CA=I=AB.$ Hence \vspace{1pt} \begin{equation*} B=IB=\left( CA\right) B=C\left( AB\right) =CI=C \end{equation*} \vspace{1pt} Remark: If $A$ is invertible then the (unique) inverse of $A$ is denoted by $% A^{-1}.$ \vspace{1pt} \paragraph{Example:} Under what conditions is the $2\times 2$ matrix $A=\left[ \begin{array}{cc} a & b \\ c & d% \end{array} \right] $ invertible. When $A$ is invertible, find $A^{-1}.$ \vspace{1pt} We seek a matrix $A^{-1}=\left[ \begin{array}{cc} b_{11} & b_{12} \\ b_{21} & b_{22}% \end{array} \right] $ such that \vspace{1pt} $\left[ \begin{array}{cc} a & b \\ c & d% \end{array} \right] \left[ \begin{array}{c} b_{11} \\ b_{21}% \end{array} \right] =\left[ \begin{array}{c} 1 \\ 0% \end{array} \right] $ \ and $\left[ \begin{array}{cc} a & b \\ c & d% \end{array} \right] \left[ \begin{array}{c} b_{12} \\ b_{22}% \end{array} \right] =\left[ \begin{array}{c} 0 \\ 1% \end{array} \right] $ \vspace{1pt} We could form two augmented matrices (one for each system) and then put each of them in reduced row-echelon form. However, we may just as well do the entire reduction at the same time. 
Thus \vspace{1pt} $\left[ \begin{array}{cccc} a & b & 1 & 0 \\ c & d & 0 & 1% \end{array} \right] $, row echelon form: $\left[ \begin{array}{cccc} 1 & 0 & \frac{d}{da-cb} & -\frac{b}{da-cb} \\ 0 & 1 & -\frac{c}{da-cb} & \frac{1}{da-cb}a% \end{array} \right] .$ \vspace{1pt} Thus we see that $A$ is invertible $\Longleftrightarrow $ $ad-bc\neq 0.$ If this condition holds, then \vspace{1pt} \begin{center} $A^{-1}=\dfrac{1}{ad-bc}\left[ \begin{array}{cc} d & -b \\ -c & a% \end{array} \right] $ \end{center} \vspace{1pt} \paragraph{\protect\vspace{1pt}Theorem} \vspace{1pt}Suppose a system of $n$ equations in $n$ variables is written in matrix form as \vspace{1pt} \begin{equation*} AX=B \end{equation*} If the $n\times n$ coefficient matrix $A$ is invertible, then the system has the unique solution \vspace{1pt} \begin{equation*} X=A^{-1}B \end{equation*} \paragraph{Corollary} Suppose the system $AX=0$ of $n$ equations in $n$ unknowns a nontrivial solution. Then $A$ cannot be invertible. \vspace{1pt} \paragraph{Example:} Let $A=\left[ \begin{array}{ccc} 1 & -2 & 2 \\ 2 & 1 & 1 \\ 1 & 0 & 1% \end{array} \right] ,$ and $B=\left[ \begin{array}{c} 3 \\ 0 \\ -2% \end{array} \right] .$ Find $A^{-1}$ and use it to solve the system of equations $AX=B$. \vspace{1pt}We use Maple to find $A^{-1}.$ $\left[ \begin{array}{ccc} 1 & -2 & 2 \\ 2 & 1 & 1 \\ 1 & 0 & 1% \end{array} \right] $, inverse: $\left[ \begin{array}{ccc} 1 & 2 & -4 \\ -1 & -1 & 3 \\ -1 & -2 & 5% \end{array} \right] =A^{-1}$ Then \vspace{1pt} \begin{center} $X=A^{-1}B=\left[ \begin{array}{ccc} 1 & 2 & -4 \\ -1 & -1 & 3 \\ -1 & -2 & 5% \end{array} \right] \left[ \begin{array}{c} 3 \\ 0 \\ -2% \end{array} \right] =\allowbreak \left[ \begin{array}{c} 11 \\ -9 \\ -13% \end{array} \right] $ \end{center} \vspace{1pt} \subsection{\protect\vspace{1pt}The Calculation of $A^{-1}$ by Gauss-Jordan Elimination} Suppose we want to find the inverse of \begin{center} $A=\left[ \begin{array}{lllll} a_{11} & . & . & . & a_{1n} \\ . & . & . 
& . & . \\ . & . & . & . & . \\ a_{n1} & . & . & . & a_{nn}% \end{array} \right] $ \end{center} \vspace{1pt} Then we want a matrix $B$ such that $AB=I$. If $b_{i1}$ are the elements in the first column of $B$ then \vspace{1pt} \qquad \qquad $A\left[ \begin{array}{l} b_{11} \\ . \\ . \\ b_{n1}% \end{array} \right] =\left[ \begin{array}{l} 1 \\ 0 \\ . \\ 0% \end{array} \right] \Longrightarrow $ we must solve $AX=\left[ \begin{array}{l} 1 \\ 0 \\ . \\ 0% \end{array} \right] $ . We can solve this system by forming\qquad $\left[ A\text{ } \begin{array}{l} 1 \\ 0 \\ . \\ 0% \end{array} \right] ,$ and then row reducing this to reduced row-echelon form. \vspace{1pt} If $b_{i2}$ are elements in the second column of $B\Longrightarrow $ $A\left[ \begin{array}{l} b_{12} \\ . \\ . \\ b_{n2}% \end{array} \right] =\left[ \begin{array}{c} 0 \\ 1 \\ . \\ 0% \end{array} \right] \Longrightarrow $ \ we must solve $AX=\left[ \begin{array}{c} 0 \\ 1 \\ . \\ 0% \end{array} \right] $. Therefore form \vspace{1pt} \qquad $\left[ A\qquad \begin{array}{l} 1 \\ 0 \\ . \\ 0% \end{array} \right] $ \ and reduce to reduced row-echelon form. \vspace{1pt} \vspace{1pt} In general we need to solve the $n$ systems \begin{center} \qquad $AX=\left[ \begin{array}{l} 0 \\ 0 \\ . \\ . \\ 0 \\ 0 \\ 0 \\ 1 \\ 0 \\ . \\ . \\ 0% \end{array} \right] =C_{j}$ \end{center} \qquad\ \ \ \ \ \ \ \vspace{1pt}where $C_{j}$ is the $jth$ column of the $n\times n$ identity matrix. Rather doing the same row reduction $n$ times, we use the \emph{% Gauss-Jordan method} which computes $A^{-1}$ by solving all $n$ systems at the same time. \vspace{1pt} We can solve all these systems at once by forming $\left[ A|I\right] $ and then putting this matrix in reduced row-echelon form. \vspace{1pt} \subsection{Slide Show Example:} To view this example you will need Real Player G2 installed on your machine. Click \hyperref{Real}{}{}{http://www.real.com} to download the free Real Player G2. 
Now click \hyperref{Inverse Slice Show}{}{}{% http://attila.stevens-tech.edu:7070/ramgen/llevine1/inverse_94.rm} to see a slide show that explains how you find the inverse of a matrix. You may also view the example in SNB by clicking \hyperref{Slide Inverse Example}{}{}{% smatrices0607.ll.tex}. \paragraph{Example:} Find $A^{-1}$ for $A=\left[ \begin{array}{ccc} 2 & 7 & 1 \\ 1 & 4 & -1 \\ 1 & 3 & 0% \end{array} \right] $. We form $\left[ \begin{array}{cccccc} 2 & 7 & 1 & 1 & 0 & 0 \\ 1 & 4 & -1 & 0 & 1 & 0 \\ 1 & 3 & 0 & 0 & 0 & 1% \end{array} \right] .$ $\vspace{1pt}$ $\vspace{1pt}\left[ \begin{array}{ccc} 0 & 1 & 0 \\ 1 & 0 & 0 \\ 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{cccccc} 2 & 7 & 1 & 1 & 0 & 0 \\ 1 & 4 & -1 & 0 & 1 & 0 \\ 1 & 3 & 0 & 0 & 0 & 1% \end{array} \right] =\allowbreak \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 2 & 7 & 1 & 1 & 0 & 0 \\ 1 & 3 & 0 & 0 & 0 & 1% \end{array} \right] $ \vspace{1pt} $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ -2 & 1 & 0 \\ 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 2 & 7 & 1 & 1 & 0 & 0 \\ 1 & 3 & 0 & 0 & 0 & 1% \end{array} \right] =\allowbreak \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 0 & -1 & 3 & 1 & -2 & 0 \\ 1 & 3 & 0 & 0 & 0 & 1% \end{array} \right] $ \vspace{1pt} $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & 1 & 0 \\ -1 & 0 & 1% \end{array} \right] \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 0 & -1 & 3 & 1 & -2 & 0 \\ 1 & 3 & 0 & 0 & 0 & 1% \end{array} \right] =\allowbreak \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 0 & -1 & 3 & 1 & -2 & 0 \\ 0 & -1 & 1 & 0 & -1 & 1% \end{array} \right] $ $\allowbreak $ $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 0 & -1 & 3 & 1 & -2 & 0 \\ 0 & -1 & 1 & 0 & -1 & 1% \end{array} \right] =\allowbreak \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 0 & 1 & -3 & -1 & 2 & 0 \\ 0 & -1 & 1 & 0 & -1 
& 1% \end{array} \right] $ \vspace{1pt} $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 1 & 1% \end{array} \right] \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 0 & 1 & -3 & -1 & 2 & 0 \\ 0 & -1 & 1 & 0 & -1 & 1% \end{array} \right] =\allowbreak \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 0 & 1 & -3 & -1 & 2 & 0 \\ 0 & 0 & -2 & -1 & 1 & 1% \end{array} \right] $ \vspace{1pt} $\left[ \begin{array}{ccc} 1 & -4 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1% \end{array} \right] \allowbreak \left[ \begin{array}{cccccc} 1 & 4 & -1 & 0 & 1 & 0 \\ 0 & 1 & -3 & -1 & 2 & 0 \\ 0 & 0 & -2 & -1 & 1 & 1% \end{array} \right] =\allowbreak \left[ \begin{array}{cccccc} 1 & 0 & 11 & 4 & -7 & 0 \\ 0 & 1 & -3 & -1 & 2 & 0 \\ 0 & 0 & -2 & -1 & 1 & 1% \end{array} \right] $ \vspace{1pt} $\allowbreak \left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & -\frac{1}{2}% \end{array} \right] \left[ \begin{array}{cccccc} 1 & 0 & 11 & 4 & -7 & 0 \\ 0 & 1 & -3 & -1 & 2 & 0 \\ 0 & 0 & -2 & -1 & 1 & 1% \end{array} \right] =\allowbreak \left[ \begin{array}{cccccc} 1 & 0 & 11 & 4 & -7 & 0 \\ 0 & 1 & -3 & -1 & 2 & 0 \\ 0 & 0 & 1 & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2}% \end{array} \right] $ \vspace{1pt} $\left[ \begin{array}{ccc} 1 & 0 & 0 \\ 0 & 1 & 3 \\ 0 & 0 & 1% \end{array} \right] \allowbreak \left[ \begin{array}{cccccc} 1 & 0 & 11 & 4 & -7 & 0 \\ 0 & 1 & -3 & -1 & 2 & 0 \\ 0 & 0 & 1 & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2}% \end{array} \right] =\allowbreak \left[ \begin{array}{cccccc} 1 & 0 & 11 & 4 & -7 & 0 \\ 0 & 1 & 0 & \frac{1}{2} & \frac{1}{2} & -\frac{3}{2} \\ 0 & 0 & 1 & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2}% \end{array} \right] $ \vspace{1pt} $\left[ \begin{array}{ccc} 1 & 0 & -11 \\ 0 & 1 & 0 \\ 0 & 0 & 1% \end{array} \right] \left[ \begin{array}{cccccc} 1 & 0 & 11 & 4 & -7 & 0 \\ 0 & 1 & 0 & \frac{1}{2} & \frac{1}{2} & -\frac{3}{2} \\ 0 & 0 & 1 & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2}% \end{array} \right] =\allowbreak \left[ \begin{array}{cccccc} 1 & 0 & 
0 & -\frac{3}{2} & -\frac{3}{2} & \frac{11}{2} \\ 0 & 1 & 0 & \frac{1}{2} & \frac{1}{2} & -\frac{3}{2} \\ 0 & 0 & 1 & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2}% \end{array} \right] $ \vspace{1pt} Thus \begin{center} $A^{-1}=\dfrac{1}{2}\left[ \begin{array}{ccc} -3 & -3 & 11 \\ 1 & 1 & -3 \\ 1 & -1 & -1% \end{array} \right] .$ \end{center} \subsection{Properties of Inverses} \vspace{1pt} \paragraph{Theorem} In the following all of the matrices are $n\times n.$ \vspace{1pt} 1. $I$ is invertible and $I^{-1}=I.$ \vspace{1pt} 2. If $A$ is invertible, so is $A^{-1},$ and $\left( A^{-1}\right) ^{-1}=A.$ \vspace{1pt} 3. If $A$ and $B$ are invertible, so is $AB,$ and $\left( AB\right) ^{-1}=B^{-1}A^{-1}.$ \vspace{1pt} 4. If $A_{1},A_{2},....,A_{k}$ are all invertible, so is the product $% A_{1}A_{2}\cdots A_{k}$ and $\left( A_{1}A_{2}\cdots A_{k}\right) ^{-1}=A_{k}^{-1}\cdots A_{2}^{-1}A_{1}^{-1}.$ \vspace{1pt} 5. If $A$ is invertible, so is $A^{k}$ for $k\geq 1$, and $\left( A^{k}\right) ^{-1}=\left( A^{-1}\right) ^{k}.$ \vspace{1pt} 6. If $A$ is invertible and $a\neq 0$ is a number, then $aA$ is invertible and $\left( aA\right) ^{-1}=\dfrac{1}{a}A^{-1}.$ \vspace{1pt} 7. 
If $A$ is invertible, so is its transpose $A^{T},$ and $\left( A^{T}\right) ^{-1}=\left( A^{-1}\right) ^{T}.$ \vspace{1pt} \paragraph{Example:} Let $A=\left[ \begin{array}{cc} 1 & 2 \\ 4 & -1% \end{array} \right] $ and $B=\left[ \begin{array}{cc} -1 & 1 \\ 2 & 3% \end{array} \right] .$ Then $AB=\left[ \begin{array}{cc} 1 & 2 \\ 4 & -1% \end{array} \right] \left[ \begin{array}{cc} -1 & 1 \\ 2 & 3% \end{array} \right] =\allowbreak \left[ \begin{array}{cc} 3 & 7 \\ -6 & 1% \end{array} \right] $ Hence \begin{center} $\left( AB\right) ^{-1}=\left[ \begin{array}{cc} 3 & 7 \\ -6 & 1% \end{array} \right] ^{-1}=\allowbreak \left[ \begin{array}{cc} \frac{1}{45} & -\frac{7}{45} \\ \frac{2}{15} & \frac{1}{15}% \end{array} \right] .$ \end{center} Also, $B^{-1}=\left[ \begin{array}{cc} -1 & 1 \\ 2 & 3% \end{array} \right] ^{-1}=\allowbreak \left[ \begin{array}{cc} -\frac{3}{5} & \frac{1}{5} \\ \frac{2}{5} & \frac{1}{5}% \end{array} \right] $ and $A^{-1}=\left[ \begin{array}{cc} 1 & 2 \\ 4 & -1% \end{array} \right] ^{-1}=\allowbreak \left[ \begin{array}{cc} \frac{1}{9} & \frac{2}{9} \\ \frac{4}{9} & -\frac{1}{9}% \end{array} \right] ,$ so that \vspace{1pt} \begin{center} $B^{-1}A^{-1}=\left[ \begin{array}{cc} -\frac{3}{5} & \frac{1}{5} \\ \frac{2}{5} & \frac{1}{5}% \end{array} \right] \left[ \begin{array}{cc} \frac{1}{9} & \frac{2}{9} \\ \frac{4}{9} & -\frac{1}{9}% \end{array} \right] =\allowbreak \left[ \begin{array}{cc} \frac{1}{45} & -\frac{7}{45} \\ \frac{2}{15} & \frac{1}{15}% \end{array} \right] =\left( AB\right) ^{-1}.$ \vspace{1pt} \end{center} \vspace{1pt} \end{document} %%%%%%%%%%%%%%%%%%%% End /document/lec_4_17_00.tex %%%%%%%%%%%%%%%%%%%%