\documentclass{article}
\usepackage{fullpage}
\usepackage{amsfonts, framed}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage[margin=2cm,top=2cm,footskip=1cm,nohead]{geometry}
\usepackage{setspace}
\newcommand*{\rowopswap}[2]{\ensuremath{R\sb{#1}\leftrightarrow R\sb{#2}}}
\newcommand*{\rowopmult}[2]{\ensuremath{#1R\sb{#2}}}
\newcommand*{\rowopadd}[3]{\ensuremath{#1R\sb{#2}+R\sb{#3}}}
\usepackage{wasysym}
\usepackage{multicol}
\usepackage{graphicx}
\usepackage{wrapfig}
\usepackage{subfigure}
\title{Proof Portfolio}
\author{Benjamin A. Sigmon}
\begin{document}
\maketitle
\newpage
Suppose that
$$
A=
\begin{pmatrix}
a&b\\
c&d
\end{pmatrix}
$$
is a $2 \times 2$ matrix where $ad - bc \neq 0$. Prove that $A$ is nonsingular.
\vspace{1 in}
\begin{proof}
Since $ad - bc \neq 0$, we have $ad \neq bc$, so $ad$ and $bc$ cannot both be zero. This leaves two cases to consider: 1) $ad \neq 0$ and 2) $bc \neq 0$. Suppose $ad \neq 0$, so that $a$ and $d$ are both nonzero. If $c = 0$, the matrix is already upper triangular with nonzero diagonal entries and reduces directly to the identity, so assume also that $c \neq 0$; then every row operation below uses a nonzero scalar. Note:
\begin{align*}
\begin{pmatrix}
a&b\\
c&d
\end{pmatrix}
\xrightarrow{\rowopmult{c}{1}}
\begin{pmatrix}
ac&bc\\
c&d
\end{pmatrix}
\xrightarrow{\rowopmult{a}{2}}
\begin{pmatrix}
ac&bc\\
ac&ad
\end{pmatrix}
\xrightarrow{\rowopadd{-1}{1}{2}}
\begin{pmatrix}
ac&bc\\
0&ad-bc
\end{pmatrix}
\xrightarrow{\rowopmult{1/c}{1}}\\
\begin{pmatrix}
a&b\\
0&ad-bc
\end{pmatrix}
\xrightarrow{\rowopmult{1/(ad-bc)}{2}}
\begin{pmatrix}
a&b\\
0&1
\end{pmatrix}
\xrightarrow{\rowopadd{-b}{2}{1}}
\begin{pmatrix}
a&0\\
0&1
\end{pmatrix}
\xrightarrow{\rowopmult{1/a}{1}}
\begin{pmatrix}
1&0\\
0&1
\end{pmatrix}
\end{align*}
Since $ad - bc \neq 0$, dividing $ad - bc$ by itself yields 1, so the row operation $\rowopmult{1/(ad-bc)}{2}$ is valid. The row reduction to the $2 \times 2$ identity matrix when $bc \neq 0$ is similar, except that Row 1 and Row 2 must be swapped ($\rowopswap{1}{2}$) once a zero appears in the first column of the first row. Since $A$ row-reduces to the identity matrix, $A$ is nonsingular.
\end{proof}
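As a concrete check of this reduction (with entries chosen arbitrarily), take $a=3$, $b=1$, $c=2$, and $d=4$, so that $ad-bc=10 \neq 0$. The same sequence of row operations gives
\begin{align*}
\begin{pmatrix}
3&1\\
2&4
\end{pmatrix}
\xrightarrow{\rowopmult{2}{1}}
\begin{pmatrix}
6&2\\
2&4
\end{pmatrix}
\xrightarrow{\rowopmult{3}{2}}
\begin{pmatrix}
6&2\\
6&12
\end{pmatrix}
\xrightarrow{\rowopadd{-1}{1}{2}}
\begin{pmatrix}
6&2\\
0&10
\end{pmatrix}
\xrightarrow{\rowopmult{1/2}{1}}\\
\begin{pmatrix}
3&1\\
0&10
\end{pmatrix}
\xrightarrow{\rowopmult{1/10}{2}}
\begin{pmatrix}
3&1\\
0&1
\end{pmatrix}
\xrightarrow{\rowopadd{-1}{2}{1}}
\begin{pmatrix}
3&0\\
0&1
\end{pmatrix}
\xrightarrow{\rowopmult{1/3}{1}}
\begin{pmatrix}
1&0\\
0&1
\end{pmatrix}
\end{align*}
and the matrix reaches the identity, as claimed.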
\newpage
Suppose that $A$ is an $m \times n$ matrix with a row where every entry is zero. Suppose that $B$ is an $n \times p$ matrix. Prove that $AB$ has a row where every entry is zero.
\vspace{1 in}
\begin{proof}
Let $A$ be an $m \times n$ matrix whose $i$th row contains only zero entries, and let $B$ be an $n \times p$ matrix. Let $j$ be any column index with $1 \leq j \leq p$. Note:
\begin{align*}
[AB]_{ij} &= \sum_{k=1}^{n}[A]_{ik}[B]_{kj} &&\\
&= \sum_{k=1}^{n}0[B]_{kj} &&\\
&= \sum_{k=1}^{n}0 &&\\
&= 0
\end{align*}
This shows the $i$th row of $AB$ has all zero entries.
\end{proof}
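For illustration (with small matrices chosen arbitrarily), let
$$
A=\begin{pmatrix}
1&2\\
0&0
\end{pmatrix}, \qquad
B=\begin{pmatrix}
3&4\\
5&6
\end{pmatrix}.
$$
The second row of $A$ is zero, and indeed
$$
AB=\begin{pmatrix}
1(3)+2(5) & 1(4)+2(6)\\
0(3)+0(5) & 0(4)+0(6)
\end{pmatrix}
=\begin{pmatrix}
13&16\\
0&0
\end{pmatrix},
$$
whose second row is zero as well.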
\newpage
Prove property AAC of Theorem VSPCV. That is: If $\overrightarrow{\rm u}, \overrightarrow{\rm v}, \overrightarrow{\rm w} \in \mathbb{C}^{m}$, then
$\overrightarrow{\rm u}+ (\overrightarrow{\rm v} + \overrightarrow{\rm w}) = (\overrightarrow{\rm u} + \overrightarrow{\rm v}) + \overrightarrow{\rm w}$.
\vspace{1 in}
\begin{proof}
Let $\overrightarrow{\rm u}, \overrightarrow{\rm v}, \overrightarrow{\rm w} \in \mathbb{C}^{m}$ and $1 \leq i \leq m$. Note:
\begin{align*}
[\overrightarrow{\rm u} + (\overrightarrow{\rm v} + \overrightarrow{\rm w})]_{i} & = [\overrightarrow{\rm u}]_{i} + [\overrightarrow{\rm v} + \overrightarrow{\rm w}]_{i}\\
& = [\overrightarrow{\rm u}]_{i} + [\overrightarrow{\rm v}]_{i} + [\overrightarrow{\rm w}]_{i}\\
& = [\overrightarrow{\rm u} + \overrightarrow{\rm v}]_{i} + [\overrightarrow{\rm w}]_{i}\\
& = [(\overrightarrow{\rm u} + \overrightarrow{\rm v}) + \overrightarrow{\rm w}]_{i}
\end{align*}
This shows associativity of vector addition.
\end{proof}
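As a quick numerical check (with vectors chosen arbitrarily in $\mathbb{C}^{2}$), let $\overrightarrow{\rm u} = \begin{pmatrix} 1+i\\ 2 \end{pmatrix}$, $\overrightarrow{\rm v} = \begin{pmatrix} 3\\ -i \end{pmatrix}$, and $\overrightarrow{\rm w} = \begin{pmatrix} 0\\ 1 \end{pmatrix}$. Then
$$
\overrightarrow{\rm u} + (\overrightarrow{\rm v} + \overrightarrow{\rm w})
= \begin{pmatrix} 1+i\\ 2 \end{pmatrix} + \begin{pmatrix} 3\\ 1-i \end{pmatrix}
= \begin{pmatrix} 4+i\\ 3-i \end{pmatrix}
= \begin{pmatrix} 4+i\\ 2-i \end{pmatrix} + \begin{pmatrix} 0\\ 1 \end{pmatrix}
= (\overrightarrow{\rm u} + \overrightarrow{\rm v}) + \overrightarrow{\rm w}.
$$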
\newpage
Suppose that $S$ is a linearly independent set of vectors, and $T$ is a subset of $S$; that is, $T \subseteq S$. Prove that $T$ is a linearly independent set of vectors.
\vspace{1 in}
\begin{proof}
Suppose $S = (\overrightarrow{\rm u_{1}},\overrightarrow{\rm u_{2}},..., \overrightarrow{\rm u_{p}}, \overrightarrow{\rm v_{1}},\overrightarrow{\rm v_{2}},..., \overrightarrow{\rm v_{q}})$ and $T \subseteq S$ where $T = (\overrightarrow{\rm v_{1}},\overrightarrow{\rm v_{2}},..., \overrightarrow{\rm v_{q}})$. Let $\alpha_{i},\beta_{j} \in \mathbb{C}$ for $1 \leq i \leq p$ and $1 \leq j \leq q$. Since $S$ is linearly independent, the relation
$$
\sum_{i=1}^{p}\alpha_{i}\overrightarrow{\rm u_{i}} + \sum_{j=1}^{q}\beta_{j}\overrightarrow{\rm v_{j}} = \overrightarrow{\rm 0}
$$
holds only when $\alpha_{i}=0$ for every $i$ and $\beta_{j}=0$ for every $j$, by the definition of linear independence. Now suppose
$$
\sum_{j=1}^{q}\beta_{j}\overrightarrow{\rm v_{j}} = \overrightarrow{\rm 0}
$$
is a relation of linear dependence on $T$. Taking $\alpha_{i} = 0$ for every $i$ extends it to a relation of linear dependence on $S$, so every scalar in it must be zero; in particular, $\beta_{j}=0$ for all $j$. This shows $T$ to be linearly independent by definition. Thus, any subset of a linearly independent set is linearly independent.
\end{proof}
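For a concrete illustration (an arbitrarily chosen example in $\mathbb{C}^{3}$), let
$$
S = \left(\begin{pmatrix}1\\0\\0\end{pmatrix}, \begin{pmatrix}0\\1\\0\end{pmatrix}, \begin{pmatrix}0\\0\\1\end{pmatrix}\right), \qquad
T = \left(\begin{pmatrix}1\\0\\0\end{pmatrix}, \begin{pmatrix}0\\1\\0\end{pmatrix}\right).
$$
$S$ is linearly independent, and any relation of linear dependence on $T$,
$$
\beta_{1}\begin{pmatrix}1\\0\\0\end{pmatrix} + \beta_{2}\begin{pmatrix}0\\1\\0\end{pmatrix} = \begin{pmatrix}\beta_{1}\\ \beta_{2}\\ 0\end{pmatrix} = \overrightarrow{\rm 0},
$$
forces $\beta_{1} = \beta_{2} = 0$, so the subset $T$ is linearly independent as well.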
\newpage
Suppose that $T$ is a linearly dependent set of vectors, and $T \subseteq S$. Prove that $S$ is a linearly dependent set of vectors.
\vspace{1 in}
\begin{proof}
Suppose $T = (\overrightarrow{\rm v_{1}}, \overrightarrow{\rm v_{2}}, ... , \overrightarrow{\rm v_{q}})$ and that $T \subseteq S$. Since $T$ is linearly dependent, there exist scalars $\alpha_{i} \in \mathbb{C}$, $1 \leq i \leq q$, not all zero, such that:
$$
\sum_{i=1}^{q}\alpha_{i}\overrightarrow{\rm v_{i}} = \overrightarrow{\rm 0}
$$
Notice that $S = (\overrightarrow{\rm u_{1}}, \overrightarrow{\rm u_{2}}, ... , \overrightarrow{\rm u_{p}}, \overrightarrow{\rm v_{1}}, \overrightarrow{\rm v_{2}}, ... , \overrightarrow{\rm v_{q}})$. To show that $S$ is linearly dependent, we must exhibit a linear combination of the vectors of $S$ that equals $\overrightarrow{\rm 0}$ and uses at least one nonzero scalar. Set $\beta_{j}=0$ for $1 \leq j \leq p$, where $\beta_{j} \in \mathbb{C}$. Note:
$$
\sum_{j=1}^{p}\beta_{j}\overrightarrow{\rm u}_{j} + \sum_{i=1}^{q}\alpha_{i}\overrightarrow{\rm v}_{i} = \overrightarrow{\rm 0}
$$
is a relation of linear dependence on $S$ in which at least one scalar $\alpha_{i}$ is nonzero. Thus, by definition, $S$ is linearly dependent.
\end{proof}
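For a concrete illustration (with arbitrarily chosen vectors in $\mathbb{C}^{2}$), the set $T = \left(\begin{pmatrix}1\\2\end{pmatrix}, \begin{pmatrix}2\\4\end{pmatrix}\right)$ is linearly dependent, since $2\begin{pmatrix}1\\2\end{pmatrix} - \begin{pmatrix}2\\4\end{pmatrix} = \overrightarrow{\rm 0}$. For the larger set $S = \left(\begin{pmatrix}0\\1\end{pmatrix}, \begin{pmatrix}1\\2\end{pmatrix}, \begin{pmatrix}2\\4\end{pmatrix}\right)$, the relation
$$
0\begin{pmatrix}0\\1\end{pmatrix} + 2\begin{pmatrix}1\\2\end{pmatrix} - 1\begin{pmatrix}2\\4\end{pmatrix} = \overrightarrow{\rm 0}
$$
is nontrivial, so $S$ is linearly dependent as well.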
\newpage
Suppose that $U$ is a unitary matrix with eigenvalue $\lambda$. Prove that $\lambda$ has modulus 1, i.e. $\lambda\overline{\lambda} = 1$. (This says that all of the eigenvalues of a unitary matrix lie on the unit circle of the complex plane.)
\vspace{1 in}
\begin{proof}
Let $U$ be a unitary matrix of size $n$ with eigenvalue $\lambda \in \mathbb{C}$ and associated eigenvector $\overrightarrow{\rm x} \in \mathbb{C}^{n}$, so that $\overrightarrow{\rm x} \neq \overrightarrow{\rm 0}$. Recall that an inner product of two vectors yields a scalar in $\mathbb{C}$. Note:
\begin{align*}
U\overrightarrow{\rm x} = &\lambda\overrightarrow{\rm x} &&\text{Definition EEM}
\end{align*}
so that, taking the adjoint of both sides,
$$
(U\overrightarrow{\rm x})^* = (\lambda\overrightarrow{\rm x})^* = \lambda^*\overrightarrow{\rm x}^*
$$
Also, recall that taking the adjoint of a scalar is the same as taking its complex conjugate, so $\lambda^* = \overline{\lambda}$. Now note:
\begin{align*}
\langle \overrightarrow{\rm x}, \overrightarrow{\rm x} \rangle & = \overrightarrow{\rm x}^*I\overrightarrow{\rm x}\\
& = \overrightarrow{\rm x}^*U^*U\overrightarrow{\rm x}\\
& = (U\overrightarrow{\rm x})^*(U\overrightarrow{\rm x})\\
& = \langle U\overrightarrow{\rm x}, U\overrightarrow{\rm x} \rangle\\
& = (\lambda\overrightarrow{\rm x})^*(\lambda\overrightarrow{\rm x})\\
& = \lambda^*\overrightarrow{\rm x}^*\lambda\overrightarrow{\rm x}\\
& = \lambda\overline{\lambda}\overrightarrow{\rm x}^*\overrightarrow{\rm x}\\
& = \lambda\overline{\lambda}\langle \overrightarrow{\rm x}, \overrightarrow{\rm x} \rangle
\end{align*}
Since $\overrightarrow{\rm x}$ is an eigenvector, $\overrightarrow{\rm x} \neq \overrightarrow{\rm 0}$, so $\langle \overrightarrow{\rm x}, \overrightarrow{\rm x} \rangle \neq 0$. Dividing both sides by $\langle \overrightarrow{\rm x}, \overrightarrow{\rm x} \rangle$ gives $1 = \lambda\overline{\lambda}$. This shows that $\lambda$ has a modulus of 1, which is what we wanted to prove.
\end{proof}
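For a concrete illustration (an arbitrarily chosen unitary matrix), let
$$
U = \begin{pmatrix} 0 & -1\\ 1 & 0 \end{pmatrix},
$$
which is unitary since $U^*U = I_{2}$. Its characteristic polynomial is $\lambda^{2} + 1$, so its eigenvalues are $\lambda = i$ and $\lambda = -i$. In either case $\lambda\overline{\lambda} = 1$; for example, $i\,\overline{i} = i(-i) = 1$, so both eigenvalues lie on the unit circle.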
\newpage
Suppose that $A$ is a square, singular matrix. Prove that the homogeneous system $\mathcal{LS}(A, \overrightarrow{\rm 0})$ has infinitely many solutions.
\vspace{1 in}
\begin{proof}
Suppose $A$ is a singular, square matrix of size $n$. The system $\mathcal{LS}(A, \overrightarrow{\rm 0})$ is homogeneous, so it is consistent: the trivial solution $\overrightarrow{\rm x} = \overrightarrow{\rm 0}$ always satisfies it. Since $A$ is singular, that is, not nonsingular, the reduced row-echelon form of $A$ has fewer than $n$ pivot columns, so at least one variable of the system is free. A consistent system with at least one free variable has infinitely many solutions, so $\mathcal{LS}(A, \overrightarrow{\rm 0})$ has infinitely many solutions.
\end{proof}
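For a concrete illustration (an arbitrarily chosen singular matrix), let
$$
A = \begin{pmatrix} 1 & 2\\ 2 & 4 \end{pmatrix},
$$
which is singular since it row-reduces to $\begin{pmatrix} 1 & 2\\ 0 & 0 \end{pmatrix}$, with only one pivot column. The homogeneous system $\mathcal{LS}(A, \overrightarrow{\rm 0})$ then reads $x_{1} + 2x_{2} = 0$ with $x_{2}$ free, so every vector of the form $\begin{pmatrix} -2t\\ t \end{pmatrix}$, $t \in \mathbb{C}$, is a solution: infinitely many in all.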
\newpage
Use Theorem EMP to prove part (2) of Theorem MMIM: If $A$ is an $m \times n$ matrix and $I_{m}$ is the identity matrix of size $m$, then $I_{m}A = A$.
\vspace{1 in}
\begin{proof}
Suppose $A$ is an $m \times n$ matrix and $I_{m}$ is the identity matrix of size $m$. For $1 \leq i \leq m$ and $1 \leq j \leq n$, Theorem EMP gives the entries of $I_{m}A$ as:
\begin{align*}
[I_{m}A]_{ij} & = \sum_{k=1}^m[I_{m}]_{ik}[A]_{kj}\\
& = [I_{m}]_{ii}[A]_{ij} + \sum_{k=1, k \neq i}^{m}[I_{m}]_{ik}[A]_{kj}\\
& = 1[A]_{ij} + \sum_{k=1, k \neq i}^{m}0[A]_{kj}\\
& = [A]_{ij} + 0\\
& = [A]_{ij}
\end{align*}
This shows that $I_{m}A = A$. So multiplying a matrix on the left by the identity matrix of the appropriate size leaves it unchanged.
\end{proof}
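For a quick numerical check (with an arbitrarily chosen $2 \times 3$ matrix), let
$$
A = \begin{pmatrix} 1 & 2 & 3\\ 4 & 5 & 6 \end{pmatrix}.
$$
Then
$$
I_{2}A = \begin{pmatrix} 1 & 0\\ 0 & 1 \end{pmatrix}\begin{pmatrix} 1 & 2 & 3\\ 4 & 5 & 6 \end{pmatrix}
= \begin{pmatrix} 1(1)+0(4) & 1(2)+0(5) & 1(3)+0(6)\\ 0(1)+1(4) & 0(2)+1(5) & 0(3)+1(6) \end{pmatrix}
= A,
$$
matching the entry-by-entry computation above.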
\newpage
Suppose that $A$ is a square matrix of size $n$ and $\alpha \in \mathbb{C}$ is a scalar. Prove that $det(\alpha A) = \alpha^n det(A)$.
\vspace{1 in}
\begin{proof}
Let $A$ be a square matrix of size $n$ and $\alpha \in \mathbb{C}$. Writing $E_{i}(\alpha)$ for the elementary matrix that multiplies row $i$ by $\alpha$, we can write $\alpha A$ as follows:
$$\alpha A = E_{1}(\alpha)E_{2}(\alpha)\cdots E_{n}(\alpha)A$$
Note that $det(E_{i}(\alpha)) = \alpha$ for each $i$ by Theorem DEM, and that the determinant of a product of matrices is the product of the determinants. So taking the determinant of $\alpha A$ gives:
\begin{align*}
det(\alpha A) & = det(E_{1}(\alpha)E_{2}(\alpha)\cdots E_{n}(\alpha)A)\\
& = det(E_{1}(\alpha))det(E_{2}(\alpha))\cdots det(E_{n}(\alpha))det(A)\\
& = \underbrace{\alpha \cdot \alpha \cdots \alpha}_{n \text{ factors}}\,det(A)\\
& = \alpha^n det(A)
\end{align*}
This shows $det(\alpha A) = \alpha^n det(A)$.
\end{proof}
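For a quick numerical check (with an arbitrarily chosen matrix and scalar), let $\alpha = 3$ and
$$
A = \begin{pmatrix} 1 & 2\\ 3 & 4 \end{pmatrix},
$$
so $det(A) = 1(4) - 2(3) = -2$. Then
$$
det(3A) = det\begin{pmatrix} 3 & 6\\ 9 & 12 \end{pmatrix} = 3(12) - 6(9) = -18 = 3^{2}(-2) = \alpha^{n}det(A),
$$
with $n = 2$.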
\end{document}