\documentclass{article}
\usepackage{fullpage}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{bm}
% bold face letters for matrix
\newcommand{\Abf}{\ensuremath{\bm A}}
\newcommand{\Ibf}{\ensuremath{\bm I}}
\newcommand{\Mbf}{\ensuremath{\bm M}}
\newcommand{\Nbf}{\ensuremath{\bm N}}
% bold face letters for vector
\newcommand{\bbf}{\ensuremath{\bm b}}
\newcommand{\ebf}{\ensuremath{\bm e}}
\newcommand{\xbf}{\ensuremath{\bm x}}
\newcommand{\ybf}{\ensuremath{\bm y}}
% order: \order[st]{1}, \order{k}
\newcommand{\order}[2][th]{\ensuremath{{#2}^{\text{#1}}}}
% Matrix transpose
\newcommand{\trans}[1]{\ensuremath{{#1}^\intercal}}
% Matrix inverse \inv[2]{\Abf} or \inv{\Abf}
\newcommand{\inv}[2][1]{\ensuremath{{#2}^{\text{-}{#1}}}}
\begin{document}
\title{Two Simple Proofs for Cramer's Rule}
\author{Frank the Giant Bunny}
\maketitle
\thispagestyle{empty}
Given a non-singular linear system $\Abf\xbf = \bbf$,
\emph{Cramer's rule} states that the $\order{k}$ component of the
solution is given by
$x_k = \frac{\det\Abf_k}{\det\Abf}$,
where $\Abf_k$ is obtained from $\Abf$ by replacing the
$\order{k}$ column $\Abf_{\ast k}$ by $\bbf$; that is,
\begin{equation}
  \Abf_k
  =
  \begin{bmatrix}
    \Abf_{\ast 1}, \cdots, \Abf_{\ast k-1},
    \bbf, \Abf_{\ast k+1}, \cdots, \Abf_{\ast n}
  \end{bmatrix}
  =
  \Abf + (\bbf - \Abf_{\ast k})\trans{\ebf}_k
  \label{eq:rank-one}
\end{equation}
where $\ebf_k$ is the $\order{k}$ unit vector.
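For instance, with $n = 3$, $k = 2$, and $a_{ij}$, $b_i$ denoting the
entries of $\Abf$ and $\bbf$, the outer product
$(\bbf - \Abf_{\ast 2})\trans{\ebf}_2$ vanishes outside the second
column, so the rank-one form in \eqref{eq:rank-one} reads
\begin{equation*}
  \Abf_2
  =
  \begin{bmatrix}
    a_{11} & a_{12} & a_{13}\\
    a_{21} & a_{22} & a_{23}\\
    a_{31} & a_{32} & a_{33}
  \end{bmatrix}
  +
  \begin{bmatrix}
    b_1 - a_{12}\\
    b_2 - a_{22}\\
    b_3 - a_{32}
  \end{bmatrix}
  \begin{bmatrix}
    0 & 1 & 0
  \end{bmatrix}
  =
  \begin{bmatrix}
    a_{11} & b_1 & a_{13}\\
    a_{21} & b_2 & a_{23}\\
    a_{31} & b_3 & a_{33}
  \end{bmatrix}.
\end{equation*}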
The proof of Cramer's rule usually begins by writing down
the cofactor expansion of $\det\Abf$. This note explains two
simple alternative approaches.
As explained on page 476 of Meyer's textbook\footnote{
  Carl D. Meyer, \emph{Matrix Analysis and Applied Linear Algebra},
  SIAM, 2001.
},
one can exploit the rank-one update form in \eqref{eq:rank-one}.
The \emph{Matrix Determinant Lemma} states that  
\begin{equation*}
  \det(\Abf + \xbf\trans{\ybf})
  =
  (1 + \trans{\ybf}\inv{\Abf}\xbf) \det\Abf
\end{equation*}
where $\Abf$ is an $n\times n$ non-singular matrix and
$\xbf$ and $\ybf$ are $n\times 1$ column vectors.
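One quick way to see the lemma is via eigenvalues: factoring out $\Abf$
gives $\det(\Abf + \xbf\trans{\ybf})
= \det\Abf\,\det\big(\Ibf + (\inv{\Abf}\xbf)\trans{\ybf}\big)$,
and a matrix of the form $\xbf\trans{\ybf}$ has eigenvalues
$\trans{\ybf}\xbf$ and $0$ (the latter with multiplicity $n-1$), so
\begin{equation*}
  \det(\Ibf + \xbf\trans{\ybf})
  =
  (1 + \trans{\ybf}\xbf)\cdot 1^{n-1}
  =
  1 + \trans{\ybf}\xbf;
\end{equation*}
applying this with $\inv{\Abf}\xbf$ in place of $\xbf$ gives the stated
identity.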
Then
\begin{alignat*}{2}
  \det\Abf_k
  &=
  \det\big(\Abf + (\bbf - \Abf_{\ast k})\trans{\ebf}_k\big)
  &
  \quad\text{by definition of $\Abf_k$}
  \\
  &=
  \big\{
  1 + \trans{\ebf}_k\inv{\Abf}(\bbf-\Abf_{\ast k})
  \big\}
  \det\Abf
  &
  \quad\text{by the Matrix Determinant Lemma}
  \\
  &=
  \big\{
  1 + \trans{\ebf}_k(\xbf - \ebf_k)
  \big\}
  \det\Abf
  &
  \quad\text{since $\Abf\xbf = \bbf$ and $\Abf\ebf_k = \Abf_{\ast k}$}
  \\
  &=
  \big\{
  1 + (x_k - 1)
  \big\}
  \det\Abf
  &
  \quad\text{since $\trans{\ebf}_k\xbf = x_k$ and $\trans{\ebf}_k\ebf_k=1$}
  \\
  &=
  x_k\det\Abf
  &
  \quad\text{by cancellation}
\end{alignat*}
which completes the proof.
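
As a quick numerical check, take
$\Abf = \left[\begin{smallmatrix} 2 & 1 \\ 1 & 3 \end{smallmatrix}\right]$
and
$\bbf = \left[\begin{smallmatrix} 5 \\ 10 \end{smallmatrix}\right]$,
so that $\det\Abf = 5$. Then
\begin{equation*}
  x_1
  = \frac{1}{5}\det\begin{bmatrix} 5 & 1 \\ 10 & 3 \end{bmatrix}
  = \frac{5}{5} = 1,
  \qquad
  x_2
  = \frac{1}{5}\det\begin{bmatrix} 2 & 5 \\ 1 & 10 \end{bmatrix}
  = \frac{15}{5} = 3,
\end{equation*}
and indeed $2(1) + 1(3) = 5$ and $1(1) + 3(3) = 10$.
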
Another simple proof due to Stephen M. Robinson\footnote{
  Stephen M. Robinson, 
  ``A Short Proof of Cramer's Rule'',
  \emph{Mathematics Magazine}, 43(2), 94--95, 1970.
} begins by viewing 
$x_k$ as a determinant
\begin{equation*}
  x_k = \det\Ibf_k = 
  \det
  \begin{bmatrix}
    \ebf_1, \cdots, \ebf_{k-1},
    \xbf, \ebf_{k+1}, \cdots, \ebf_{n}
  \end{bmatrix}
\end{equation*}
where $\Ibf_k$ is obtained from the identity matrix $\Ibf$ by
replacing the $\order{k}$ column by $\xbf$.
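To see that $\det\Ibf_k = x_k$, note that the $\order{k}$ row of
$\Ibf_k$ has $x_k$ as its only nonzero entry and the corresponding
minor is the $(n-1)\times(n-1)$ identity; for instance, with $n=3$
and $k=2$,
\begin{equation*}
  \det\Ibf_2
  =
  \det
  \begin{bmatrix}
    1 & x_1 & 0\\
    0 & x_2 & 0\\
    0 & x_3 & 1
  \end{bmatrix}
  =
  x_2.
\end{equation*}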
Then $\Abf\Ibf_k$ directly yields the matrix $\Abf_k$ in
\eqref{eq:rank-one} without resorting to a rank-one update:
\begin{align*}
  \Abf\Ibf_k
  &=
    \Abf
    \begin{bmatrix}
      \ebf_1, \cdots, \ebf_{k-1},
      \xbf, \ebf_{k+1}, \cdots, \ebf_{n}
    \end{bmatrix}
  \\
  &=
    \begin{bmatrix}
      \Abf\ebf_1, \cdots, \Abf\ebf_{k-1},
      \Abf\xbf, \Abf\ebf_{k+1}, \cdots, \Abf\ebf_{n}
    \end{bmatrix}
  \\
  &=
    \begin{bmatrix}
      \Abf_{\ast 1}, \cdots ,\Abf_{\ast k-1},
      \bbf, \Abf_{\ast k+1}, \cdots, \Abf_{\ast n}
    \end{bmatrix}
    \\
  &=
    \Abf_k
\end{align*}
Therefore,
\begin{equation*}
  x_k
  = \det\Ibf_k
  = \det\big(\inv{\Abf}\Abf\Ibf_k\big)
  = \det\big(\inv{\Abf}\Abf_k\big)
  = \det\inv{\Abf}\det\Abf_k
  = \frac{\det\Abf_k}{\det\Abf}
\end{equation*}
which exploits the facts that $\det\inv{\Mbf} = 1/\det\Mbf$
and $\det(\Mbf\Nbf) = \det\Mbf\det\Nbf$ for square matrices
$\Mbf$ and $\Nbf$ of the same size.
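
The $2\times 2$ system used earlier also illustrates this route: with
$\Abf = \left[\begin{smallmatrix} 2 & 1 \\ 1 & 3 \end{smallmatrix}\right]$,
$\bbf = \left[\begin{smallmatrix} 5 \\ 10 \end{smallmatrix}\right]$, and
$\xbf = \left[\begin{smallmatrix} 1 \\ 3 \end{smallmatrix}\right]$,
\begin{equation*}
  \Abf\Ibf_1
  =
  \begin{bmatrix} 2 & 1 \\ 1 & 3 \end{bmatrix}
  \begin{bmatrix} 1 & 0 \\ 3 & 1 \end{bmatrix}
  =
  \begin{bmatrix} 5 & 1 \\ 10 & 3 \end{bmatrix}
  =
  \Abf_1,
  \qquad
  \det\Ibf_1 = 1 = \frac{5}{5} = \frac{\det\Abf_1}{\det\Abf}.
\end{equation*}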
\end{document}