Update ddasp_exercise_slides.tex
bugfix wording: perceptron -> neuron
fs446 committed Apr 3, 2024
1 parent ae56f61 commit 3b2217e
Showing 1 changed file with 16 additions and 16 deletions.
32 changes: 16 additions & 16 deletions slides/ddasp_exercise_slides.tex
@@ -2977,9 +2977,9 @@ \subsection{Fundamentals}

\begin{frame}[t]{Output Layer for Regression Model}

-$\cdot$ Output layer exhibits $i=1 \dots K$ perceptrons
+$\cdot$ Output layer exhibits $i=1 \dots K$ neurons

-$\cdot$ Activation function $\sigma(\cdot)$ for $i\text{-th}$ perceptron: \underline{linear}
+$\cdot$ Activation function $\sigma(\cdot)$ for $i\text{-th}$ neuron: \underline{linear}

$$\sigma(z_i) = z_i$$
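As a small aside (a numpy sketch by the editor, not part of the diff; layer sizes and names are made up): with the identity activation the regression output is simply the affine map of the last hidden layer, and the derivative of sigma with respect to each z_i is 1, which keeps the output-layer Jacobian trivial.

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal(8)        # activations of the last hidden layer (size 8 assumed)
W = rng.standard_normal((3, 8))   # K = 3 output neurons, one weight row per neuron
b = rng.standard_normal(3)        # one bias per output neuron

z = W @ a + b                     # pre-activations z_i
y_hat = z                         # linear activation: sigma(z_i) = z_i
dsigma_dz = np.ones_like(z)       # derivative of the identity activation is 1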

@@ -2993,9 +2993,9 @@ \subsection{Fundamentals}

\begin{frame}[t]{Output Layer for Binary Classification Model}

-$\cdot$ Output layer exhibits two perceptrons with shared input weights, hence acting on same $z$
+$\cdot$ Output layer exhibits two neurons with shared input weights, hence acting on same $z$

-$\cdot$ Activation functions $\sigma(\cdot)_{1,2}$ for the two perceptrons: \underline{sigmoid} / complementary sigmoid
+$\cdot$ Activation functions $\sigma(\cdot)_{1,2}$ for the two neurons: \underline{sigmoid} / complementary sigmoid

$$\sigma_1(z) = \frac{1}{1+\e^{-z}} = \frac{\e^{z}}{\e^{z}+1} \qquad\qquad \sigma_2(z) = 1-\sigma_1(z) = \frac{1}{1 + \e^{z}} = \frac{\e^{-z}}{\e^{-z}+1}$$
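A quick numerical check (editor's numpy sketch, not part of the diff) that the two output neurons are complementary: sigma_2(z) = 1 - sigma_1(z) and equally sigma_2(z) = sigma_1(-z), so the two class probabilities always sum to one.

import numpy as np

def sigma1(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigma2(z):
    return 1.0 / (1.0 + np.exp(z))   # complementary sigmoid

z = np.linspace(-5.0, 5.0, 11)
assert np.allclose(sigma1(z) + sigma2(z), 1.0)   # the two outputs sum to one
assert np.allclose(sigma2(z), sigma1(-z))        # sigma_2 is sigma_1 mirrored in z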

@@ -3029,9 +3029,9 @@ \subsection{Fundamentals}

\begin{frame}[t]{Output Layer for Binary Classification Model}

-$\cdot$ Output layer exhibits a single perceptron
+$\cdot$ Output layer exhibits a single neuron

-$\cdot$ Activation function $\sigma(\cdot)$ for this single output perceptron: \underline{sigmoid}
+$\cdot$ Activation function $\sigma(\cdot)$ for this single output neuron: \underline{sigmoid}

$$\sigma(z) = \frac{1}{1+\e^{-z}} = \frac{\e^{z}}{\e^{z}+1}$$
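As a sketch of how this single output is used (editor's example; the 0.5 threshold is the usual convention and is not stated in this hunk): sigma(z) is read as the probability of class 1, 1 - sigma(z) as that of class 2, and a hard decision at threshold 0.5 is equivalent to taking the sign of z.

import numpy as np

def sigma(z):
    return 1.0 / (1.0 + np.exp(-z))

z = np.array([-2.0, -0.1, 0.0, 0.3, 4.0])
p_class1 = sigma(z)              # modelled probability of class 1
p_class2 = 1.0 - p_class1        # implied probability of class 2
decision = p_class1 >= 0.5       # same decision rule as z >= 0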

@@ -3064,15 +3064,15 @@ \subsection{Fundamentals}

\begin{frame}[t]{Output Layer for Multi-Class Classification Model}

-$\cdot$ Output layer exhibits $i=1 \dots K$ perceptrons for $K$ mutually exclusive classes
+$\cdot$ Output layer exhibits $i=1 \dots K$ neurons for $K$ mutually exclusive classes

-$\cdot$ Activation function $\sigma(\cdot)$ for $i\text{-th}$ perceptron: \underline{softmax}
+$\cdot$ Activation function $\sigma(\cdot)$ for $i\text{-th}$ neuron: \underline{softmax}

$$
\sigma(z_i) = \frac{\e^{z_i}}{\sum\limits_{i'=1}^{K} \e^{z_{i'}}} \qquad \text{hence, }\sum\limits_{i=1}^{K} \sigma(z_i) = 1
-\text{, which couples the perceptrons}
+\text{, which couples the neurons}
$$
-%which couples the perceptrons in the output layer
+%which couples the neurons in the output layer

$\cdot$ Derivatives to set up the Jacobian matrix
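A numpy sketch (editor's illustration, not part of the diff) of the softmax output and the standard Jacobian entries d sigma(z_i) / d z_j = sigma(z_i) * (delta_ij - sigma(z_j)); because the K outputs are coupled and sum to one, every column of this Jacobian sums to zero.

import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z))          # shift by max(z) purely for numerical safety
    return e / np.sum(e)

z = np.array([2.0, -1.0, 0.5, 0.0])
s = softmax(z)
assert np.isclose(np.sum(s), 1.0)      # outputs form a probability vector

J = np.diag(s) - np.outer(s, s)        # J[i, j] = s_i * (delta_ij - s_j)
assert np.allclose(J.sum(axis=0), 0.0) # coupling: each column of the Jacobian sums to zero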

@@ -3127,7 +3127,7 @@ \subsection{Exercise 11}
\begin{itemize}
\item XOR is a classification problem, which cannot be handled by linear algebra
\item introduce two nonlinearities: add bias, non-linear activation function
-\item perceptron concept
+\item neuron / perceptron concept
\item general architecture of non-linear models
\end{itemize}
\end{frame}
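The frames below fill in concrete weights for such a two-layer XOR network. As a point of reference (editor's sketch; the exact numbers on the slides sit outside the visible hunks), the textbook solution cited further down, Goodfellow et al. (2016) Ch. 6.1, uses a ReLU hidden layer and can be checked in a few lines of numpy:

import numpy as np

# Goodfellow et al. (2016), Ch. 6.1: h = ReLU(W x + c), y = w . h + b
W = np.array([[1.0, 1.0],
              [1.0, 1.0]])     # weight matrix of layer 1 (neurons 1 and 2)
c = np.array([0.0, -1.0])      # bias vector of layer 1
w = np.array([1.0, -2.0])      # weight vector of layer 2 (neuron 3)
b = 0.0                        # bias scalar of layer 2

for x in np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float):
    h = np.maximum(0.0, W @ x + c)   # hidden-layer activations
    print(x, "->", w @ h + b)        # prints 0, 1, 1, 0: the XOR truth table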
@@ -3357,7 +3357,7 @@ \subsection{Exercise 11}

\begin{frame}[t]{A Non-Linear Model for XOR}

-$\cdot$ weight matrix and bias vector to represent perceptron \textcolor{C0}{1} and \textcolor{C3}{2}
+$\cdot$ weight matrix and bias vector to represent neurons \textcolor{C0}{1} and \textcolor{C3}{2}
$$
\bm{W}_\text{layer 1} =
\begin{bmatrix}
@@ -3373,7 +3373,7 @@ \subsection{Exercise 11}
\end{bmatrix}
$$

-$\cdot$ weight vector and bias scalar to represent perceptron \textcolor{C1}{3}
+$\cdot$ weight vector and bias scalar to represent neuron \textcolor{C1}{3}
$$
\bm{W}_\text{layer 2} =
\begin{bmatrix}
@@ -3416,7 +3416,7 @@ \subsection{Exercise 11}

$\cdot$ solution known from book Goodfellow et al. (2016): Deep Learning. MIT Press, Ch. 6.1

-$\cdot$ weight matrix and bias vector to represent perceptron \textcolor{C0}{1} and \textcolor{C3}{2}
+$\cdot$ weight matrix and bias vector to represent neurons \textcolor{C0}{1} and \textcolor{C3}{2}
$$
\bm{W}_\text{layer 1} =
\begin{bmatrix}
@@ -3432,7 +3432,7 @@ \subsection{Exercise 11}
\end{bmatrix}
$$

-$\cdot$ weight vector and bias scalar to represent perceptron \textcolor{C1}{3}
+$\cdot$ weight vector and bias scalar to represent neuron \textcolor{C1}{3}
$$
\bm{W}_\text{layer 2} =
\begin{bmatrix}
@@ -3616,7 +3616,7 @@ \subsection{Exercise 12}
\end{tikzpicture}
\end{center}

-$\cdot$ Activation function $\sigma(\cdot)$ for this single output perceptron: \underline{sigmoid}
+$\cdot$ Activation function $\sigma(\cdot)$ for this single output neuron: \underline{sigmoid}

$$\hat{y} = \sigma(z) = \frac{1}{1+\e^{-z}} = \frac{\e^{z}}{\e^{z}+1}\qquad\qquad
\frac{\partial \sigma(z)}{\partial z} = \frac{\e^{z}}{(\e^{z}+1)^2} = \sigma(z) \cdot (1-\sigma(z))
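A short numerical check (editor's sketch, not part of the diff) of the derivative identity used here, d sigma / d z = sigma(z) * (1 - sigma(z)), against a central finite difference:

import numpy as np

def sigma(z):
    return 1.0 / (1.0 + np.exp(-z))

z = np.linspace(-4.0, 4.0, 9)
analytic = sigma(z) * (1.0 - sigma(z))
eps = 1e-6
numeric = (sigma(z + eps) - sigma(z - eps)) / (2.0 * eps)   # central difference
assert np.allclose(analytic, numeric, atol=1e-8)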