From 3b2217ebb7def7b848323c5a4e0d489150151413 Mon Sep 17 00:00:00 2001
From: Frank Schultz
Date: Wed, 3 Apr 2024 13:13:27 +0200
Subject: [PATCH] Update ddasp_exercise_slides.tex

bugfix wording: perceptron -> neuron
---
 slides/ddasp_exercise_slides.tex | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/slides/ddasp_exercise_slides.tex b/slides/ddasp_exercise_slides.tex
index 8e680f4..262dcef 100644
--- a/slides/ddasp_exercise_slides.tex
+++ b/slides/ddasp_exercise_slides.tex
@@ -2977,9 +2977,9 @@ \subsection{Fundamentals}
 
 \begin{frame}[t]{Output Layer for Regression Model}
 
-$\cdot$ Output layer exhibits $i=1 \dots K$ perceptrons
+$\cdot$ Output layer exhibits $i=1 \dots K$ neurons
 
-$\cdot$ Activation function $\sigma(\cdot)$ for $i\text{-th}$ perceptron: \underline{linear}
+$\cdot$ Activation function $\sigma(\cdot)$ for $i\text{-th}$ neuron: \underline{linear}
 
 $$\sigma(z_i) = z_i$$
 
@@ -2993,9 +2993,9 @@ \subsection{Fundamentals}
 
 \begin{frame}[t]{Output Layer for Binary Classification Model}
 
-$\cdot$ Output layer exhibits two perceptrons with shared input weights, hence acting on same $z$
+$\cdot$ Output layer exhibits two neurons with shared input weights, hence acting on same $z$
 
-$\cdot$ Activation functions $\sigma(\cdot)_{1,2}$ for the two perceptrons: \underline{sigmoid} / complementary sigmoid
+$\cdot$ Activation functions $\sigma(\cdot)_{1,2}$ for the two neurons: \underline{sigmoid} / complementary sigmoid
 
 $$\sigma_1(z) = \frac{1}{1+\e^{-z}} = \frac{\e^{z}}{\e^{z}+1} \qquad\qquad \sigma_2(z) = 1-\sigma_1(z) = \frac{1}{1 + \e^{z}} = \frac{\e^{-z}}{\e^{-z}+1}$$
 
@@ -3029,9 +3029,9 @@ \subsection{Fundamentals}
 
 \begin{frame}[t]{Output Layer for Binary Classification Model}
 
-$\cdot$ Output layer exhibits a single perceptron
+$\cdot$ Output layer exhibits a single neuron
 
-$\cdot$ Activation function $\sigma(\cdot)$ for this single output perceptron: \underline{sigmoid}
+$\cdot$ Activation function $\sigma(\cdot)$ for this single output neuron: \underline{sigmoid}
 
 $$\sigma(z) = \frac{1}{1+\e^{-z}} = \frac{\e^{z}}{\e^{z}+1}$$
 
@@ -3064,15 +3064,15 @@ \subsection{Fundamentals}
 
 \begin{frame}[t]{Output Layer for Multi-Class Classification Model}
 
-$\cdot$ Output layer exhibits $i=1 \dots K$ perceptrons for $K$ mutually exclusive classes
+$\cdot$ Output layer exhibits $i=1 \dots K$ neurons for $K$ mutually exclusive classes
 
-$\cdot$ Activation function $\sigma(\cdot)$ for $i\text{-th}$ perceptron: \underline{softmax}
+$\cdot$ Activation function $\sigma(\cdot)$ for $i\text{-th}$ neuron: \underline{softmax}
 
 $$
 \sigma(z_i) = \frac{\e^{z_i}}{\sum\limits_{i'=1}^{K} \e^{z_{i'}}} \qquad \text{hence, }\sum\limits_{i=1}^{K} \sigma(z_i) = 1
-\text{, which couples the perceptrons}
+\text{, which couples the neurons}
 $$
-%which couples the perceptrons in the output layer
+%which couples the neurons in the output layer
 
 $\cdot$ Derivatives to set up the Jacobian matrix
 
@@ -3127,7 +3127,7 @@ \subsection{Exercise 11}
 \begin{itemize}
 \item XOR is a classification problem, which cannot be handled by linear algebra
 \item introduce two nonlinearities: add bias, non-linear activation function
-\item perceptron concept
+\item neuron / perceptron concept
 \item general architecture of non-linear models
 \end{itemize}
 \end{frame}
@@ -3357,7 +3357,7 @@ \subsection{Exercise 11}
 
 \begin{frame}[t]{A Non-Linear Model for XOR}
 
-$\cdot$ weight matrix and bias vector to represent perceptron \textcolor{C0}{1} and \textcolor{C3}{2}
+$\cdot$ weight matrix and bias vector to represent neurons \textcolor{C0}{1} and \textcolor{C3}{2}
 $$
 \bm{W}_\text{layer 1} = 
 \begin{bmatrix}
@@ -3373,7 +3373,7 @@ \subsection{Exercise 11}
 \end{bmatrix}
 $$
 
-$\cdot$ weight vector and bias scalar to represent perceptron \textcolor{C1}{3}
+$\cdot$ weight vector and bias scalar to represent neuron \textcolor{C1}{3}
 $$
 \bm{W}_\text{layer 2} = 
 \begin{bmatrix}
 \end{bmatrix}
 $$
@@ -3416,7 +3416,7 @@ \subsection{Exercise 11}
 
 $\cdot$ solution known from book Goodfellow et al. (2016): Deep Learning. MIT Press, Ch. 6.1
 
-$\cdot$ weight matrix and bias vector to represent perceptron \textcolor{C0}{1} and \textcolor{C3}{2}
+$\cdot$ weight matrix and bias vector to represent neurons \textcolor{C0}{1} and \textcolor{C3}{2}
 $$
 \bm{W}_\text{layer 1} = 
 \begin{bmatrix}
@@ -3432,7 +3432,7 @@ \subsection{Exercise 11}
 \end{bmatrix}
 $$
 
-$\cdot$ weight vector and bias scalar to represent perceptron \textcolor{C1}{3}
+$\cdot$ weight vector and bias scalar to represent neuron \textcolor{C1}{3}
 $$
 \bm{W}_\text{layer 2} = 
 \begin{bmatrix}
 \end{bmatrix}
 $$
@@ -3616,7 +3616,7 @@ \subsection{Exercise 12}
 \end{tikzpicture}
 \end{center}
 
-$\cdot$ Activation function $\sigma(\cdot)$ for this single output perceptron: \underline{sigmoid}
+$\cdot$ Activation function $\sigma(\cdot)$ for this single output neuron: \underline{sigmoid}
 
 $$\hat{y} = \sigma(z) = \frac{1}{1+\e^{-z}} = \frac{\e^{z}}{\e^{z}+1}\qquad\qquad \frac{\partial \sigma(z)}{\partial z} = \frac{\e^{z}}{(\e^{z}+1)^2} = \sigma(z) \cdot (1-\sigma(z))$$
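
Note on the sigmoid lines kept as context above: the slide formulas are easy to sanity-check numerically. A minimal NumPy sketch, not taken from the repository; sigmoid is a local helper name introduced here:

import numpy as np

def sigmoid(z):
    # sigma_1(z) = 1 / (1 + e^(-z)), as on the binary-classification frames
    return 1.0 / (1.0 + np.exp(-z))

z = np.linspace(-5.0, 5.0, 101)

# complementary sigmoid: sigma_2(z) = 1 - sigma_1(z) = 1 / (1 + e^z)
assert np.allclose(1.0 - sigmoid(z), 1.0 / (1.0 + np.exp(z)))

# derivative identity from the Exercise 12 hunk:
# d sigma / dz = e^z / (e^z + 1)^2 = sigma(z) * (1 - sigma(z)),
# checked against an independent central finite difference
h = 1e-6
numeric = (sigmoid(z + h) - sigmoid(z - h)) / (2.0 * h)
assert np.allclose(sigmoid(z) * (1.0 - sigmoid(z)), numeric)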
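The multi-class hunk states that the softmax outputs sum to one and announces "Derivatives to set up the Jacobian matrix"; the derivative formula itself is not quoted in this patch. The sketch below assumes the standard softmax derivative d sigma(z_i)/d z_j = sigma(z_i) * (delta_ij - sigma(z_j)):

import numpy as np

def softmax(z):
    # sigma(z_i) = e^(z_i) / sum_i' e^(z_i'); shifted by max(z) for stability
    e = np.exp(z - np.max(z))
    return e / e.sum()

z = np.array([0.5, -1.0, 2.0])  # arbitrary example logits, K = 3
s = softmax(z)
print(s.sum())  # 1.0 -- the K outputs are coupled

# standard softmax Jacobian: J[i, j] = s[i] * (delta_ij - s[j])
J = np.diag(s) - np.outer(s, s)

# since sum_i sigma(z_i) = 1 is constant, each column of J sums to zero
print(np.allclose(J.sum(axis=0), 0.0))  # True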
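The entries of \bm{W}_\text{layer 1}, the bias vector, and \bm{W}_\text{layer 2} fall between the hunks, so they are not part of this patch. For a forward-pass check one can fill in the textbook solution the slides cite (Goodfellow et al. (2016): Deep Learning. MIT Press, Ch. 6.1, with a ReLU hidden layer); the concrete values below come from that reference, not from this diff:

import numpy as np

# layer 1: weight matrix and bias vector for neurons 1 and 2
W1 = np.array([[1.0, 1.0],
               [1.0, 1.0]])
b1 = np.array([0.0, -1.0])
# layer 2: weight vector and bias scalar for neuron 3
W2 = np.array([1.0, -2.0])
b2 = 0.0

relu = lambda z: np.maximum(z, 0.0)  # the book's hidden-layer nonlinearity

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)  # all XOR inputs
y_hat = relu(X @ W1 + b1) @ W2 + b2  # W1 is symmetric, so X @ W1 = X @ W1.T
print(y_hat)  # [0. 1. 1. 0.] -- reproduces XOR exactly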