library(tidyverse)
library(klaR)
+library(cluster)
+library(dplyr)
+library(tidyr)
+library(purrr)
+library(ggplot2)
+library(scales)
diff --git a/.DS_Store b/.DS_Store index c17c9ad..bc2d38f 100644 Binary files a/.DS_Store and b/.DS_Store differ diff --git a/.Rhistory b/.Rhistory new file mode 100644 index 0000000..6147190 --- /dev/null +++ b/.Rhistory @@ -0,0 +1,246 @@ +#| message: false +#| warning: false +library(tidyverse) +library(tidyclust) +centers <- tibble( +cluster = factor(1:3), +respondents = c(250, 500, 200), +q1 = c(1, 0, 1), +q2 = c(0, 1, 1), +q3 = c(0, 1, 1), +q4 = c(0, 0, 1) +) +set.seed(123) +labelled_respondents <- centers |> +mutate( +across( +starts_with("q"), +~map2(respondents, .x, function(x, y) { +rbinom(x, 1, max((y - 0.1), 0.1)) +}), +.names = "{col}" +) +) |> +select(-respondents) |> +unnest(cols = c(q1, q2, q3, q4)) |> +sample_n(n()) +labelled_respondents |> +pivot_longer(cols = -cluster, names_to = "question", values_to = "response") |> +mutate(response = response == 1) |> +ggplot(aes(x = response, y = question, color = cluster)) + +geom_jitter() + +theme_bw() + +labs(x = "Response", y = "Question", color = "Cluster", +title = "Visualization of simulated question responses by cluster") +k_means_example <- k_means(num_clusters = 3) |> +set_engine("stats", algorithm = "Lloyd") |> +fit(~ q1 + q2+ q3 + q4, +data = labelled_respondents +) +k_means_example_summary <- extract_fit_summary(k_means_example) +str(k_means_example_summary) +k_min <- 1 +k_max <- 7 +fit_k_means <- function(num_clusters) { +k_means(num_clusters = num_clusters) |> +set_engine("stats", algorithm = "Lloyd") |> +fit(~ q1 + q2+ q3 + q4, +data = labelled_respondents +) |> +extract_fit_summary() +} +fit_k_means(2) +k_min <- 1 +k_max <- 7 +kmeans_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, ~kmeans(respondents, centers = .x, iter.max = iter_max)), +) +respondents_labelled +labelled_respondents +kmeans_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, ~kmeans(labelled_respondents |> select(-cluster), centers = .x, iter.max = iter_max)), +) +kmeans_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, ~kmeans(labelled_respondents |> select(-cluster), centers = .x, iter.max = 100)), +) +kmeans_results +k_means_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, fit_k_means), +) +k_means_results +k_means_results <- tibble(k = k_min:k_max) |> +mutate( +k_means_result = map(k, fit_k_means), +) +k_means_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, fit_k_means), +) +k_means_results +k_means_results |> +mutate(withinss = sse_within_total(kclust)) +k_means_results |> +mutate(withinss = map(kclust, sse_within_total)) +k_means_results +k_means_results$kclust[[1]] +k_means_results$kclust[[1]] |> sse_within_total() +k_means_results$kclust[[1]] |> summary() +k_means_results$kclust[[1]] |> sse_total() +k_means_results$kclust[[1]] |> summary() |> sse_total() +?sse_total +k_means_results$kclust[[1]] |> sse_total_vec() +k_means_results$kclust[[1]] |> extract_fit_summary() +k_means_results +k_means_results$kclust[[1]] |> summary() +k_means_results$kclust[[1]] |> extract_cluster_assignment() +k_means_results$kclust[[1]] |> extract_fit_summary() +k_means_results$kclust[1] |> extract_fit_summary() +k_means_results$kclust[1] |> str() +k_means_results$kclust[[1]] |> str() +tibble(k = k_min:k_max) +k_test <- means_results$kclust[[1]] +k_test <- k_means_results$kclust[[1]] +View(k_test) +class(k_test) +class(k_means_example) +fit_k_means <- function(num_clusters) { +k_means(num_clusters = num_clusters) |> +set_engine("stats", algorithm = "Lloyd") |> +fit(~ q1 + q2+ q3 + q4, +data = 
labelled_respondents +) +} +fit_k_means(3) +fit_k_means(3) |> class() +fit_k_means <- function(num_clusters) { +k_means(num_clusters = num_clusters) |> +set_engine("stats", algorithm = "Lloyd") |> +fit(~ q1 + q2+ q3 + q4, +data = labelled_respondents +) |> +extract_fit_summary() +} +k_means_results <- c|> +mutate( +kclust = map(k, fit_k_means), +) +k_means_results <- c|> +mutate( +kclust = map(k, fit_k_means), +) +fit_k_means <- function(num_clusters) { +k_means(num_clusters = num_clusters) |> +set_engine("stats", algorithm = "Lloyd") |> +fit(~ q1 + q2+ q3 + q4, +data = labelled_respondents +) |> +extract_fit_summary() +} +k_means_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, fit_k_means), +) +k_means_results +kmeans_logwithindiss <- k_means_results |> +mutate(withinss = map(kclust, sse_within_total)) +k_means_results +k_means_results[[1]] +k_means_results$kclust[[1]] +k_means_results$kclust[[1]]$sse_within_total_total +k_means_results |> +mutate(withinss = map(kclust, ~.$sse_within_total_total)) +k_means_results |> +mutate(withinss = map_dbl(kclust, ~.$sse_within_total_total)) +k_means_results |> +mutate(withinss = map_dbl(kclust, ~sum(.$sse_within_total_total))) +kmeans_logwithindiss <- k_means_results |> +mutate(withinss = map_dbl(kclust, ~sum(.$sse_within_total_total))) +kmeans_logwithindiss <- k_means_results |> +mutate(withinss = map_dbl(kclust, ~sum(.$sse_within_total_total))) |> +mutate(logwithindiss = log(withinss) - log(withinss[k == 1])) +kmeans_logwithindiss +k_means_logwithindiss <- k_means_results |> +mutate(withinss = map_dbl(kclust, ~sum(.$sse_within_total_total))) |> +mutate(logwithindiss = log(withinss) - log(withinss[k == 1])) +k_means_metrics <- k_means_results |> +mutate(withinss = map_dbl(kclust, ~sum(.$sse_within_total_total))) |> +mutate(logwithiniss = log(withinss) - log(withinss[k == 1])) +k_means_metrics +k_means_metrics <- k_means_results |> +mutate(within_sse = map_dbl(kclust, ~sum(.$sse_within_total_total))) |> +mutate(log_within_sse = log(within_sse) - log(within_sse[k == 1])) +k_means_metrics <- k_means_results |> +mutate( +within_sse = map_dbl(kclust, ~sum(.$sse_within_total_total)), +log_within_sse = log(within_sse) - log(within_sse[k == 1]) +) +k_means_metrics +k_modes_example <- k_means(num_clusters = 3) |> +set_engine("klaR") |> +fit(~ q1 + q2+ q3 + q4, +data = labelled_respondents +) +k_modes_example +k_means(num_clusters = num_clusters) |> +set_engine("stats") |> +fit(~ q1 + q2+ q3 + q4, +data = labelled_respondents +) |> +extract_fit_summary() +fit_k_means <- function(num_clusters) { +k_means(num_clusters = num_clusters) |> +set_engine("stats") |> +fit(~ q1 + q2+ q3 + q4, +data = labelled_respondents +) |> +extract_fit_summary() +} +k_means_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, fit_k_means), +) +k_modes_example |> extract_fit_summary() +k_modes_example |> extract_fit_summary() |> str() +fit_k_means <- function(num_clusters, engine) { +k_means(num_clusters = num_clusters) |> +set_engine(engine) |> +fit(~ q1 + q2+ q3 + q4, +data = labelled_respondents +) |> +extract_fit_summary() +} +k_means_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, ~fit_k_means(., engine = "stats")), +) +k_modes_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, ~fit_k_means(., engine = "klaR")), +) +k_modes_results +k_modes_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, ~fit_k_means(., engine = "klaR")), +) |> +mutate( +within_sse = map_dbl(kclust, ~sum(.$sse_within_total_total)), 
+log_within_sse = log(within_sse) - log(within_sse[k == 1]) +) +k_modes_results +tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, ~fit_k_means(., engine = "klaR")), +) +k_modes_results <- tibble(k = k_min:k_max) |> +mutate( +kclust = map(k, ~fit_k_means(., engine = "klaR")), +) +k_modes_results$kclust[[1]] +k_modes_results$kclust[[1]] |> str() +k_modes_results$kclust[[2]] |> str() +k_modes_results$kclust[[3]] |> str() +?sample_n diff --git a/.gitignore b/.gitignore index 823fe8e..4995c72 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ .quarto/* /.quarto/ +.Rproj.user diff --git a/_freeze/posts/clustering-binary-data/index/execute-results/html.json b/_freeze/posts/clustering-binary-data/index/execute-results/html.json index bc744a0..87b9a3f 100644 --- a/_freeze/posts/clustering-binary-data/index/execute-results/html.json +++ b/_freeze/posts/clustering-binary-data/index/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "e7846f0987fd454bf365312353e0195d", + "hash": "bada403db8728a070e1d5cab9665b0f2", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Clustering Binary Data\"\ndescription: \"An application of different clustering approaches to simulated survey responses\"\nauthor: \"Christoph Scheuch\"\ndate: \"2023-11-25\" \nimage: thumbnail.png\n---\n\n\nIn this post I tackle the challenge to extract a small number of typical respondent profiles from a large scale survey with multiple yes-no questions. This type of setting corresponds to a classification problem without knowing the true labels of the observations – also known as unsupervised learning. Since I regularly face tasks in this area, I decided to start an irregular series of blogs that touch upon practical aspects of unsupervised learning in R using tidy principles.\n\nTechnically speaking, I have a set of $N$ observations $(x_1, x_2, ... , x_N)$ of a random $p$-vector $X$ with joint density $\\text{Pr}(X)$. The goal of classification is to directly infer the properties of this probability density without the help of the correct answers (or degree-of-error) for each observation. In this note I focus on cluster analysis that attempts to find convex regions of the $X$-space that contain modes of $\\text{Pr}(X)$. This approach aims to tell whether $\\text{Pr}(X)$ can be represented by a mixture of simpler densities representing distinct classes of observations.\n\nIntuitively, I want to find clusters of the survey responses such that respondents within each cluster are more closely related to one another than respondents assigned to different clusters. There are many possible ways to achieve that, but I focus on the most popular and most approachable ones: $K$-means, $K$-modes, as well as agglomerative and divisive hierarchical clustering. AS we see below, the 4 models yield quite different results for clustering binary data.\n\nI mainly use the tidyverse family of packages throughout this post. \n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\n```\n:::\n\n\n## Creating sample data\n\nLet us start by creating some sample data where we basically exactly know which kind of answer profiles are out there. Later, we evaluate the cluster models according to how well they are doing in uncovering the clusters and assigning respondents to clusters. I assume that there are 4 yes/no questions labeled q1, q2, q3 and q4. 
In addition, there are 3 different answer profiles where cluster 1 answers positively to the first question only, cluster 2 answers positively to question 2 and 3 and cluster 3 answers all questions positively. I also define the the number of respondents for each cluster.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncenters <- tibble(\n cluster = factor(1:3), \n respondents = c(250, 500, 200),\n q1 = c(1, 0, 1),\n q2 = c(0, 1, 1), \n q3 = c(0, 1, 1),\n q4 = c(0, 0, 1)\n)\n```\n:::\n\n\nAlternatively, we could think of the yes/no questions as medical records that indicate whether the subject has a certain pre-condition or not.\n\nSince it should be a bit tricky for the clustering models to find the actual response profiles, let us add some noise in the form of respondents that deviate from their assigned cluster profile. We find out below how the cluster algorithms are able to deal with this noise.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(123)\nlabelled_respondents <- centers |> \n mutate(\n across(\n starts_with(\"q\"),\n ~map2(respondents, .x, function(x, y) {\n rbinom(x, 1, max((y - 0.1), 0.1))\n }),\n .names = \"{col}\"\n )\n ) |> \n select(-respondents) |> \n unnest(cols = c(q1, q2, q3, q4))\n```\n:::\n\nThe figure below visualizes the distribution of simulated question responses by cluster.\n\n::: {.cell}\n\n```{.r .cell-code}\nlabelled_respondents |>\n pivot_longer(cols = -cluster, names_to = \"question\", values_to = \"response\") |>\n mutate(response = response == 1) |>\n ggplot(aes(x = response, y = question, color = cluster)) +\n geom_jitter() +\n theme_bw() +\n labs(x = \"Response\", y = \"Question\", color = \"Cluster\",\n title = \"Visualization of simulated question responses by cluster\")\n```\n\n::: {.cell-output-display}\n![](index_files/figure-html/unnamed-chunk-4-1.png){width=672}\n:::\n:::\n\n\n## $K$-means clustering\n\nThe $K$-means algorithm is one of the most popular clustering methods (see also this tidymodels example). It is intended for situations in which all variables are of the quantitative type since it partitions all respondents into $k$ groups such that the sum of squares from respondents to the assigned cluster centers are minimized. For binary data, the Euclidean distance reduces to counting the number of variables on which two cases disagree.\n\nThis leads to a problem (which is also described here) because of an arbitrary cluster assignment after cluster initialization. The first chosen clusters are still binary data and hence observations have integer distances from each of the centers. The corresponding ties are hard to overcome in any meaningful way. Afterwards, the algorithm computes means in clusters and revisits assignments. Nonetheless, $K$-means might produce informative results in a fast and easy to interpret way. I hence include it in this post for comparison.\n\nTo run the $K$-means algorithm, we first drop the cluster column.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nrespondents <- labelled_respondents |>\n select(-cluster)\n```\n:::\n\n\nIt is very straight-forward to run the built-in stats::kmeans clustering algorithm. I choose the parameter of maximum iterations to be 100 to increase the likeliness of getting the best fitting clusters. 
Since the data is fairly small and the algorithm is also quite fast, I see no harm in using a high number of iterations.\n\n\n::: {.cell}\n\n```{.r .cell-code}\niter_max <- 100\nkmeans_example <- stats::kmeans(respondents, centers = 3, iter.max = iter_max)\n```\n:::\n\n\nThe output of the algorithm is a list with different types of information including the assigned clusters for each respondent.\n\nAs we want to compare cluster assignment across different models and we repeatedly assign different clusters to respondents, I write up a helper function that adds assignments to the respondent data from above. The function shows that $K$-means and $K$-modes contain a field with cluster information. The two hierarchical cluster models, however, need to be cut a the desired number of clusters (more on that later).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nassign_clusters <- function(model, k = NULL) {\n if (class(model)[1] %in% c(\"kmeans\", \"kmodes\")) {\n cluster_assignment <- model$cluster\n }\n if (class(model)[1] %in% c(\"agnes\", \"diana\")) {\n if (is.null(k)) {\n stop(\"k required for hierarchical models!\")\n }\n cluster_assignment <- stats::cutree(model, k = k)\n }\n \n clusters <- respondents |>\n mutate(cluster = cluster_assignment)\n \n return(clusters)\n}\n```\n:::\n\n\nIn addition, I introduce a helper function that summarizes information by cluster. In particular, the function computes average survey responses (which correspond to proportion of yes answers in the current setting) and sorts the clusters according to the total number of positive answers. The latter helps us later to compare clusters across different models.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummarize_clusters <- function(model, k = NULL) {\n # Assign respondents to clusters\n clusters <- assign_clusters(model = model, k = k)\n \n # Compute summary statistics by cluster\n summary_statistics <- clusters |>\n group_by(cluster) |>\n summarize(across(matches(\"q\"), \\(x) mean(x, na.rm = TRUE)),\n assigned_respondents = n()) |>\n select(-cluster) |>\n # Rank clusters by total share of positive answers\n mutate(total = rowSums(across(matches(\"q\")))) |>\n arrange(-total) |>\n mutate(k = row_number(),\n model = class(model)[1])\n \n return(summary_statistics)\n}\n```\n:::\n\n\nWe could easily introduce other summary statistics into the function, but the current specification is sufficient for the purpose of this note.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nkmeans_example <- summarize_clusters(kmeans_example)\n```\n:::\n\n\nSince we do not know the true number of clusters in real-world settings, we want to compare the performance of clustering models for different numbers of clusters. Since we know that the true number of clusters is 3 in the current setting, let us stick to a maximum of 7 clusters. In practice, you might of course choose an arbitrary maximum number of clusters.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nk_min <- 1\nk_max <- 7\n\nkmeans_results <- tibble(k = k_min:k_max) |>\n mutate(\n kclust = map(k, ~kmeans(respondents, centers = .x, iter.max = iter_max)),\n )\n```\n:::\n\n\nA common heuristic to determine the optimal number of clusters is the elbow method where we plot the within-cluster sum of squared errors of an algorithm for increasing number of clusters. The optimal number of clusters corresponds to the point where adding another cluster does lead to much of an improvement anymore. 
In economic terms, we look for the point where the diminishing returns to an additional cluster are not worth the additional cost (assuming that we want the minimum number of clusters with optimal predictive power).\n\nThe function below computes the within-cluster sum of squares for any cluster assignments.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncompute_withinss <- function(model, k = NULL) {\n # Assign respondents to clusters\n clusters <- assign_clusters(model = model, k = k)\n \n # Compute averages per cluster center\n centers <- clusters |>\n group_by(cluster) |>\n summarize_all(mean) |>\n pivot_longer(cols = -cluster, names_to = \"question\", values_to = \"cluster_mean\")\n \n # Compute sum of squared differences from cluster centers\n withinss <- clusters |>\n pivot_longer(cols = -cluster, names_to = \"question\", values_to = \"response\") |>\n left_join(centers, by = c(\"cluster\", \"question\")) |>\n summarize(k = max(cluster),\n withinss = sum((response - cluster_mean)^2)) |>\n mutate(model = class(model)[1])\n \n return(withinss)\n}\n```\n:::\n\n\nWe can simply map the function across our list of $K$-means models. For better comparability, we normalize the within-cluster sum of squares for any number of cluster by the benchmark case of only having a single cluster. Moreover, we consider log-differences to because we care more about the percentage decrease in sum of squares rather than the absolute number. \n\n::: {.cell}\n\n```{.r .cell-code}\nkmeans_logwithindiss <- kmeans_results$kclust |>\n map(compute_withinss) |>\n reduce(bind_rows) |>\n mutate(logwithindiss = log(withinss) - log(withinss[k == 1]))\n```\n:::\n\n\n## $K$-modes clustering\n\nSince $K$-means is actually not ideal for binary (or hierarchical data in general), Huang (1997) came up with the $K$-modes algorithm. This clustering approach aims to partition respondents into $K$ groups such that the distance from respondents to the assigned cluster modes is minimized. A mode is a vector of elements that minimize the dissimilarities between the vector and each object of the data. Rather than using the Euclidean distance, $K$-modes uses simple matching distance between respondents to quantify dissimilarity which translates into counting the number of mismatches in all question responses in the current setting.\n\nFortunately, the klaR package provides an implementation of the $K$-modes algorithm that we can apply just like the $K$-means above.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nkmodes_example <- klaR::kmodes(respondents, iter.max = iter_max, modes = 3) |>\n summarize_clusters()\n```\n:::\n\n\nSimilarly, we just map the model across different numbers of target cluster modes and compute the within-cluster sum of squares.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nkmodes_results <- tibble(k = k_min:k_max) |>\n mutate(\n kclust = map(k, ~klaR::kmodes(respondents, modes = ., iter.max = iter_max))\n )\n\nkmodes_logwithindiss <- kmodes_results$kclust |>\n map(compute_withinss) |>\n reduce(bind_rows) |>\n mutate(logwithindiss = log(withinss) - log(withinss[k == 1]))\n```\n:::\n\n\nNote that I computed the within-cluster sum of squared errors rather than using the within-cluster simple-matching distance provided by the function itself. The latter counts the number of differences from assigned respondents to their cluster modes.\n\n## Hierarchical clustering\n\nAs an alternative to computing optimal assignments for a given number of clusters, we might sometimes prefer to arrange the clusters into a natural hierarchy. 
This involves successively grouping the clusters themselves such that at each level of the hierarchy, clusters within the same group are more similar to each other than those in different groups. There are two fundamentally different approaches to hierarchical clustering that are fortunately implemented in the great cluster package.\n\nBoth hierarchical clustering approaches require a dissimilarity or distance matrix. Since we have binary data, we choose the asymmetric binary distance matrix based on the Jaccard distance. Intuitively, the Jaccard distance measures how far the overlap of responses between two groups is from perfect overlap.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndissimilarity_matrix <- stats::dist(respondents, method = \"binary\")\n```\n:::\n\n\nAgglomerative clustering start at the bottom and at each level recursively merge a selected pair of clusters into a single cluster. This produces a clustering at the next higher level with one less cluster. The pair chosen for merging consist of the two clusters with the smallest within-cluster dissimilarity. On an intuitive level, agglomerative clustering is hence better in discovering small clusters.\n\nThe cluster package provides the agnes algorithm (AGglomerative NESting) that can easily applied to the dissimilarity matrix.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nagnes_results <- cluster::agnes(dissimilarity_matrix, diss = TRUE, keep.diss = TRUE, method = \"complete\")\n```\n:::\n\n\nThe function returns a clustering tree that we could plot (which I actually rarely found really helpful) or cut into different partitions using the stats::cutree function. This is why the helper functions from above need a number of target clusters as an input for hierarchical clustering models. However, the logic of the summary statistics are just as above.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nagnes_example <- summarize_clusters(agnes_results, k = 3)\n\nagnes_logwithindiss <- k_min:k_max |>\n map(~compute_withinss(agnes_results, .)) |>\n reduce(bind_rows) |>\n mutate(logwithindiss = log(withinss) - log(withinss[k == 1]))\n```\n:::\n\n\nDivisive methods start at the top and at each level recursively split one of the existing clusters at that level into two new clusters. The split is chosen such that two new groups with the largest between-group dissimilarity emerge. Intuitively speaking, divisive clustering is thus better in discovering large clusters.\n\nThe cluster package provides the diana algorithm (DIvise ANAlysis) for this clustering approach where the logic is basically the same as for the agnes model.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndiana_results <- cluster::diana(dissimilarity_matrix, diss = TRUE, keep.diss = TRUE) \ndiana_example <- diana_results |>\nsummarize_clusters(k = 3)\ndiana_logwithindiss <- k_min:k_max |>\n map(~compute_withinss(diana_results, .)) |>\n reduce(bind_rows) |>\n mutate(logwithindiss = log(withinss) - log(withinss[k == 1]))\n```\n:::\n\n\n## Model comparison\n\nLet us start the model comparison by looking at the within cluster sum of squares for different numbers of clusters. The figure shows that the \\(K\\)-modes algorithm improves the fastest towards the true number of 3 clusters. The elbow method would suggest in this case to stick with 3 clusters for this algorithm.\n\nFor the other models, the picture is less clear: the curve of \\(K\\)-means would rather suggest having 2 or 4 clusters. 
But then again this might be the result of initial conditions as the \\(K\\)-means algorithm assigns respondents to clusters in a rather arbitrary way at cluster initialization. The divise model would also suggest 3 clusters as there is no improvement to using 4 clusters, but the overall sum of squares does not improve much suggesting a lot of mis-classification in the first 2 clusters. The agglomerative model exhibits the worst performance since it shows no clear suggestion for an optimal cluster and the first 2 clusters seem to be only marginally better than having a single cluster.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbind_rows(kmeans_logwithindiss, kmodes_logwithindiss,\n agnes_logwithindiss, diana_logwithindiss) |>\n ggplot(aes(x = k, y = logwithindiss, color = model, linetype = model)) +\n geom_line() +\n scale_x_continuous(breaks = k_min:k_max) + \n theme_minimal() +\n labs(x = \"Number of Clusters\", y = bquote(log(W[k])-log(W[1])), color = \"Model\", linetype = \"Model\",\n title = \"Within cluster sum of squares relative to benchmark case of one cluster\")\n```\n\n::: {.cell-output-display}\n![](index_files/figure-html/unnamed-chunk-19-1.png){width=672}\n:::\n:::\n\n\nNow, let us compare the proportion of positive responses within assigned clusters across models. Recall that I ranked clusters according to the total share of positive answers to ensure comparability. This approach is only possible in this type of setting where we can easily introduce such a ranking. The figure shows that all models are doing well in discovering the cluster with a single response. The cluster with 2 positive responses seems to be discovered by diana, \\(K\\)-means and \\(K\\)-modes, while agnes is clearly off. The cluster with only positive responses is again discovered by \\(K\\)-means and \\(K\\)-modes whereas the hierarchical models perform rather poorly. Overall, this picture again suggests that \\(K\\)-modes performs best for the current setting.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbind_rows(\n kmeans_example,\n kmodes_example,\n agnes_example,\n diana_example) |>\n select(-c(total, assigned_respondents)) |>\n pivot_longer(cols = -c(k, model), names_to = \"question\", values_to = \"response\") |>\n mutate(cluster = str_c(\"Cluster \", k)) |>\n ggplot(aes(x = response, y = question, fill = model)) +\n geom_col(position = \"dodge\") +\n facet_wrap(~cluster) +\n theme_bw() +\n scale_x_continuous(labels = scales::percent) + \n geom_hline(yintercept = seq(1.5, length(unique(colnames(respondents)))-0.5, 1),\n colour = 'black') +\n labs(x = \"Proportion of responses\", y = \"Question\", fill = \"Model\",\n title = \"Proportion of positive responses within assigned clusters\")\n```\n\n::: {.cell-output-display}\n![](index_files/figure-html/unnamed-chunk-20-1.png){width=672}\n:::\n:::\n\n\nFinally, let us check how well each model assigns respondents to the true cluster which is obviously not possible in real unsupervised applications. The figure below shows the true number of respondents by cluster as a dashed box and the assigned respondents as bars. 
Again, agnes and diana do a pretty bad job for the current setting, while \\(K\\)-means and \\(K\\)-modes show quite simiar assignments that are not too far off from the true ones.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbind_rows(\n kmeans_example,\n kmodes_example,\n agnes_example,\n diana_example) |>\n mutate(cluster = str_c(\"Cluster \", k)) |>\n select(model, cluster, assigned_respondents) |>\n ggplot() +\n geom_col(position = \"dodge\", aes(y = assigned_respondents, x = cluster, fill = model)) +\n geom_col(data = labelled_respondents |>\n group_by(cluster = str_c(\"Cluster \", cluster)) |>\n summarize(assigned_respondents = n(),\n model = \"actual\"), aes(y = assigned_respondents, x = cluster), fill = \"white\", color = \"black\", alpha = 0, linetype = \"dashed\") +\n theme_bw() +\n labs(x = NULL, y = \"Number of assigned respondents\", fill = \"Model\",\n title = \"Number of assigned respondents by cluster\",\n subtitle = \"Dashed box indicates true number of respondents by cluster\")\n```\n\n::: {.cell-output-display}\n![](index_files/figure-html/unnamed-chunk-21-1.png){width=672}\n:::\n:::\n\n\nLet me end this post with a few words of caution: first, the ultimate outcome heavily depends on the seed chosen at the beginning of the post. The results might be quite different for other draws of respondents or initial conditions for clustering algorithms. Second, there are many more models out there that can be applied to the current setting. However, with this post I want to emphasize that it is important to consider different models at the same time and to compare them through a consistent set of measures. Ultimately, choosing the optimal number of clusters in practice requires a judgment call, but at least it can be informed as much as possible.", + "markdown": "---\ntitle: \"Clustering Binary Data\"\ndescription: \"An application of different clustering approaches to simulated survey responses\"\nauthor: \"Christoph Scheuch\"\ndate: \"2023-11-25\" \nimage: thumbnail.png\n---\n\n\nIn this post I tackle the challenge to extract a small number of typical respondent profiles from a large scale survey with multiple yes-no questions. This type of setting corresponds to a classification problem without knowing the true labels of the observations – also known as unsupervised learning. Since I regularly face tasks in this area, I decided to start an irregular series of blogs that touch upon practical aspects of unsupervised learning in R using tidy principles.\n\nTechnically speaking, I have a set of $N$ observations $(x_1, x_2, ... , x_N)$ of a random $p$-vector $X$ with joint density $\\text{Pr}(X)$. The goal of classification is to directly infer the properties of this probability density without the help of the correct answers (or degree-of-error) for each observation. In this note I focus on cluster analysis that attempts to find convex regions of the $X$-space that contain modes of $\\text{Pr}(X)$. This approach aims to tell whether $\\text{Pr}(X)$ can be represented by a mixture of simpler densities representing distinct classes of observations.\n\nIntuitively, I want to find clusters of the survey responses such that respondents within each cluster are more closely related to one another than respondents assigned to different clusters. There are many possible ways to achieve that, but I focus on the most popular and most approachable ones: $K$-means, $K$-modes, as well as agglomerative and divisive hierarchical clustering. 
AS we see below, the 4 models yield quite different results for clustering binary data.\n\nI use the following packages throughout this post. In particular, I use `klaR` and `cluster` for clustering algorithms that go beyond the `stats` package that is included with your R installation.[^1] I explicitely refer to the corresponding packages when I call them below. \n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(klaR)\nlibrary(cluster)\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(purrr)\nlibrary(ggplot2)\nlibrary(scales)\n```\n:::\n\n\n## Creating sample data\n\nLet us start by creating some sample data where we basically exactly know which kind of answer profiles are out there. Later, we evaluate the cluster models according to how well they are doing in uncovering the clusters and assigning respondents to clusters. I assume that there are 4 yes/no questions labeled q1, q2, q3 and q4. In addition, there are 3 different answer profiles where cluster 1 answers positively to the first question only, cluster 2 answers positively to question 2 and 3 and cluster 3 answers all questions positively. I also define the the number of respondents for each cluster.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncenters <- tibble(\n cluster = factor(1:3), \n respondents = c(250, 500, 200),\n q1 = c(1, 0, 1),\n q2 = c(0, 1, 1), \n q3 = c(0, 1, 1),\n q4 = c(0, 0, 1)\n)\n```\n:::\n\n\nAlternatively, we could think of the yes/no questions as medical records that indicate whether the subject has a certain pre-condition or not.\n\nSince it should be a bit tricky for the clustering models to find the actual response profiles, let us add some noise in the form of respondents that deviate from their assigned cluster profile and shuffle all rows. We find out below how the cluster algorithms are able to deal with this noise.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nset.seed(42)\nlabelled_respondents <- centers |> \n mutate(\n across(\n starts_with(\"q\"),\n ~map2(respondents, .x, function(x, y) {\n rbinom(x, 1, max((y - 0.1), 0.1))\n }),\n .names = \"{col}\"\n )\n ) |> \n select(-respondents) |> \n unnest(cols = c(q1, q2, q3, q4)) |> \n sample_n(n())\n```\n:::\n\nThe figure below visualizes the distribution of simulated question responses by cluster.\n\n::: {.cell}\n\n```{.r .cell-code}\nlabelled_respondents |>\n pivot_longer(cols = -cluster, names_to = \"question\", values_to = \"response\") |>\n mutate(response = response == 1) |>\n ggplot(aes(x = response, y = question, color = cluster)) +\n geom_jitter() +\n theme_bw() +\n labs(x = \"Response\", y = \"Question\", color = \"Cluster\",\n title = \"Visualization of simulated question responses by cluster\")\n```\n\n::: {.cell-output-display}\n![](index_files/figure-html/unnamed-chunk-4-1.png){width=672}\n:::\n:::\n\n\n## $K$-means clustering\n\nThe $K$-means algorithm is one of the most popular clustering methods (see also this tidymodels example). It is intended for situations in which all variables are of the quantitative type since it partitions all respondents into $k$ groups such that the sum of squares from respondents to the assigned cluster centers are minimized. For binary data, the Euclidean distance reduces to counting the number of variables on which two cases disagree.\n\nThis leads to a problem (which is also described here) because of an arbitrary cluster assignment after cluster initialization. The first chosen clusters are still binary data and hence observations have integer distances from each of the centers. 
The corresponding ties are hard to overcome in any meaningful way. Afterwards, the algorithm computes means in clusters and revisits assignments. Nonetheless, $K$-means might produce informative results in a fast and easy to interpret way. I hence include it in this post for comparison.\n\nTo run the $K$-means algorithm, we first drop the cluster column.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nrespondents <- labelled_respondents |>\n select(-cluster)\n```\n:::\n\n\nIt is very straight-forward to run the built-in stats::kmeans clustering algorithm. I choose the parameter of maximum iterations to be 100 to increase the likeliness of getting the best fitting clusters. Since the data is fairly small and the algorithm is also quite fast, I see no harm in using a high number of iterations.\n\n\n::: {.cell}\n\n```{.r .cell-code}\niter_max <- 100\nkmeans_example <- stats::kmeans(respondents, centers = 3, iter.max = iter_max)\n```\n:::\n\n\nThe output of the algorithm is a list with different types of information including the assigned clusters for each respondent.\n\nAs we want to compare cluster assignment across different models and we repeatedly assign different clusters to respondents, I write up a helper function that adds assignments to the respondent data from above. The function shows that $K$-means and $K$-modes contain a field with cluster information. The two hierarchical cluster models, however, need to be cut a the desired number of clusters (more on that later).\n\n\n::: {.cell}\n\n```{.r .cell-code}\nassign_clusters <- function(model, k = NULL) {\n if (class(model)[1] %in% c(\"kmeans\", \"kmodes\")) {\n cluster_assignment <- model$cluster\n }\n if (class(model)[1] %in% c(\"agnes\", \"diana\")) {\n if (is.null(k)) {\n stop(\"k required for hierarchical models!\")\n }\n cluster_assignment <- stats::cutree(model, k = k)\n }\n \n clusters <- respondents |>\n mutate(cluster = cluster_assignment)\n \n return(clusters)\n}\n```\n:::\n\n\nIn addition, I introduce a helper function that summarizes information by cluster. In particular, the function computes average survey responses (which correspond to proportion of yes answers in the current setting) and sorts the clusters according to the total number of positive answers. The latter helps us later to compare clusters across different models.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummarize_clusters <- function(model, k = NULL) {\n # Assign respondents to clusters\n clusters <- assign_clusters(model = model, k = k)\n \n # Compute summary statistics by cluster\n summary_statistics <- clusters |>\n group_by(cluster) |>\n summarize(across(matches(\"q\"), \\(x) mean(x, na.rm = TRUE)),\n assigned_respondents = n()) |>\n select(-cluster) |>\n # Rank clusters by total share of positive answers\n mutate(total = rowSums(across(matches(\"q\")))) |>\n arrange(-total) |>\n mutate(k = row_number(),\n model = class(model)[1])\n \n return(summary_statistics)\n}\n```\n:::\n\n\nWe could easily introduce other summary statistics into the function, but the current specification is sufficient for the purpose of this note.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nkmeans_example <- summarize_clusters(kmeans_example)\n```\n:::\n\n\nSince we do not know the true number of clusters in real-world settings, we want to compare the performance of clustering models for different numbers of clusters. Since we know that the true number of clusters is 3 in the current setting, let us stick to a maximum of 7 clusters. 
In practice, you might of course choose an arbitrary maximum number of clusters.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nk_min <- 1\nk_max <- 7\n\nkmeans_results <- tibble(k = k_min:k_max) |>\n mutate(\n kclust = map(k, ~kmeans(respondents, centers = .x, iter.max = iter_max)),\n )\n```\n:::\n\n\nA common heuristic to determine the optimal number of clusters is the elbow method where we plot the within-cluster sum of squared errors of an algorithm for increasing number of clusters. The optimal number of clusters corresponds to the point where adding another cluster does lead to much of an improvement anymore. In economic terms, we look for the point where the diminishing returns to an additional cluster are not worth the additional cost (assuming that we want the minimum number of clusters with optimal predictive power).\n\nThe function below computes the within-cluster sum of squares for any cluster assignments.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncompute_withinss <- function(model, k = NULL) {\n # Assign respondents to clusters\n clusters <- assign_clusters(model = model, k = k)\n \n # Compute averages per cluster center\n centers <- clusters |>\n group_by(cluster) |>\n summarize_all(mean) |>\n pivot_longer(cols = -cluster, names_to = \"question\", values_to = \"cluster_mean\")\n \n # Compute sum of squared differences from cluster centers\n withinss <- clusters |>\n pivot_longer(cols = -cluster, names_to = \"question\", values_to = \"response\") |>\n left_join(centers, by = c(\"cluster\", \"question\")) |>\n summarize(k = max(cluster),\n withinss = sum((response - cluster_mean)^2)) |>\n mutate(model = class(model)[1])\n \n return(withinss)\n}\n```\n:::\n\n\nWe can simply map the function across our list of $K$-means models. For better comparability, we normalize the within-cluster sum of squares for any number of cluster by the benchmark case of only having a single cluster. Moreover, we consider log-differences to because we care more about the percentage decrease in sum of squares rather than the absolute number. \n\n::: {.cell}\n\n```{.r .cell-code}\nkmeans_logwithindiss <- kmeans_results$kclust |>\n map(compute_withinss) |>\n reduce(bind_rows) |>\n mutate(logwithindiss = log(withinss) - log(withinss[k == 1]))\n```\n:::\n\n\n## $K$-modes clustering\n\nSince $K$-means is actually not ideal for binary (or hierarchical data in general), Huang (1997) came up with the $K$-modes algorithm. This clustering approach aims to partition respondents into $K$ groups such that the distance from respondents to the assigned cluster modes is minimized. A mode is a vector of elements that minimize the dissimilarities between the vector and each object of the data. 
Rather than using the Euclidean distance, $K$-modes uses simple matching distance between respondents to quantify dissimilarity which translates into counting the number of mismatches in all question responses in the current setting.\n\nFortunately, the klaR package provides an implementation of the $K$-modes algorithm that we can apply just like the $K$-means above.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nkmodes_example <- klaR::kmodes(respondents, iter.max = iter_max, modes = 3) |>\n summarize_clusters()\n```\n:::\n\n\nSimilarly, we just map the model across different numbers of target cluster modes and compute the within-cluster sum of squares.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nkmodes_results <- tibble(k = k_min:k_max) |>\n mutate(\n kclust = map(k, ~klaR::kmodes(respondents, modes = ., iter.max = iter_max))\n )\n\nkmodes_logwithindiss <- kmodes_results$kclust |>\n map(compute_withinss) |>\n reduce(bind_rows) |>\n mutate(logwithindiss = log(withinss) - log(withinss[k == 1]))\n```\n:::\n\n\nNote that I computed the within-cluster sum of squared errors rather than using the within-cluster simple-matching distance provided by the function itself. The latter counts the number of differences from assigned respondents to their cluster modes.\n\n## Hierarchical clustering\n\nAs an alternative to computing optimal assignments for a given number of clusters, we might sometimes prefer to arrange the clusters into a natural hierarchy. This involves successively grouping the clusters themselves such that at each level of the hierarchy, clusters within the same group are more similar to each other than those in different groups. There are two fundamentally different approaches to hierarchical clustering that are fortunately implemented in the great cluster package.\n\nBoth hierarchical clustering approaches require a dissimilarity or distance matrix. Since we have binary data, we choose the asymmetric binary distance matrix based on the Jaccard distance. Intuitively, the Jaccard distance measures how far the overlap of responses between two groups is from perfect overlap.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndissimilarity_matrix <- stats::dist(respondents, method = \"binary\")\n```\n:::\n\n\nAgglomerative clustering start at the bottom and at each level recursively merge a selected pair of clusters into a single cluster. This produces a clustering at the next higher level with one less cluster. The pair chosen for merging consist of the two clusters with the smallest within-cluster dissimilarity. On an intuitive level, agglomerative clustering is hence better in discovering small clusters.\n\nThe cluster package provides the agnes algorithm (AGglomerative NESting) that can easily applied to the dissimilarity matrix.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nagnes_results <- cluster::agnes(dissimilarity_matrix, diss = TRUE, keep.diss = TRUE, method = \"complete\")\n```\n:::\n\n\nThe function returns a clustering tree that we could plot (which I actually rarely found really helpful) or cut into different partitions using the stats::cutree function. This is why the helper functions from above need a number of target clusters as an input for hierarchical clustering models. 
However, the logic of the summary statistics are just as above.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nagnes_example <- summarize_clusters(agnes_results, k = 3)\n\nagnes_logwithindiss <- k_min:k_max |>\n map(~compute_withinss(agnes_results, .)) |>\n reduce(bind_rows) |>\n mutate(logwithindiss = log(withinss) - log(withinss[k == 1]))\n```\n:::\n\n\nDivisive methods start at the top and at each level recursively split one of the existing clusters at that level into two new clusters. The split is chosen such that two new groups with the largest between-group dissimilarity emerge. Intuitively speaking, divisive clustering is thus better in discovering large clusters.\n\nThe cluster package provides the diana algorithm (DIvise ANAlysis) for this clustering approach where the logic is basically the same as for the agnes model.\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndiana_results <- cluster::diana(dissimilarity_matrix, diss = TRUE, keep.diss = TRUE) \ndiana_example <- diana_results |>\nsummarize_clusters(k = 3)\ndiana_logwithindiss <- k_min:k_max |>\n map(~compute_withinss(diana_results, .)) |>\n reduce(bind_rows) |>\n mutate(logwithindiss = log(withinss) - log(withinss[k == 1]))\n```\n:::\n\n\n## Model comparison\n\nLet us start the model comparison by looking at the within cluster sum of squares for different numbers of clusters. The figure shows that the \\(K\\)-modes algorithm improves the fastest towards the true number of 3 clusters. The elbow method would suggest in this case to stick with 3 clusters for this algorithm.\n\nFor the other models, the picture is less clear: the curve of \\(K\\)-means would rather suggest having 2 or 4 clusters. But then again this might be the result of initial conditions as the \\(K\\)-means algorithm assigns respondents to clusters in a rather arbitrary way at cluster initialization. The divise model would also suggest 3 clusters as there is no improvement to using 4 clusters, but the overall sum of squares does not improve much suggesting a lot of mis-classification in the first 2 clusters. The agglomerative model exhibits the worst performance since it shows no clear suggestion for an optimal cluster and the first 2 clusters seem to be only marginally better than having a single cluster.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbind_rows(kmeans_logwithindiss, kmodes_logwithindiss,\n agnes_logwithindiss, diana_logwithindiss) |>\n ggplot(aes(x = k, y = logwithindiss, color = model, linetype = model)) +\n geom_line() +\n scale_x_continuous(breaks = k_min:k_max) + \n theme_minimal() +\n labs(x = \"Number of Clusters\", y = bquote(log(W[k])-log(W[1])), color = \"Model\", linetype = \"Model\",\n title = \"Within cluster sum of squares relative to benchmark case of one cluster\")\n```\n\n::: {.cell-output-display}\n![](index_files/figure-html/unnamed-chunk-19-1.png){width=672}\n:::\n:::\n\n\nNow, let us compare the proportion of positive responses within assigned clusters across models. Recall that I ranked clusters according to the total share of positive answers to ensure comparability. This approach is only possible in this type of setting where we can easily introduce such a ranking. The figure shows that all models are doing well in discovering the cluster with a single response. The cluster with 2 positive responses seems to be discovered by diana, \\(K\\)-means and \\(K\\)-modes, while agnes is clearly off. 
The cluster with only positive responses is again discovered by \\(K\\)-means and \\(K\\)-modes whereas the hierarchical models perform rather poorly. Overall, this picture again suggests that \\(K\\)-modes performs best for the current setting.\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbind_rows(\n kmeans_example,\n kmodes_example,\n agnes_example,\n diana_example) |>\n select(-c(total, assigned_respondents)) |>\n pivot_longer(cols = -c(k, model), names_to = \"question\", values_to = \"response\") |>\n mutate(cluster = paste0(\"Cluster \", k)) |>\n ggplot(aes(x = response, y = question, fill = model)) +\n geom_col(position = \"dodge\") +\n facet_wrap(~cluster) +\n theme_bw() +\n scale_x_continuous(labels = scales::percent) + \n geom_hline(yintercept = seq(1.5, length(unique(colnames(respondents)))-0.5, 1),\n colour = 'black') +\n labs(x = \"Proportion of responses\", y = \"Question\", fill = \"Model\",\n title = \"Proportion of positive responses within assigned clusters\")\n```\n\n::: {.cell-output-display}\n![](index_files/figure-html/unnamed-chunk-20-1.png){width=672}\n:::\n:::\n\n\nFinally, let us check how well each model assigns respondents to the true cluster which is obviously not possible in real unsupervised applications. The figure below shows the true number of respondents by cluster as a dashed box and the assigned respondents as bars. Again, agnes and diana do a pretty bad job for the current setting, while \\(K\\)-means and \\(K\\)-modes show quite simiar assignments that are not too far off from the true ones.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nbind_rows(\n kmeans_example,\n kmodes_example,\n agnes_example,\n diana_example) |>\n mutate(cluster = paste0(\"Cluster \", k)) |>\n select(model, cluster, assigned_respondents) |>\n ggplot() +\n geom_col(position = \"dodge\", aes(y = assigned_respondents, x = cluster, fill = model)) +\n geom_col(data = labelled_respondents |>\n group_by(cluster = paste0(\"Cluster \", cluster)) |>\n summarize(assigned_respondents = n(),\n model = \"actual\"), aes(y = assigned_respondents, x = cluster), fill = \"white\", color = \"black\", alpha = 0, linetype = \"dashed\") +\n theme_bw() +\n labs(x = NULL, y = \"Number of assigned respondents\", fill = \"Model\",\n title = \"Number of assigned respondents by cluster\",\n subtitle = \"Dashed box indicates true number of respondents by cluster\")\n```\n\n::: {.cell-output-display}\n![](index_files/figure-html/unnamed-chunk-21-1.png){width=672}\n:::\n:::\n\n\nLet me end this post with a few words of caution: first, the ultimate outcome heavily depends on the seed chosen at the beginning of the post. The results might be quite different for other draws of respondents or initial conditions for clustering algorithms. Second, there are many more models out there that can be applied to the current setting. However, with this post I want to emphasize that it is important to consider different models at the same time and to compare them through a consistent set of measures. Ultimately, choosing the optimal number of clusters in practice requires a judgment call, but at least it can be informed as much as possible.\n\n[^1]: As of writing, the `tidyclust` package only has limited support for hierarchical clustering, so I decided to abstain from using it for this post. 
", "supporting": [ "index_files" ], diff --git a/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-19-1.png b/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-19-1.png index 75f3c4c..253dd96 100644 Binary files a/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-19-1.png and b/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-19-1.png differ diff --git a/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-20-1.png b/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-20-1.png index 9b12749..19be9bd 100644 Binary files a/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-20-1.png and b/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-20-1.png differ diff --git a/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-21-1.png b/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-21-1.png index a6aa5a3..996811b 100644 Binary files a/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-21-1.png and b/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-21-1.png differ diff --git a/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-4-1.png b/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-4-1.png index 7eb8750..bd9e8d4 100644 Binary files a/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-4-1.png and b/_freeze/posts/clustering-binary-data/index/figure-html/unnamed-chunk-4-1.png differ diff --git a/_quarto.yml b/_quarto.yml index c5e0369..72b04b7 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -11,6 +11,7 @@ website: title: "Tidy Intelligence" description: "Let your data spark joy!" site-url: https://www.tidy-intelligence.com + favicon: images/favicon.png search: false page-navigation: false navbar: @@ -33,3 +34,4 @@ format: toc: false smooth-scroll: true from: markdown+emoji + fig-align: "center" diff --git a/blog.Rproj b/blog.Rproj new file mode 100644 index 0000000..8e3c2eb --- /dev/null +++ b/blog.Rproj @@ -0,0 +1,13 @@ +Version: 1.0 + +RestoreWorkspace: Default +SaveWorkspace: Default +AlwaysSaveHistory: Default + +EnableCodeIndexing: Yes +UseSpacesForTab: Yes +NumSpacesForTab: 2 +Encoding: UTF-8 + +RnwWeave: Sweave +LaTeX: pdfLaTeX diff --git a/docs/index.html b/docs/index.html index d9e3c94..cbf340c 100644 --- a/docs/index.html +++ b/docs/index.html @@ -25,6 +25,7 @@ + @@ -137,7 +138,7 @@
diff --git a/docs/posts/clustering-binary-data/index.html b/docs/posts/clustering-binary-data/index.html index e3617b0..b752ab4 100644 --- a/docs/posts/clustering-binary-data/index.html +++ b/docs/posts/clustering-binary-data/index.html @@ -61,6 +61,7 @@ + @@ -172,9 +173,15 @@
In this post I tackle the challenge of extracting a small number of typical respondent profiles from a large-scale survey with multiple yes-no questions. This type of setting corresponds to a classification problem in which the true labels of the observations are unknown – also known as unsupervised learning. Since I regularly face tasks in this area, I decided to start an irregular series of blog posts that touch upon practical aspects of unsupervised learning in R using tidy principles.
Technically speaking, I have a set of \(N\) observations \((x_1, x_2, ... , x_N)\) of a random \(p\)-vector \(X\) with joint density \(\text{Pr}(X)\). The goal is to directly infer the properties of this probability density without the help of correct answers (or a degree-of-error) for each observation. In this note I focus on cluster analysis, which attempts to find convex regions of the \(X\)-space that contain modes of \(\text{Pr}(X)\). This approach aims to tell whether \(\text{Pr}(X)\) can be represented by a mixture of simpler densities, each representing a distinct class of observations.
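To make the mixture idea concrete, one way to write it down (my notation – the post does not spell this formula out) is

\[
\text{Pr}(X) = \sum_{k=1}^{K} \pi_k \, \text{Pr}_k(X), \qquad \pi_k \ge 0, \quad \sum_{k=1}^{K} \pi_k = 1,
\]

where each \(\text{Pr}_k\) is a simpler density concentrated around one mode (one respondent profile) and \(\pi_k\) is the share of respondents in class \(k\). Cluster analysis then tries to assign each observation to the component it most plausibly came from, without ever observing \(k\).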
Intuitively, I want to find clusters of the survey responses such that respondents within each cluster are more closely related to one another than to respondents assigned to different clusters. There are many possible ways to achieve that, but I focus on the most popular and most approachable ones: \(K\)-means, \(K\)-modes, as well as agglomerative and divisive hierarchical clustering. As we see below, the four models yield quite different results for clustering binary data.
-I mainly use the tidyverse family of packages throughout this post.
+I use the following packages throughout this post. In particular, I use klaR and cluster for clustering algorithms that go beyond the stats package that is included with your R installation.1 I explicitly refer to the corresponding packages when I call them below.
library(tidyverse)
library(klaR)
+library(cluster)
+library(dplyr)
+library(tidyr)
+library(purrr)
+library(ggplot2)
+library(scales)
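For orientation, here is a sketch of the clustering calls from these packages that the post applies later on (a preview, not part of the original code chunk; respondents is the 0/1 response table and iter_max the maximum number of iterations, both defined further below).

```r
# Preview sketch of the four clustering approaches compared in this post.
# Assumes `respondents` (0/1 responses without the cluster label) and
# `iter_max` (set to 100 below) are already defined.
kmeans_fit <- stats::kmeans(respondents, centers = 3, iter.max = iter_max)
kmodes_fit <- klaR::kmodes(respondents, modes = 3, iter.max = iter_max)
dissimilarity <- stats::dist(respondents, method = "binary")  # Jaccard-type distance
agnes_fit <- cluster::agnes(dissimilarity, diss = TRUE, method = "complete")
diana_fit <- cluster::diana(dissimilarity, diss = TRUE)
```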
Alternatively, we could think of the yes/no questions as medical records that indicate whether a subject has a certain pre-existing condition or not.
-Since it should be a bit tricky for the clustering models to find the actual response profiles, let us add some noise in the form of respondents that deviate from their assigned cluster profile. We find out below how the cluster algorithms are able to deal with this noise.
+Since it should be a bit tricky for the clustering models to recover the actual response profiles, let us add some noise in the form of respondents who deviate from their assigned cluster profile, and shuffle all rows. We will see below how well the clustering algorithms deal with this noise.
-set.seed(123)
+set.seed(42)
labelled_respondents <- centers |>
  mutate(
    across(
@@ -204,7 +211,8 @@ Creating sample data
    )
  ) |>
  select(-respondents) |>
- unnest(cols = c(q1, q2, q3, q4))
+unnest(cols = c(q1, q2, q3, q4)) |>
+ sample_n(n())
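As a quick sanity check on how much noise this introduces, one could compare each simulated response with the intended profile of its cluster; given how the noise is generated, each response deviates from its profile with probability 0.1, so roughly 10% of responses should differ. The snippet below is a hypothetical check, not part of the original post.

```r
# Hypothetical check: share of simulated responses that deviate from the
# intended profile of their assigned cluster (expected to be about 10%).
labelled_respondents |>
  pivot_longer(-cluster, names_to = "question", values_to = "response") |>
  left_join(
    centers |>
      select(-respondents) |>
      pivot_longer(-cluster, names_to = "question", values_to = "profile"),
    by = c("cluster", "question")
  ) |>
  summarize(deviation_rate = mean(response != profile))
```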
The figure below visualizes the distribution of simulated question responses by cluster.