Skip to content

Commit

Permalink
[UP+ BF]: fixed bibliography entries +
Browse files Browse the repository at this point in the history
  • Loading branch information
alexpron committed Jul 24, 2024
1 parent 17ce5f0 commit f160348
Show file tree
Hide file tree
Showing 4 changed files with 7 additions and 78 deletions.
18 changes: 0 additions & 18 deletions content/publication/rosenblatt-data-2024-1/cite.bib

This file was deleted.

33 changes: 0 additions & 33 deletions content/publication/rosenblatt-data-2024-1/index.md

This file was deleted.

4 changes: 1 addition & 3 deletions content/publication/soskic-garden-2024/index.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
---
title: Garden of forking paths in textlessspan style=\"font-variant:small-caps;\"textgreaterERPtextless/spantextgreater
research – Effects of varying pre‐processing and analysis steps in an textlessspan
style=\"font-variant:small-caps;\"textgreaterN400textless/spantextgreater experiment
title: Garden of forking paths in ERP research – Effects of varying pre-processing and analysis steps in an N400 experiment
authors:
- Anđela Šoškić
- Suzy J. Styles
Expand Down
30 changes: 6 additions & 24 deletions publications.bib
Original file line number Diff line number Diff line change
Expand Up @@ -77,14 +77,14 @@ @article{chekroud_illusory_2024
url = {https://www.science.org/doi/10.1126/science.adg8538},
doi = {10.1126/science.adg8538},
abstract = {It is widely hoped that statistical models can improve decision-making related to medical treatments. Because of the cost and scarcity of medical outcomes data, this hope is typically based on investigators observing a model’s success in one or two datasets or clinical contexts. We scrutinized this optimism by examining how well a machine learning model performed across several independent clinical trials of antipsychotic medication for schizophrenia. Models predicted patient outcomes with high accuracy within the trial in which the model was developed but performed no better than chance when applied out-of-sample. Pooling data across trials to predict outcomes in the trial left out did not improve predictions. These results suggest that models predicting treatment outcomes in schizophrenia are highly context-dependent and may have limited generalizability. Editor’s summary: A central promise of artificial intelligence (AI) in healthcare is that large datasets can be mined to predict and identify the best course of care for future patients. Unfortunately, we do not know how these models would perform on new patients because they are rarely tested prospectively on truly independent patient samples. Chekroud et al. showed that machine learning models routinely achieve perfect performance in one dataset even when that dataset is a large international multisite clinical trial (see the Perspective by Petzschner). However, when that exact model was tested in truly independent clinical trials, performance fell to chance levels. Even when building what should be a more robust model by aggregating across a group of similar multisite trials, subsequent predictive performance remained poor. —Peter Stern. Clinical prediction models that work in one trial do not work in future trials of the same condition and same treatments.},
language = {en},
number = {6679},
Expand Down Expand Up @@ -514,24 +514,6 @@ @article{giehl_sharing_2024
file = {Giehl et al. - 2024 - Sharing brain imaging data in the Open Science era.pdf:/home/alpron/Zotero/storage/KLC63VH2/Giehl et al. - 2024 - Sharing brain imaging data in the Open Science era.pdf:application/pdf},
}
@article{rosenblatt_data_2024-1,
  title    = {Data leakage inflates prediction performance in connectome-based machine learning models},
  volume   = {15},
  issn     = {2041-1723},
  url      = {https://www.nature.com/articles/s41467-024-46150-w},
  doi      = {10.1038/s41467-024-46150-w},
  abstract = {Predictive modeling is a central technique in neuroimaging to identify brain-behavior relationships and test their generalizability to unseen data. However, data leakage undermines the validity of predictive models by breaching the separation between training and test data. Leakage is always an incorrect practice but still pervasive in machine learning. Understanding its effects on neuroimaging predictive models can inform how leakage affects existing literature. Here, we investigate the effects of five forms of leakage–involving feature selection, covariate correction, and dependence between subjects–on functional and structural connectome-based machine learning models across four datasets and three phenotypes. Leakage via feature selection and repeated subjects drastically inflates prediction performance, whereas other forms of leakage have minor effects. Furthermore, small datasets exacerbate the effects of leakage. Overall, our results illustrate the variable effects of leakage and underscore the importance of avoiding data leakage to improve the validity and reproducibility of predictive modeling.},
  language = {en},
  number   = {1},
  urldate  = {2024-07-15},
  journal  = {Nature Communications},
  author   = {Rosenblatt, Matthew and Tejavibulya, Link and Jiang, Rongtao and Noble, Stephanie and Scheinost, Dustin},
  month    = feb,
  year     = {2024},
  pages    = {1829},
  file     = {Rosenblatt et al. - 2024 - Data leakage inflates prediction performance in co.pdf:/home/alpron/Zotero/storage/P3NIEWBK/Rosenblatt et al. - 2024 - Data leakage inflates prediction performance in co.pdf:application/pdf},
}
@article{jadavji_editorial_2023,
title = {Editorial: {Reproducibility} in neuroscience},
Expand Down Expand Up @@ -575,7 +557,7 @@ @misc{demidenko_impact_2024
url = {http://biorxiv.org/lookup/doi/10.1101/2024.03.19.585755},
doi = {10.1101/2024.03.19.585755},
abstract = {Empirical studies reporting low test-retest reliability of individual blood oxygen-level dependent (BOLD) signal estimates in functional magnetic resonance imaging (fMRI) data have resurrected interest among cognitive neuroscientists in methods that may improve reliability in fMRI. Over the last decade, several individual studies have reported that modeling decisions, such as smoothing, motion correction and contrast selection, may improve estimates of test-retest reliability of BOLD signal estimates. However, it remains an empirical question whether certain analytic decisions consistently improve individual and group level reliability estimates in an fMRI task across multiple large, independent samples. This study used three independent samples (
Expand All @@ -599,7 +581,7 @@ @misc{demidenko_impact_2024
}
@article{soskic_garden_nodate,
title = {Garden of forking paths in {ERP} research – {Effects} of varying pre-processing and analysis steps in an {N400} experiment},
title = {Garden of forking paths in ERP research: Effects of varying pre-processing and analysis steps in an N400 experiment},
volume = {n/a},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/psyp.14628},
doi = {https://doi.org/10.1111/psyp.14628},
Expand Down

0 comments on commit f160348

Please sign in to comment.