From b55f1631ad3749151391899e5507dbb243696757 Mon Sep 17 00:00:00 2001 From: Dung Truong Date: Thu, 25 Jul 2024 10:29:26 -0400 Subject: [PATCH] merge master --- code/plugins/reformat_plugin.py | 62 +--- code/plugins/update_plugins.py | 31 +- code/plugins/update_plugins.sh | 12 + plugins/ARfitStudio/index.md | 1 + plugins/EEG-BIDS/index.md | 1 + plugins/ICLabel/index.md | 1 + plugins/NFT/index.md | 120 +------ plugins/NIMA/index.md | 1 + plugins/PACT/index.md | 286 ----------------- plugins/PACTools/index.md | 1 + plugins/PowPowCAT/index.md | 1 + plugins/SIFT/README.md | 78 +++++ plugins/SIFT/index.md | 51 ++- plugins/amica/index.md | 1 + plugins/clean_rawdata/README.md | 102 ++++++ plugins/clean_rawdata/index.md | 497 ++++++----------------------- plugins/dipfit/index.md | 1 + plugins/eegstats/index.md | 1 + plugins/fMRIb/index.md | 1 + plugins/firfilt/index.md | 1 + plugins/get_chanlocs/README.md | 11 + plugins/get_chanlocs/index.md | 239 +------------- plugins/groupSIFT/index.md | 1 + plugins/imat/index.md | 1 + plugins/nsgportal/README.md | 27 ++ plugins/nsgportal/index.md | 25 +- plugins/nwbio/index.md | 1 + plugins/relica/index.md | 1 + plugins/roiconnect/index.md | 1 + plugins/std_dipoleDensity/index.md | 1 + plugins/trimOutlier/index.md | 1 + plugins/viewprops/index.md | 1 + plugins/zapline-plus/index.md | 1 + 33 files changed, 460 insertions(+), 1101 deletions(-) delete mode 100644 plugins/PACT/index.md create mode 100644 plugins/SIFT/README.md create mode 100644 plugins/clean_rawdata/README.md create mode 100644 plugins/get_chanlocs/README.md create mode 100644 plugins/nsgportal/README.md diff --git a/code/plugins/reformat_plugin.py b/code/plugins/reformat_plugin.py index 8cf109fd..ddeb5af0 100644 --- a/code/plugins/reformat_plugin.py +++ b/code/plugins/reformat_plugin.py @@ -3,26 +3,7 @@ import shutil # open a text file ending with .md and append a paragraph to it -def reformat_plugin(dirpath, plugin_name): - plugins_dir = '../../plugins' - index_file = os.path.join(plugins_dir, 'index.md') - shutil.copyfile(os.path.join(dirpath, 'README.md'), index_file) - with open(index_file) as f: - text = f.read() - append_text = '''--- -layout: default -title: {plugin_name} -long_title: {plugin_name} -parent: Plugins ---- -'''.format(plugin_name=plugin_name) - text = append_text + text - with open(index_file, 'w') as out: - out.write(text) - -# open a text file ending with .md and append a paragraph to it -# Usage: python test.py .md -def append_to_file(filepath, filename, parent, output_file): +def reformat_wiki_pages(filepath, filename, parent, output_file): with open(filepath) as f: text = f.read() append_text = '''--- @@ -42,53 +23,36 @@ def reformat_plugin_dir(plugin_input_dir, plugin_name, order, plugin_type='wiki' plugin_output_dir = os.path.join('../../plugins', plugin_name) if not os.path.exists(plugin_output_dir): os.makedirs(plugin_output_dir) + # copy image directory from input to output dir if os.path.exists(os.path.join(plugin_input_dir, 'images')): shutil.copytree(os.path.join(plugin_input_dir, 'images'), os.path.join(plugin_output_dir, 'images'), dirs_exist_ok=True) index_file = os.path.join(plugin_output_dir, 'index.md') - if plugin_type == 'wiki': - shutil.copyfile(os.path.join(plugin_input_dir, 'Home.md'), index_file) - with open(index_file) as f: - text = f.read() - append_text = '''--- + shutil.copyfile(os.path.join(plugin_input_dir, 'README.md'), index_file) + with open(index_file) as f: + text = f.read() + append_text = '''--- layout: default title: {plugin_name} 
long_title: {plugin_name} parent: Plugins -categories: plugins has_children: true nav_order: {order} --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/{plugin_name}). '''.format(plugin_name=plugin_name, order=order) - text = append_text + text - with open(index_file, 'w') as out: - out.write(text) + text = append_text + text + with open(index_file, 'w') as out: + out.write(text) - for root, dirs, files in os.walk(plugin_input_dir): + if plugin_type == 'wiki': + wiki_plugin_input_dir = plugin_input_dir + '.wiki' + for root, dirs, files in os.walk(wiki_plugin_input_dir): for file in files: if file.endswith('.md') and not file.startswith('index') and not file.startswith('Home'): - append_to_file(os.path.join(plugin_input_dir, file), file.strip('.md'), plugin_name, os.path.join(plugin_output_dir, file)) - else: - shutil.copyfile(os.path.join(plugin_input_dir, 'README.md'), index_file) - with open(index_file) as f: - text = f.read() - append_text = '''--- -layout: default -title: {plugin_name} -long_title: {plugin_name} -parent: Plugins -nav_order: {order} ---- -To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/{plugin_name}). - -'''.format(plugin_name=plugin_name, order=order) - text = append_text + text - with open(index_file, 'w') as out: - out.write(text) - + reformat_wiki_pages(os.path.join(wiki_plugin_input_dir, file), file.strip('.md'), plugin_name, os.path.join(plugin_output_dir, file)) # main def main(): if len(sys.argv) != 5: diff --git a/code/plugins/update_plugins.py b/code/plugins/update_plugins.py index 5d166afb..ff179c17 100644 --- a/code/plugins/update_plugins.py +++ b/code/plugins/update_plugins.py @@ -17,10 +17,15 @@ def update_repo(repo, order, plugin_type='readme'): os.chdir(repo_path) run_command('git pull') else: - if plugin_type == "wiki": - run_command(f'git clone https://github.com/sccn/{repo}.wiki.git {repo_path}') + run_command(f'git clone https://github.com/sccn/{repo}.git {repo_path}') + + if plugin_type == "wiki": + wiki_repo_path = f"{repo_path}.wiki" + if os.path.exists(wiki_repo_path): + os.chdir(wiki_repo_path) + run_command('git pull') else: - run_command(f'git clone https://github.com/sccn/{repo}.git {repo_path}') + run_command(f'git clone https://github.com/sccn/{repo}.wiki.git {wiki_repo_path}') os.chdir(current_dir) script = 'reformat_plugin.py' @@ -31,20 +36,26 @@ def update_repo(repo, order, plugin_type='readme'): # if 'github' not in current directory, create it if not os.path.exists('github'): os.makedirs('github') - if len(sys.argv) == 0: + wiki_plugins = ['SIFT', 'get_chanlocs', 'NFT', 'PACT', 'nsgportal', 'clean_rawdata'] + readme_plugins = ['ARfitStudio', 'roiconnect', 'EEG-BIDS', 'trimOutlier', 'groupSIFT', 'nwbio', 'ICLabel', 'dipfit', 'eegstats', 'PowPowCAT', 'PACTools', 'zapline-plus', 'amica', 'fMRIb', 'relica', 'std_dipoleDensity', 'imat', 'viewprops', 'cleanline','NIMA', 'firfilt'] + if len(sys.argv) == 1: order = 1 - wiki_plugins = ['SIFT', 'get_chanlocs', 'NFT', 'PACT', 'nsgportal', 'clean_rawdata'] for plugin in wiki_plugins: update_repo(plugin, order, 'wiki') order += 1 - readme_plugins = ['ARfitStudio', 'roiconnect', 'EEG-BIDS', 'trimOutlier', 'groupSIFT', 'nwbio', 'ICLabel', 'dipfit', 'eegstats', 'PowPowCAT', 'PACTools', 'zapline-plus', 'amica', 'fMRIb', 'relica', 'std_dipoleDensity', 'imat', 'viewprops', 'cleanline','NIMA', 'firfilt'] for plugin in readme_plugins: update_repo(plugin, order, "readme") order += 1 - elif 
len(sys.argv) == 3: + elif len(sys.argv) == 2: plugin_name = sys.argv[1] - plugin_type = sys.argv[2] - update_repo(plugin_name, 1, plugin_type) + if plugin_name not in wiki_plugins and plugin_name not in readme_plugins: + print(f"Plugin {plugin_name} not found.") + sys.exit(1) + + plugin_type = 'wiki' if plugin_name in wiki_plugins else 'readme' + plugin_order = wiki_plugins.index(plugin_name) + 1 if plugin_type == 'wiki' else len(wiki_plugins) + readme_plugins.index(plugin_name) + 1 + + update_repo(plugin_name, plugin_order, plugin_type) else: - print('Usage: python update_plugins.py ') + print('Usage: python update_plugins.py ') sys.exit(1) diff --git a/code/plugins/update_plugins.sh b/code/plugins/update_plugins.sh index e69de29b..d9d4d831 100644 --- a/code/plugins/update_plugins.sh +++ b/code/plugins/update_plugins.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +DIRECTORY="/path/to/directory" + +if [ -d "$DIRECTORY" ]; then + # Directory exists, pull changes + cd "$DIRECTORY" + git pull +else + # Directory doesn't exist, clone from GitHub + git clone https://github.com/username/repository.git "$DIRECTORY" +fi \ No newline at end of file diff --git a/plugins/ARfitStudio/index.md b/plugins/ARfitStudio/index.md index e053f88e..5ed63a2f 100644 --- a/plugins/ARfitStudio/index.md +++ b/plugins/ARfitStudio/index.md @@ -3,6 +3,7 @@ layout: default title: ARfitStudio long_title: ARfitStudio parent: Plugins +has_children: true nav_order: 7 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/ARfitStudio). diff --git a/plugins/EEG-BIDS/index.md b/plugins/EEG-BIDS/index.md index a2e83ef5..4d427328 100644 --- a/plugins/EEG-BIDS/index.md +++ b/plugins/EEG-BIDS/index.md @@ -3,6 +3,7 @@ layout: default title: EEG-BIDS long_title: EEG-BIDS parent: Plugins +has_children: true nav_order: 9 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/EEG-BIDS). diff --git a/plugins/ICLabel/index.md b/plugins/ICLabel/index.md index 2506db37..1a2a3b90 100644 --- a/plugins/ICLabel/index.md +++ b/plugins/ICLabel/index.md @@ -3,6 +3,7 @@ layout: default title: ICLabel long_title: ICLabel parent: Plugins +has_children: true nav_order: 13 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/ICLabel). diff --git a/plugins/NFT/index.md b/plugins/NFT/index.md index fe97af36..e1371129 100644 --- a/plugins/NFT/index.md +++ b/plugins/NFT/index.md @@ -3,119 +3,31 @@ layout: default title: NFT long_title: NFT parent: Plugins -categories: plugins has_children: true nav_order: 3 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/NFT). -### Open Source Matlab Toolbox for Neuroelectromagnetic Forward Head Modeling +Pre-compiled binaries for the following 3rd party programs are distributed +within the NFT toolbox for convinience of the users. The binaries are compiled +for 32 and 64 bit Linux distributions. -![right](NFTsmall.jpg "wikilink") +All of these programs have opensource licenses and provide full source-code. +Please visit home-pages of individual programs for more information on usage, +source-code and license information. -### What is NFT? 
+ASC: Adaptive skeleton climbing +homepage: http://www.cse.cuhk.edu.hk/~ttwong/papers/asc/asc.html -Neuroelectromagnetic Forward Modeling Toolbox (NFT) is a MATLAB toolbox -for generating realistic head models from available data (MRI and/or -electrode locations) and for computing numerical solutions for solving -the forward problem of electromagnetic source imaging (Zeynep Akalin -Acar & S. Makeig, 2010). NFT includes tools for segmenting scalp, skull, -cerebrospinal fluid (CSF) and brain tissues from T1-weighted magnetic -resonance (MR) images. The Boundary Element Method (BEM) is used for the -numerical solution of the forward problem. After extracting the -segmented tissue volumes, surface BEM meshes may be generated. When a -subject MR image is not available, a template head model may be warped -to 3-D measured electrode locations to obtain an individualized BEM head -model. Toolbox functions can be called from either a graphic user -interface (gui) compatible with EEGLAB (sccn.ucsd.edu/eeglab), or from -the MATLAB command line. Function help messages and a user tutorial are -included. The toolbox is freely available for noncommercial use and open -source development under the GNU Public License. +QSLIM: Quadric-based surface simplification +homepage: http://mgarland.org/software/qslim.html -### Why NFT? +BEM_MATRIX: The METU-FP Toolkit +homepage: http://www.eee.metu.edu.tr/metu-fp/ -The NFT is released under an open source license, allowing researchers -to contribute and improve on the work for the benefit of the -neuroscience community. By bringing together advanced head modeling and -forward problem solution methods and implementations within an easy to -use toolbox, the NFT complements EEGLAB, an open source toolkit under -active development. Combined, NFT and EEGLAB form a freely available EEG -(and in future, MEG) source imaging solution. +PROCMESH: Mesh correction and processing. No web page yet. Please contact NFT developers for source code. -The toolbox implements the major aspects of realistic head modeling and -forward problem solution from available subject information: +MATITK: Matlab and ITK +homepage: http://www.sfu.ca/~vwchu/matitk.html -1. Segmentation of T1-weighted MR images: The preferred method of - generating a realistic head model is to use a 3-D whole-head - structural MR image of the subject's head. The toolbox can generate - a segmentation of scalp, skull, CSF and brain tissues from a - T1-weighted image. - -2. High-quality BEM meshes: The accuracy of the BEM solution depends on - the quality of the underlying mesh that models tissue - conductance-change boundaries. To avoid numerical instabilities, the - mesh must be topologically correct with no self-intersections. It - should represent the surface using high-quality elements while - keeping the number of elements as small as possible. The NFT can - create high-quality linear surface BEM meshes from the head - segmentation. - -3. Warping a template head model: When a whole-head structural MR image - of the subject is not available, a semi-realistic head model can be - generated by warping a standard template BEM mesh to the digitized - electrode coordinates (instead of vice versa). - -4. Registration of electrode positions with the BEM mesh: The digitized - electrode locations and the BEM mesh must be aligned to compute - accurate forward problem solutions and lead field matrices. - -5. 
Accurate high-performance forward problem solution: The NFT uses a - high-performance BEM implementation from the open source METU-FP - Toolkit for bioelectromagnetic field computations. - -### Required Resources - -Matlab 7.0 or later running under any operating system (Linux, Windows). -A large amount of RAM is useful - at least 2 GB (4-8 GB recommended for -forward problem solution of realistic head models). The Matlab Image -Processing toolbox is also recommended. - -### NFT Reference Paper - -Zeynep Akalin Acar & Scott Makeig, [Neuroelectromagnetic Forward Head -Modeling -Toolbox](http://sccn.ucsd.edu/%7Escott/pdf/Zeynep_NFT_Toolbox10.pdf). -Journal of Neuroscience Methods, 2010 - -Download --------- - -To download the NFT, go to the [NFT download -page](http://sccn.ucsd.edu/nft/). - -NFT User's Manual ------------------ - -- [Chapter 01: Getting Started with NFT](Chapter_01_Getting_Started_with_NFT "wikilink") -- [Chapter 02: Head Modeling from MR Images](Chapter_02_Head_Modeling_from_MR_Images "wikilink") -- [Chapter 03: Forward Model Generation](Chapter_03_Forward_Model_Generation "wikilink") -- [Chapter 04: NFT Examples](Chapter_04_NFT_Examples "wikilink") -- [Chapter 05: NFT Commands and Functions](Chapter_05_NFT_Commands_and_Functions "wikilink") -- [Appendix A: BEM Mesh Format](NFT_Appendix_A) -- [Appendix B: Function Reference](NFT_Appendix_B) -- [Appendix C: Effect of brain-to-skull conductivity ratio estimate](NFT_Appendix_C) - - -- [Click here to download the NFT User Manual as a PDF book](NFT_Tutorial.pdf) - -
- -Creation and documentation by: - -Zeynep Akalin Acar - -Project Scientist - -zeynep@sccn.ucsd.edu - -
+Note: The MATITK shared libraries are installed in the 'mfiles' directory. diff --git a/plugins/NIMA/index.md b/plugins/NIMA/index.md index 4deb543d..6005faa7 100644 --- a/plugins/NIMA/index.md +++ b/plugins/NIMA/index.md @@ -3,6 +3,7 @@ layout: default title: NIMA long_title: NIMA parent: Plugins +has_children: true nav_order: 26 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/NIMA). diff --git a/plugins/PACT/index.md b/plugins/PACT/index.md deleted file mode 100644 index c89bded3..00000000 --- a/plugins/PACT/index.md +++ /dev/null @@ -1,286 +0,0 @@ ---- -layout: default -title: PACT -long_title: PACT -parent: Plugins -categories: plugins -has_children: true -nav_order: 4 ---- -To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/PACT). - -What is PACT? -------------- - -PACT is a plug-in for EEGLAB. PACT stands for (cross-frequency) -Phase-Amplitude Coupling Toolbox. See the github repository at -[](https://github.com/sccn/PACT) to submit -bug reports or modify the codebase. - -What data does PACT take? -------------------------- - -Currently it takes continuous data only. PACT was originally developed -for electrocorticographic (ECoG) data analysis. - -What does PACT do? ------------------- - -In preparatory exploration, you may run a brute-force computation of PAC -for all combinations of low-frequency oscillation (LFO) and -highest-amplitude sampling (HAS) center frequencies. This computation -may take a long time depending on the frequency resolution and bandwidth -you specify. To compute each PAC value for a channel -frequency-by-frequency combination, PACT performs the following steps: - -1\. Band-pass filter the data to extract HFO and LFO signals. - -2\. Hilbert transform the HFO signal to extract a time series of -instantaneous amplitudes. - -3\. Hilbert transform the LFO signal to extract a time series of -instantaneous phases. - -4\. (Highest-Amplitude Sampling, HAS): apply a threshold to select the -N% highest HFO amplitudes. Obtain the HAS index from this. - -5\. For LFO phase, apply HAS index. - -6\. Combine HAS-indexed HFO and LFO indices into complex-valued phasors. - -7\. Compute the Modulation Index (Canotly et al., 2006) for the -collection of HAS phasors constructed above. - -8\. Generate a collection of surrogate data by circularly permuting the -phase time-series relative to the amplitude series. - -9\. Compute a surrogate set of Modulation Indices for which the null -hypothesis should hold and determine a statistical threshold from their -distribution. - -10\. Perform multiple comparison corrections based on the number of -channels for which you are estimating PAC significance. - -Note: To compute and perform statistics on the Mean Resultant Vector -Length, PACT uses CircStat (Berens, 2009). To compute phase-sorted -amplitude statistics, PACT uses K-S and Chi-square tests. - -What do the PACT GUIs look like? --------------------------------- - -![thumb\|400px\|Figure 1. PACT Seen from EEGLAB main -GUI.](Demo01.jpg) - -

Figure 1. PACT Seen from EEGLAB main GUI.

- -![thumb\|400px\|Figure 2. Main -GUI.](Demo02.jpg) - -

Figure 2. Main GUI

- -When successfully installed, the item -'PACT' should appear under 'Tools' (Figure 1). Currently it has 12 -menus. - -- Compute PAC: This launches the main GUI (Figure 2). When press ok, - computation starts. When it done, statistics set up window pops up - (described later). - - Phase freq range \[lohz hihz\] - - Amp freq range \[lohz hihz\] - - Highest amplitude sampling rate \[%\] - - Sampling pool: This is for the experimental purpose. Always - choose 'Each channel'. - - If handpicked, event type and win size \[+/- ms\]: If you want - to run the analysis using the data around event markers - generated by either VidEd or MoBILAB, use this. - - Significance threshold \[p\] - - Number of surrogation \[N\]: This determines how many data - points you want to generate surrogate data that represents for - distribution of null hypothesis. - - Number of phase bins \[N\]: This affects sensitivity of circular - statistics. Don't use too extremely large value (e.g. \>100). - -![thumb\|400px\|Figure 3. Detected HFOs (shown in -red).](Demo06.jpg) - -

Figure 3. Detected HFOs (shown in red)

- -- Plot HFO-marked Raw data: This plot looks like Figure 3. -- Invert polarity: This is to invert EEG polarity by simply - multiplying -1 to all the data. - -![thumb\|400px\|Figure 4. Manually marking HFOs. Left, using VisEd. -Right, using customized MoBILAB plots.](Demo04.jpg) - -

Figure 4. Manually marking HFOs. Left, using VisEd. Right, using customized MoBILAB plots

- -- Handpick HFO(VisEd): This plot looks like Figure 4 left. You can - choose the marking point by mouse click. For detailed explanation - how to use this VisEd, see VisEd help. -- Handpick HFO(Mobilab): This plot looks like Figure 4 left. - Similarly, you can choose the marking point by mouse click. Use - whichever suit you. -- Copy event markers: This is to copy event markers from dataset 1 to - dataset 2. - -![thumb\|400px\|Figure 5. Statistics set -up.](Demo03.jpg) - -

Figure 5. Statistics set up

- -- Set up statistics: This shows a GUI that look like Figure 5. -- Plot Modulation Index: This shows a plot that look like Figure 7/8 - top right. -- Plot Angular hist (bar) -- Plot Angular hist (polar): This shows a plot that look like Figure - 7/8 bottom left. -- Plot phase-sorted amp: This shows a plot that look like Figure 7/8 - bottom left. Each bar represents mean amplitude of each phase bin. - -![thumb\|400px\|Figure 6. Scanning parameter space consists of LFO phase -frequencies and HAS rates.](Demo05.jpg) - -

Figure 6. Scanning parameter space consists of LFO phase frequencies and HAS rates

- -- Scan LFO freqs (very slow!): This pops up GUI like Figure 6. Start - with N = 10 or around, and HAS rate of 0.3-10. Color normalization - should be used when plotting Mean Resultant Vector Length. - -What plots does PACT output? ----------------------------- - -1\. LFO-HAS parameter space scan results (combination of LFO phase -frequencies and HAS rates; the measure used may be either Mean Resultant -Vector Length or Modulation Index. - -2\. Using Modulation Index with a confidence interval of 95% or 99%. - -3a. Angular histogram displayed in a polar plot using Mean Resultant -Vector Length. - -3b. Angular histogram in a rectangular plot with phase unwrapped on the -x-axis. - -4\. Bar graphs of LFO phase-sorted HFO-amplitudes. - -Note that the number of phase bins in Figures 3a and 3b is determined by -user input and affects the results of the circular statistics. - -How PACT can be used, and how its output can be interpreted: A demo example ---------------------------------------------------------------------------- - -These plots show examples in which PACT was applied to -electrocorticographic data for which a neurologist judged the channel( -Ch) 1 signal to be pathological and the Ch2 signal to be normal. - -First, exploratory LFO frequency scans were performed (Figures 7 and 8, -top left; they are the same). We needed to run PACT several times, -adjusting parameters; the result that showed the difference most clearly -is plotted here. Mean Resultant Vector Length was chosen as the -dependent variable, since it is naturally normalized from 0 to 1 and is -therefore convenient for comparisons across channels. This plot shows -two noticeable clusters of interest that showed differences between Ch1 -and Ch2:.One is (LFO 0.5 Hz, HAS 3%,) the other (LFO 1.5 Hz, HAS 1.5%). -We decided to run analysis for both combinations of parameters. -Statistical significance level was set to 1% with Bonferroni-Holm -correction (Note: here, since we have only 2 items to compare, B-H is -the same as Bonferroni). - -![](PACT05Hz.jpg) - -

Figure 7. LFO 0.5Hz, HAS 3%, p < 0.01, CI 95%. Top left, LFO-HAS parameter space scan results. Top right, Modulation Index. Bottom left, Mean Resultant Vector Length. Bottom right, phase-sorted HFO amplitudes

- -Figure 7 shows the result of choosing parameters (LFO0.5 Hz, HAS 3%). The Modulation Index -for Ch1 is larger than that for Ch2. Only the Ch1 value reached -statistical significance (Figure 7, top right; a horizontal bar in the -graph shows the 95% confidence interval). By Mean Resultant Vector -length, both channel signals exhibited showed phase concentrations, -though their preferred phases were different -- almost opposite (Figure -7, bottom left). Phase-sorted HFO amplitude also indicated that Ch1 has -a preferred phase, and the Ch1 amplitude distribution over phase bins -deviates significantly from uniform, whereas Ch2 does not show this -effect (Figure 7, bottom right). Note also the large difference in -amplitude scales. - -![](PACT15Hz.jpg)

Figure 8. LFO 1.5Hz, HAS 1.5%, p < 0.01, CI 95%. Top right, Modulation Index. Bottom left, Mean Resultant Vector Length. Bottom right, Phase-sorted HFO amplitude. Note that the Ch1 Modulation Index is much larger than the confidence interval compared to Figure 7

-
-Figure 8 shows the result of choosing the parameters (LFO 1.5 Hz, HAS 1.5%). Modulation Index, Mean Resultant Vector length, and Phase-sorted HFO amplitude all showed similar properties to the results shown in Figure 7. However, note that the Ch1 Modulation Index is much larger than its confidence interval level; probably this combination of parameters better fits the pathological pattern in this channel signal.
-
-Download Link
--------------
-
-Caution and Limitation
-----------------------
-
-The 'Handpick HFO' menu does not work with newer Matlab versions, which no longer support the *graphics.cursorbar* object. To use this function, use Matlab 2013 or older as a workaround.
-
-Scanning Phase-frequency vs. HFO-frequency (07/24/2019 updated)
----------------------------------------------------------------
-
-In calculating phase-amplitude coupling, a typical problem is how to determine the target frequencies in both phase and amplitude. To do this simply, in ver. 0.30 I implemented a function to generate a phase-amplitude frequency-by-frequency grid plot. If you need to reduce the number of channels, do so by using the EEGLAB GUI beforehand. Otherwise, this frequency scan process does not need any preprocessing by PACT; it does the job itself. One extra parameter you have to choose is the highest amplitude sampling (HAS) rate, which specifies the right-tail cutoff for the amplitude distribution for each channel. Note that this only picks up the highest amplitudes after high-frequency band-pass filtering, so if there is a high-frequency (or broadband) artifact, HAS will pick it up. In this case, you would want to clean the data using EEGLAB functions before performing this analysis.
-
-In the example below, one can easily find that Ch21 showed the strongest PAC between 3-Hz phase and 80-Hz HFO amplitude, followed by Ch16. Ch18 also showed some PAC, but it coupled with 1.7 Hz instead of 3 Hz, so this could be something different. If one chooses to show the mean vector length instead of Canolty's modulation index (MI), it allows one to evaluate the same measure without the effect of HFO amplitude. The calculated values are stored under EEG.pacScan.
-
-![200px](PactUpdate1crop.jpg)
-
-![600px](PactUpdate2.jpg)
-
-### How to obtain mean HFO gamma amplitude
-
-1. In the plot above, confirm that the peak PAC value is observed at Ch21, phase 3.2 Hz, amplitude 80 Hz.
-2. Type 'EEG.pacScan' in the command line. Among the variables, find 'meanHfoAmp'. This is the mean HFO amplitude in microvolts. If you are not sure about dimensions, see 'dataDimensions'. We know our channel and freq-freq window of interest, which are 21, 3.2 Hz, and 80 Hz, respectively. Based on these parameters of interest, we obtain indices for them: 21 for the channel order, 7 for the phase freq (see 'phaseFreqEdge'--3.2 Hz is between the 7th and 8th edges, so we select 7), and 1 for the HFO freq.
-3. We enter EEG.pacScan.meanHfoAmp(21,7,1) in the command window. It returned '35.0391', which means the mean HFO amplitude during the selected HFO frames was 35.0391 microvolts.
-
-![600px](PactUpdate3.jpg)
-
-Bug report, request, comment
-----------------------------
-
-Please post bugs and suggestions to the EEGLAB mailing list.
-
-Reference
----------
-
-Makoto Miyakoshi, Arnaud Delorme, Tim Mullen, Katsuaki Kojima, Scott Makeig, Eishi Asano. *Automated detection of cross-frequency coupling in the electrocorticogram for clinical inspection.* Conf Proc IEEE Eng Med Biol Soc. 2013:3282-3285.
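For readers who want to see what the PAC computation outlined on the (removed) PACT page above boils down to, here is a minimal sketch. It is not PACT's actual implementation: it assumes a single continuous channel `x` at sampling rate `srate`, uses generic Butterworth filters in place of PACT's filter settings, and the band edges, HAS rate, surrogate count, and variable names are placeholders chosen only for illustration.

``` matlab
% Minimal sketch (not PACT itself): Canolty-style phase-amplitude coupling.
% Assumes: x = continuous single-channel signal (vector), srate = sampling rate in Hz.
lfoBand = [0.5 2];    % low-frequency oscillation band (Hz), example value
hfoBand = [70 90];    % high-frequency oscillation band (Hz), example value
hasRate = 0.03;       % highest-amplitude sampling rate (3%), example value

x = double(x(:));

% Steps 1-3: band-pass to LFO/HFO, then Hilbert transform for amplitude and phase.
[bL, aL] = butter(2, lfoBand/(srate/2), 'bandpass');
[bH, aH] = butter(2, hfoBand/(srate/2), 'bandpass');
lfoPhase = angle(hilbert(filtfilt(bL, aL, x)));
hfoAmp   = abs(hilbert(filtfilt(bH, aH, x)));

% Steps 4-5: highest-amplitude sampling index (top N% of HFO amplitudes).
nKeep = max(1, round(hasRate * numel(hfoAmp)));
[~, order] = sort(hfoAmp, 'descend');
hasIdx = order(1:nKeep);

% Steps 6-7: complex phasors, Modulation Index, and mean resultant vector length.
phasors = hfoAmp(hasIdx) .* exp(1i * lfoPhase(hasIdx));
modulationIndex  = abs(mean(phasors));
meanVectorLength = abs(mean(exp(1i * lfoPhase(hasIdx))));

% Steps 8-9: surrogate distribution by circularly shifting the phase series.
nSurro  = 200;
surroMI = zeros(nSurro, 1);
for itSurro = 1:nSurro
    phaseShifted = circshift(lfoPhase, randi(numel(lfoPhase) - 1));
    surroMI(itSurro) = abs(mean(hfoAmp(hasIdx) .* exp(1i * phaseShifted(hasIdx))));
end
pValue = mean(surroMI >= modulationIndex);   % uncorrected p value
```

In PACT itself these steps are run per channel from the GUI, and the resulting significance threshold is corrected for the number of channels, as described in step 10 above.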
diff --git a/plugins/PACTools/index.md b/plugins/PACTools/index.md
index 1a522456..1fb8850e 100644
--- a/plugins/PACTools/index.md
+++ b/plugins/PACTools/index.md
@@ -3,6 +3,7 @@ layout: default
 title: PACTools
 long_title: PACTools
 parent: Plugins
+has_children: true
 nav_order: 17
 ---
 To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/PACTools).
diff --git a/plugins/PowPowCAT/index.md b/plugins/PowPowCAT/index.md
index 502e2e05..47a313e3 100644
--- a/plugins/PowPowCAT/index.md
+++ b/plugins/PowPowCAT/index.md
@@ -3,6 +3,7 @@ layout: default
 title: PowPowCAT
 long_title: PowPowCAT
 parent: Plugins
+has_children: true
 nav_order: 16
 ---
 To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/PowPowCAT).
diff --git a/plugins/SIFT/README.md b/plugins/SIFT/README.md
new file mode 100644
index 00000000..385e92a7
--- /dev/null
+++ b/plugins/SIFT/README.md
@@ -0,0 +1,78 @@
+---
+layout: default
+title: README
+long_title: README
+parent: SIFT
+grand_parent: Plugins
+---
+![263416749-1abc1d2d-36bb-4cfb-9328-b57a96044f55](https://github.com/user-attachments/assets/b45a5caa-6b39-4291-b137-125132e5ade0)
+
+## The Source Information Flow Toolbox
+
+Developed by: Tim Mullen 2009-
+Maintained: Tim Mullen and Arnaud Delorme
+
+SIFT is an EEGLAB-compatible toolbox for the analysis and visualization of
+multivariate causality and information flow between sources of
+electrophysiological (EEG/ECoG/MEG) activity. It consists of a suite of
+command-line functions with an integrated Graphical User Interface for
+easy access to multiple features. There are currently six modules: data
+preprocessing, model fitting and connectivity estimation, statistical
+analysis, visualization, group analysis, and neuronal data simulation.
+
+Methods currently implemented include:
+
+- Preprocessing routines
+- Time-varying (adaptive) multivariate autoregressive modeling
+  - Granger causality
+  - directed transfer function (DTF, dDTF)
+  - partial directed coherence (PDC, GPDC, PDCF, RPDC)
+  - multiple and partial coherence
+  - event-related spectral perturbation (ERSP)
+  - and many other measures...
+- Bootstrap/resampling and analytical statistics
+  - event-related (difference from baseline)
+  - between-condition (test for condition A = condition B)
+- A suite of programs for interactive visualization of information
+  flow dynamics across time and frequency (with optional 3D
+  visualization in MRI-coregistered source-space).
+
+## Acknowledgements
+
+- Arnaud Delorme was instrumental in the development of the SIFT framework and integration into EEGLAB as well as contributing initial BrainMovie3D code.
+- Christian Kothe contributed the arg() framework for function I/O and auto-GUI generation +- Wes Thompson consulted on statistics and methods for bayesian smoothing and multi-subject analysis +- Alejandro Ojeda contributed routines for fast ridge regression + +SIFT makes use of routines from (or is inspired by) the following open-source packages: + +- [ARFIT](https://github.com/tapios/arfit) (Schneider et al) +- [TSA/Biosig](http://octave.sourceforge.net/tsa/) (Schlögl et al) +- [Chronux](https://chronux.org) (Mitra et al) +- [DAL/SCSA](https://ttic.uchicago.edu/~ryotat/softwares/dal/) (Tomioka / Haufe et al) +- [BCILAB](http://sccn.ucsd.edu/wiki/BCILAB) (Kothe et al) + + +## Official Website + +[SIFT page in the SCCN wiki](http://sccn.ucsd.edu/wiki/SIFT) + +## Citation + +If you find this toolbox useful for your research, PLEASE include the following citations with any publications and/or presentations which make use of SIFT: + +1. Mullen, T. R. (2014). The dynamic brain: Modeling neural dynamics and interactions from human electrophysiological recordings (Order No. 3639187). Available from Dissertations & Theses @ University of California; ProQuest Dissertations & Theses A&I. (1619637939) +2. Delorme, A., Mullen, T., Kothe C., Akalin Acar, Z., Bigdely Shamlo, N., Vankov, A., Makeig, S. (2011) "EEGLAB, SIFT, NFT, BCILAB, and ERICA: New tools for advanced EEG/MEG processing." Computational Intelligence and Neuroscience vol. 2011, Article ID 130714, 12 pages. + +## License + +SIFT is licensed under the GPL-2, see LICENSE.txt +ANY USE OF SIFT IMPLIES THAT YOU HAVE READ AND AGREE WITH THE TERMS AND CONDITIONS OF THE SIFT LICENSE AS STATED BELOW: + +## ADDITIONAL NOTE + +SIFT is designed and distributed for research purposes only. SIFT should not be used for medical purposes. The authors accept no responsibility for its use in this manner. + +## Verions + +v1.6 - fix conflict with BrainMovie plugin. Fix minor GUI issues. diff --git a/plugins/SIFT/index.md b/plugins/SIFT/index.md index 3e2b8b51..a20e25b6 100644 --- a/plugins/SIFT/index.md +++ b/plugins/SIFT/index.md @@ -3,18 +3,17 @@ layout: default title: SIFT long_title: SIFT parent: Plugins -categories: plugins has_children: true nav_order: 1 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/SIFT). +![263416749-1abc1d2d-36bb-4cfb-9328-b57a96044f55](https://github.com/user-attachments/assets/b45a5caa-6b39-4291-b137-125132e5ade0) -![700px\|link=](https://github.com/sccn/SIFT/assets/1872705/1abc1d2d-36bb-4cfb-9328-b57a96044f55) +## The Source Information Flow Toolbox -# The Source Information Flow Toolbox tutorial (SIFT) - -Developed and Maintained by: Tim Mullen and Arnaud Delorme (SCCN, INC, UCSD) 2009 +Developed by: Tim Mullen 2009- +Maintained: Tim Mullen and Arnaud Delorme SIFT is an EEGLAB-compatible toolbox for the analysis and visualization of multivariate causality and information flow between sources of @@ -28,7 +27,7 @@ Methods currently implemented include: - Preprocessing routines - Time-varying (adaptive) multivariate autoregessive modeling - - granger causality + - Granger causality - directed transfer function (DTF, dDTF) - partial directed coherence (PDC, GPDC, PDCF, RPDC) - multiple and partial coherence @@ -40,3 +39,43 @@ Methods currently implemented include: - A suite of programs for interactive visualization of information flow dynamics across time and frequency (with optional 3D visualization in MRI-coregistered source-space). 
+ +## Acknowledgements + +- Arnaud Delorme was instrumental in the development of the SIFT framework and integration into EEGLAB as well as contributing initial BrainMovie3D code. +- Christian Kothe contributed the arg() framework for function I/O and auto-GUI generation +- Wes Thompson consulted on statistics and methods for bayesian smoothing and multi-subject analysis +- Alejandro Ojeda contributed routines for fast ridge regression + +SIFT makes use of routines from (or is inspired by) the following open-source packages: + +- [ARFIT](https://github.com/tapios/arfit) (Schneider et al) +- [TSA/Biosig](http://octave.sourceforge.net/tsa/) (Schlögl et al) +- [Chronux](https://chronux.org) (Mitra et al) +- [DAL/SCSA](https://ttic.uchicago.edu/~ryotat/softwares/dal/) (Tomioka / Haufe et al) +- [BCILAB](http://sccn.ucsd.edu/wiki/BCILAB) (Kothe et al) + + +## Official Website + +[SIFT page in the SCCN wiki](http://sccn.ucsd.edu/wiki/SIFT) + +## Citation + +If you find this toolbox useful for your research, PLEASE include the following citations with any publications and/or presentations which make use of SIFT: + +1. Mullen, T. R. (2014). The dynamic brain: Modeling neural dynamics and interactions from human electrophysiological recordings (Order No. 3639187). Available from Dissertations & Theses @ University of California; ProQuest Dissertations & Theses A&I. (1619637939) +2. Delorme, A., Mullen, T., Kothe C., Akalin Acar, Z., Bigdely Shamlo, N., Vankov, A., Makeig, S. (2011) "EEGLAB, SIFT, NFT, BCILAB, and ERICA: New tools for advanced EEG/MEG processing." Computational Intelligence and Neuroscience vol. 2011, Article ID 130714, 12 pages. + +## License + +SIFT is licensed under the GPL-2, see LICENSE.txt +ANY USE OF SIFT IMPLIES THAT YOU HAVE READ AND AGREE WITH THE TERMS AND CONDITIONS OF THE SIFT LICENSE AS STATED BELOW: + +## ADDITIONAL NOTE + +SIFT is designed and distributed for research purposes only. SIFT should not be used for medical purposes. The authors accept no responsibility for its use in this manner. + +## Verions + +v1.6 - fix conflict with BrainMovie plugin. Fix minor GUI issues. diff --git a/plugins/amica/index.md b/plugins/amica/index.md index 0d9c4b07..61ea425e 100644 --- a/plugins/amica/index.md +++ b/plugins/amica/index.md @@ -3,6 +3,7 @@ layout: default title: amica long_title: amica parent: Plugins +has_children: true nav_order: 19 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/amica). diff --git a/plugins/clean_rawdata/README.md b/plugins/clean_rawdata/README.md new file mode 100644 index 00000000..9281e68b --- /dev/null +++ b/plugins/clean_rawdata/README.md @@ -0,0 +1,102 @@ +--- +layout: default +title: README +long_title: README +parent: clean_rawdata +grand_parent: Plugins +--- +# Clean_rawdata EEGLAB plug-in + +The Clean Rawdata plug-in (version 2.0) interface has been redesigned and will soon become the default EEGLAB method for removing artifacts from EEG and related data. The plug-in detects and can separate low-frequency drifts, flatline and noisy channels from the data. It can also apply ASR (automated subspace removal) to detect and reject or remove high-amplitude non-brain ('artifact') activity (produced by eye blinks, muscle activity, sensor motion, etc.) by comparing its structure to that of known artifact-free reference data, thereby revealing and recovering (possibly smaller) EEG background activity that lies outside the subspace spanned by the artifact processes. 
+
+**Note:** This plug-in uses the Signal Processing toolbox for pre- and post-processing of the data (removing drifts, channels and time windows); the core ASR method (clean_asr) does not require this toolbox but you will need high-pass filtered data if you use it directly.
+
+# This project needs you
+
+We need the community's help to maintain this project. Please review existing issues and issue pull requests. A section in this documentation with links to all the existing methodological papers is also needed.
+
+# Credit
+
+This plug-in, clean_rawdata, uses methods (e.g., Artifact Subspace
+Reconstruction, ASR) by Christian Kothe from the BCILAB Toolbox
+(Kothe & Makeig, 2013), first wrapped into an EEGLAB plug-in by
+Makoto Miyakoshi and further developed by Arnaud Delorme with
+Scott Makeig.
+
+This plug-in cleans raw EEG data. Methods from the BCILAB toolbox
+are being used (in particular Artifact Subspace Reconstruction)
+designed by Christian Kothe.
+
+These functions were wrapped up into an EEGLAB plug-in by Makoto
+Miyakoshi, then later by Arnaud Delorme with input from Scott
+Makeig.
+
+The private folder contains 3rd party utilities, including:
+- findjobj.m Copyright (C) 2007-2010 Yair M. Altman
+- asr_calibrate.m and asr_process.m
+  Copyright (C) 2013 The Regents of the University of California
+  Note that these functions are not free for commercial use.
+- sperhicalSplineInterpolate.m Copyright (C) 2009 Jason Farquhar
+- oct_fftfilt Copyright (C) 1996, 1997 John W. Eaton
+- utility functions from the BCILAB toolbox Copyright (C) 2010-2014 Christian Kothe
+
+The folder "manopt" contains the Matlab toolbox for optimization on manifolds.
+
+# Graphic interface
+
+Below we detail the GUI interface. Individual functions contain additional help information.
+
+![](gui_interface.png)
+
+## High pass filter the data
+
+Check checkbox **(1)** if the data have not been high-pass filtered yet. If you use this option, the edit box in **(2)** allows setting the transition band for the high-pass filter in Hz. This is formatted as [transition-start, transition-end]. Default is 0.25 to 0.75 Hz.
+
+## Reject bad channels
+
+Check checkbox **(3)** to reject bad channels. Option **(4)** allows removal of flat channels. The edit box sets the maximum tolerated (non-rejected) flatline duration in seconds. If a channel has a longer flatline than this, it will be considered abnormal and rejected. The default is 5 seconds. Option **(5)** sets the Line Noise criterion: if a channel has more line noise relative to its signal than this value (in standard deviations based on the total channel signal), it is considered abnormal. The default is 4 standard deviations. Option **(6)** sets the minimum channel correlation. If a channel is correlated at less than this value to an estimate based on other nearby channels, it is considered abnormal in the given time window. This method requires that channel locations be available and roughly correct; otherwise a fallback criterion will be used. The default is a correlation of 0.8.
+
+## Artifact Subspace Reconstruction
+
+Check checkbox **(7)** to use Artifact Subspace Reconstruction (ASR). ASR is described in this [publication](https://www.ncbi.nlm.nih.gov/pubmed/26415149). In edit box **(8)** you may change the standard deviation cutoff for removal of bursts (via ASR). Data portions whose variance is larger than this threshold relative to the calibration data are considered missing data and will be removed. The most aggressive value that can be used without losing much EEG is 3.
For new users it is recommended to first visually inspect the difference between the raw and the cleaned data (using eegplot) to get a sense of the content that is removed at various levels of this input variable. Here, a quite conservative value is 20; this is the current default value. Use edit box **(9)** to use Riemannian distance instead of Euclidean distance. This is a beta option, as the advantage of this method has not yet been clearly demonstrated. Checkbox **(10)** allows removal instead of correction of artifact-laden portions of data identified by ASR. One of the strengths of ASR is its ability to detect stretches of 'bad data' before correcting them. This option allows use of ASR for data-period rejection instead of correction, and is the default for offline data processing. ASR was originally designed as an online data cleaning algorithm, in which case 'bad data' correction may be used.
+
+## Additional removal of 'bad data' periods
+
+Check checkbox **(11)** to perform additional removal of bad-data periods. Edit box **(12)** sets the maximum percentage of contaminated channels that are tolerated in the final output data for each considered window. Edit box **(13)** sets the noise threshold for labeling a channel as contaminated.
+
+## Display rejected and corrected regions
+
+Check checkbox **(14)** to plot rejection results overlaid on the original data. This option is useful for visually assessing the performance of a given ASR method.
+
+Additional parameters are accessible through the command line interface of the clean_artifacts function.
+
+## Additional documentation
+
+Makoto Miyakoshi wrote a page in the [wiki section](https://github.com/sccn/clean_rawdata/wiki) of this repository discussing ASR.
+
+# Version history
+v0.34 and earlier - original versions
+
+v1.0 - new default values for some of the rejection tools, new GUI
+
+v2.0 - new improved GUI, compatibility with studies
+
+v2.1 - fix issue with 'distance' variable for burst detection
+
+v2.2 - fix history call for pop_clean_rawdata
+
+v2.3 - add maxmem to asr_calibrate to ensure reproducibility of results
+
+v2.4 - fixing issue with running function in parallel for Matlab 2020a
+
+v2.5 - move asr_calibrate out of the private folder so it can be used directly
+
+v2.6 - allowing to exclude channels and a variety of small bug fixes
+
+v2.7 - allowing to fuse channel rejection for datasets with same subject and session (STUDY processing)
+
+v2.8 - better error messages, and fix excluding channels (there was a rare crash)
+
+v2.9 - fix bug when ignoring channels and removing channels at the same time, fix plotting issue with vis_artifact
+
+v2.91 - add support for fractional sampling rate; fix too many splits with high sampling frequencies
diff --git a/plugins/clean_rawdata/index.md b/plugins/clean_rawdata/index.md
index 2acccb63..59836104 100644
--- a/plugins/clean_rawdata/index.md
+++ b/plugins/clean_rawdata/index.md
@@ -3,410 +3,103 @@ layout: default
 title: clean_rawdata
 long_title: clean_rawdata
 parent: Plugins
-categories: plugins
 has_children: true
 nav_order: 6
 ---
 To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/clean_rawdata).
 
-Historical Background (01/07/2020 updated)
-------------------------------------------
-
-ASR was originally developed by [Christian Kothe](https://intheon.io/team/#christian-kothe), now Intheon CTO. It was a part of [BCILAB](https://sccn.ucsd.edu/wiki/BCILAB) for online data cleaning.
See their famous demo [here](https://www.youtube.com/watch?v=qYC_3SUxE-M). In the spring of 2013, I asked Christian to make the offline version. In response, he gave me the custom version of his ASR for offline use. I wrapped it up into the [clean_rawdata()](https://sccn.ucsd.edu/mediawiki/index.php?title=Plugin_list_all&action=submit) plugin for EEGLAB. Below is cited from the Supplement of [Miyakoshi et al. (2020)](https://academic.oup.com/cercorcomms/article/1/1/tgaa046/5881803?login=true).
-
-> ...A historical fact is that the offline version of artifact subspace
-> reconstruction (ASR) implemented in clean_rawdata() plugin, which is
-> now validated by multiple studies (Mullen et al. 2015; Chang et al.
-> 2018, 2019; Gabard-Durnam et al. 2018; Blum et al. 2019;
-> Plechawska-Wojcik et al. 2019), was specifically developed for this
-> project upon our request by the main developer of BCILAB (Kothe and
-> Makeig 2013). The original solution was called *Christian-Nima Combo*
-> after the developers, but formally changed into clean_rawdata() on
-> June 26, 2013 to be implemented as a plugin for EEGLAB (Delorme and
-> Makeig 2004).
-
-How it is organized
--------------------
-
-The core function *clean_artifacts()* consists of five subfunctions, which form a versatile pipeline for the most upstream preprocessing.
-
-1. clean_flatlines()-This is to remove a channel that has a dead flat line longer than a certain length.
-2. clean_drifts()-This is an IIR filter. You have to enter the transition bandwidth (if you don't know what that is, see [this page](https://sccn.ucsd.edu/wiki/Firfilt_FAQ#Q._What_are_passband.2C_stopband.2C_transition_band_width.2C_cutoff_frequency.2C_passband_ripple.2Fringing.2C_and_stopband_ripple.2Fattenuation.3F)).
-3. clean_channels()/clean_channels_nolocs()-This is to reject so-called 'bad channels'. If channel location data is available, it calculates each channel's correlation to its RANSAC reconstruction for each window (this mode was specially added later, in the spring of 2014). If channel locations are unavailable, it calculates each channel's correlation to all others.
-4. **clean_asr()**-This does the real magic. The documents linked on this page explain the mechanism in detail. In a nutshell, it uses a sliding window (default 0.5 s, overlap 50%) to PCA-decompose all the channels, identifies 'bad' PCs to reject (defined by a comparison against the data's own cleanest part in frequency-enhanced RMS), and reconstructs the rejected PC activity from the remaining components.
-5. clean_windows()-This performs the final window rejection. If a sliding window (default 1.0 s, overlap 66%) finds more than a given percentage of bad channels even after ASR, the window is rejected. By disabling this function, you can keep the data length the same before and after the processing, if necessary.
-
-Download
---------
-
-ASR is implemented in the EEGLAB plugin clean_rawdata(), which you can download and install via the EEGLAB plugin manager (from the EEGLAB main GUI). Alternatively, you can download it manually from [this page](http://sccn.ucsd.edu/eeglab/plugin_uploader/plugin_list_all.php), unzip it, and place the folder under eeglab/plugins.
-
-### Download ver 1.10 (old)
-
-clean_rawdata() is no longer maintained by Makoto Miyakoshi (me). But there was a report that the newer version re-introduced the problem of result replicatability.
The behavior of the algorithm is identical -except for options such as support for Riemanian geometry. In case -people need to guarantee the result replicatability (I do), I uploaded -my final version here. Use 'availableRAM_GB' option as described below. -[clean_rawdata1.10](files/Mmiyakoshi-clean-rawdata.zip) - -Reference (07/09/2020 update) ------------------------------ - -- [Plechawska-Wojcik M, Kaczorowska M, Zapala D. (2019). The artifact - subspace reconstruction (ASR) for EEG signal correction. A - comparative study in Information systems architecture and - technology. proceedings of 39th international conference on - information systems architecture and technology – ISAT 2018: part II - (Advances in intelligent systems and computing) - 853:125-135.](https://www.researchgate.net/publication/327272493_The_Artifact_Subspace_Reconstruction_ASR_for_EEG_Signal_Correction_A_Comparative_Study) - "*The paper presents the results of a comparative study of the - artifact subspace re-construction (ASR) method and two other popular - methods dedicated to correct EEG artifacts: independent component - analysis (ICA) and principal component analysis (PCA).*" However, I - recommend one should use ASR as a preprocessing for ICA, not as an - alternative. - - - -- [Blum S, Jacobsen NSJ, Bleichner MG, Debener S. (2019) A riemannian - modification of artifact subspace reconstruction for EEG artifact - handling. Front Hum Neurosci. - 13:141.](https://www.frontiersin.org/articles/10.3389/fnhum.2019.00141/full) - *"Compared to ASR, our rASR algorithm performed favorably on all - three measures. We conclude that rASR is suitable for the offline - and online correction of multichannel EEG data acquired in - laboratory and in field conditions."* - - - -- [Chang C-Y, Hsu S-H, Pion-Tonachini L, Jung T-P. (2019). Evaluation - of Artifact Subspace Reconstruction for Automatic Artifact - Components Removal in Multi-channel EEG Recordings. IEEE Trans - Biomed Eng. 2019 - Jul 22.](https://ieeexplore.ieee.org/abstract/document/8768041) - "*...Conclusions: Empirical results show that the optimal ASR - parameter is between 20 and 30, balancing between removing non-brain - signals and retaining brain activities.*" - - - -- [Chang C-Y, Hsu S-H, Pion-Tonachini L, Jung T-P. (2018). Evaluation - of Artifact Subspace Reconstruction for Automatic EEG Artifact - Removal. Conf Proc IEEE Eng Med Biol Soc. - 2018](https://www.researchgate.net/publication/325921646_Evaluation_of_Artifact_Subspace_Reconstruction_for_Automatic_EEG_Artifact_Removal) - This is the first paper that evaluated the parameter for ASR. Very - valuable. - - - -- [Gabard-Durnam LJ, Mendez Leal AS, Wilkinson CL, and Levin AR (2018) - The Harvard Automated Processing Pipeline for Electroencephalography - (HAPPE): Standardized Processing Software for Developmental and - High-Artifact Data. Frontiers in Neuroscience, - 12:97](https://www.frontiersin.org/articles/10.3389/fnins.2018.00097/full) - *ASR was used to interpolate artifact “bursts” with variance more - than 5 standard deviations different from the automatedly detected - clean data, as in prior work wit the clinical populations (Grummett - et al., 2014). Data segments postinterpolation were removed with a - time-window rejection set ting of 0.05 (aggressive segment - rejection). Data were then submitted to ICA and MARA component - rejection (as in HAPPE). HAPPE retained more EEG data than the ASR - approach across all measures. 
Although ASR was designed for brief - “bursts” of artifact in otherwise clean data, due to the high degree - of artifact contamination in the developmental data, the ASR - approach interpolated an average of 35.7% of the EEG data per file, - which may constitute a prohibitively high interpolation rate... ASR - approach performed less successfully than HAPPE across all measures - in the context of developmental resting-state EEG files.* Thank you - very much. My comments are addressed below. - - - -- [Mullen TR, Kothe CA, Chi YM, Ojeda A, Kerth T, Makeig S, Jung TP, - Cauwenberghs G. (2015). Real-Time Neuroimaging and Cognitive - Monitoring Using Wearable Dry EEG. IEEE Trans Biomed Eng. 2015 - Nov;62(11):2553-67. doi: 10.1109/TBME.2015.2481482. Epub 2015 - Sep 23.](https://www.ncbi.nlm.nih.gov/pubmed/26415149) Note that - this paper does NOT describe ALL the steps of ASR. See also the - Supplementary Materials below for more details. - - - -- [Kothe CA, Makeig S. (2013) BCILAB: a platform for brain-computer - interface development. J Neural Eng. - 10:056014.](https://pubmed.ncbi.nlm.nih.gov/23985960/) "*bad - subspace removal*" under "Signal processing algorithms-Artifact - rejection" - -### Does ASR removes signal and noise altogether? (01/07/2020) - -Qualitatively speaking, yes, but it is in the sense that no method can -separate noise from signal perfectly. Thus I do not mean, as the English -saying goes, ASR 'throws the baby out with the bathwater.' The reality -is that much more noise is rejected than signal (otherwise what is the -point?) However, to demonstrate it with quantitative evidence requires a -well-designed simulation study. I attempted it, the report of which can -be found in the Section 5 of the Supplementary Materials for [Miyakoshi -et al. -(2020)](https://academic.oup.com/cercorcomms/article/1/1/tgaa046/5881803?login=true). -I concluded that under the conditions I defined there, ASR+ICA approach -gives advantage of 7-13 dB of SNR gain. If you are interested in, please -check out the article. - -Comments to the HAPPE paper, and how to choose the critical parameters ----------------------------------------------------------------------- - -1\) The SD threshold used was too aggressive: According to [Chang et al. -(2018)](https://www.researchgate.net/publication/325921646_Evaluation_of_Artifact_Subspace_Reconstruction_for_Automatic_EEG_Artifact_Removal), -if SD = 5 is used, as they did, then 90% of data points will be modified -and 80% of the original variance will be lost, according to this paper. -You don't want to do this. The same paper concluded that threshold SD = -10-100 is recommended. Important note: here, the definition of SD is - -1. choosing data's cleanest part -2. apply a custom frequency equalization filter that has a profile of - inverse of EEG power spectral density (PSD) to 'exaggerate' - non-brain-like power -3. Calculate RMS of the data -4. z-score the data - -Because the distribution of the finally obtained z-scores is so tight, -which means the cleanest part of data is too clean compared with noisy -part of the same data, we need to use somewhat unusual cutoff values of -SD=10 or 20. We once had discussion over this issue with Christian. He -suggested we may want to use a different metric here, because SD=10 or -20 is unusually high, which indicates inappropriate use of the current -metric. 
Indeed, in our unpublished data of cleaning method comparison -conducted by Nima Bigdely-Shamlo (who developed [Measure -Projection](https://github.com/bigdelys/measure_projection), -[PREP](https://www.frontiersin.org/articles/10.3389/fninf.2015.00016/full), -etc.), ASR with SD==20 recorded the best among other 20 methods. So -starting with SD==10 to 20 is recommended. - -2\) Too aggressive window rejection: They also used window rejection of -0.05, which is also very aggressive. - -3\) After all, it was our fault--ASR has been there for years without -being explained well (and still not! There are \>30 main and optional -parameters to explore), not to mention how to optimize the parameters -based on empirical testing. The poor choices of the critical parameters -shown in the HAPPE paper was unfortunate but understandable. Hopefully, -we learn from Chiyuan's paper [Chang et al. -(2018)](https://www.researchgate.net/publication/325921646_Evaluation_of_Artifact_Subspace_Reconstruction_for_Automatic_EEG_Artifact_Removal), -which was published shortly after the HAPPE paper, to use ASR and -related functions reasonably. - -Supplementary Materials ------------------------ - -[A power point slides by Christian Kothe](files/ASR.pdf): -This is a nice explanation of the principle by Christian himself. - -[The most detailed description of -ASR](files/AsrDescription.pdf): I wrote probably the **most -detailed (but still without all the details) description of ASR** with -great help of my colleagues, Chiyuang Chang and Shawn Hsu. This is one -of supplements from Loo et al. (2019). - -[ASR for dummies](files/AsrForDummies_ver21_web.pdf): I -also made a presentation material to explain the principle of ASR using -an analogy. - -[My EEGLAB workshop -slides:](files/PreprocessingPipelineAndUtilityTools.pdf) -This is the file I distributed in [the 25th EEGLAB workshop at JAIST -Tokyo satellite, Tokyo](https://eeglab.org/workshops/EEGLAB_2017_Japan.html). - -Theoretical justification for using ASR as ICA's preprocessing --------------------------------------------------------------- - -An important thing to remember is that - -- ASR == non-stationary method (i.e., it uses sliding window PCA) -- ICA == stationary method (i.e., only one spatial filter is used - throughout the recording), and data stationarity (i.e., no glitches - or bursts) is a required assumption. - -In other words, - -- ASR == good at removing occasional large-amplitude noise/artifacts -- ICA == good at decomposing constant fixed-source - noise/artifacts/signals - -In this way, both methods are complementary to each other. PCA is -certainly too simple to explain EEG. But, if the current goal is to -identify 'occasional large-amplitude noise/artifact' to make ICA work -easier (by increasing data stationarity), it is sufficient. - -Version 1.00 update details (updated 03/27/2019) ------------------------------------------------- - -As mentioned above, clean_rawdata() has been available as a EEGLAB -plugin since 2013. In the spring of 2019, I made a major update. -Important confirmation is that this update did NOT change any core -algorithms, so the results from the calculation will be the \*same\* -given same parameters--but I will give you more detail about the exact -replicatability of the output. The changes made in this update are -twofold: - -### The option 'availableRAM_GB' is available to fix the length of final output. - -We noticed that even if we use the same parameters, the final results -from clean_rawdata() fluctuated. 
This is because when ASR is performed, -it determines the number of chunks according to the available amount of -RAM, which is obtained by hlp_memfree(), which calls -getFreePhysicalMemorySize(), under private folder. Because the available -amount of RAM is dynamically calculated literally moment by moment, -every time this function is called, it returns different values. This is -the cause of fluctuating final data length, since different chunking -results in different ASR results, hence different final window rejection -result. Here, I made a simple test shown below to determine if fixing -the available amount of RAM fixes the output data length. - -``` matlab -EEG = pop_loadset('filename','asrTestData.set','filepath','/data/projects/makoto/asrTest/'); % 123 ch, 2583s , 250 Hz sampling rate. - -% High-pass filter the data. -EEG = pop_firws(EEG, 'fcutoff', 0.5, 'ftype', 'highpass', 'wtype', 'blackman', 'forder', 2750, 'minphase', 0); - -% Trim outliers. -EEG = trimOutlier(EEG, -Inf, 1000, 1000, 100); % % 123 ch, 2581s , 250 Hz sampling rate. - -resultTable = zeros(110, 6); % Conventional_dataLength, availableRam, processTime, Updated_dataLength, availableRam, processTime -for trialIdx = 1:110 - - % ASR with no RAM option. - currentRam = java.lang.management.ManagementFactory.getOperatingSystemMXBean().getFreePhysicalMemorySize()/(2^30); - tStart = tic; - EEG2 = clean_rawdata(EEG, -1, -1, -1, -1, 20, 0.25); - currentProcessTime = toc(tStart); - resultTable(trialIdx, 1:3) = [EEG2.pnts, currentRam, currentProcessTime]; - - % ASR with 8GB RAM option. - currentRam = java.lang.management.ManagementFactory.getOperatingSystemMXBean().getFreePhysicalMemorySize()/(2^30); - tStart = tic; - EEG2 = clean_rawdata(EEG, -1, -1, -1, -1, 20, 0.25, 'availableRAM_GB', 8); - currentProcessTime = toc(tStart); - resultTable(trialIdx, 4:6) = [EEG2.pnts, currentRam, currentProcessTime]; -end -``` - -![800px](AsrRamFix.png) - -As shown, fixing the RAM size fixed the length of the output data. -Currently, this is the only way to ensure result replicatability. I -recommend this option is always used. - -### All the optional inputs down to *clean_artifacts()* are supported. - -Upon a request made by the main EEGLAB developer Arnaud Delorme. One -exception is 'availableRAM_GB' which is delivered to the two functions -in two deeper layers, *asr_calibrate()* and *asr_process()*, via -*clean_asr()*. The list of all 31 optional inputs are shown below. For -detail of each optional input, please refer to the help section of each -function specified. - -``` matlab -% Decode all the inputs. -hlp_varargin2struct(varargin,... - ... - ... % This section contains 6 basic parameters that corresponds to GUI inputs. The assumption is that users determine these values. - {'chancorr_crit','ChannelCorrelationCriterion','ChannelCriterion'}, 0.8, ... - {'line_crit','LineNoiseCriterion'}, 4, ... - {'burst_crit','BurstCriterion'}, 5, ... - {'window_crit','WindowCriterion'}, 0.25, ... - {'highpass_band','Highpass'}, [0.25 0.75], ... - {'channel_crit_maxbad_time','ChannelCriterionMaxBadTime'}, 0.5, ... - ... - ... % This section contains optional inputs for clean_artifacts() (i.e., for the 6 subfunctions). The assumptiion is that the default values are usually used. - {'burst_crit_refmaxbadchns','BurstCriterionRefMaxBadChns'}, 0.075, ... - {'burst_crit_reftolerances','BurstCriterionRefTolerances'}, [-3.5 5.5], ... - {'window_crit_tolerances','WindowCriterionTolerances'},[-3.5 7], ... - {'flatline_crit','FlatlineCriterion'}, 5,... 
- {'nolocs_channel_crit','NoLocsChannelCriterion'}, 0.45, ...
- {'nolocs_channel_crit_excluded','NoLocsChannelCriterionExcluded'}, 0.1, ...
- ...
- ... % This section contains optional inputs for clean_flatlines().
- {'max_allowed_jitter', 'MaxAllowedJitter'}, [], ...
- ...
- ... % This section contains optional inputs for clean_drifts().
- {'attenuation', 'Attenuation'}, [], ...
- ...
- ... % This section contains optional inputs for clean_channels().
- {'clchan_window_len', 'CleanChannelsWindowLength'}, [], ...
- {'num_samples', 'NumSamples'}, [], ...
- {'subset_size', 'SubsetSize'}, [], ...
- ...
- ... % This section contains optional inputs for clean_channels_nolocs().
- {'linenoise_aware', 'LineNoiseAware'}, [], ...
- ...
- ... % This section contains optional inputs for clean_asr().
- {'asr_windowlen','ASR_WindowLength'}, [],...
- {'asr_stepsize','ASR_StepSize'}, [],...
- {'maxdims','MaxDimensions'}, [],...
- {'ref_wndlen','ReferenceWindowLength'}, [], ...
- {'usegpu','UseGPU'}, [],...
- {'availableRAM_GB'}, [],...
- ...
- ... % This section contains optional inputs for clean_windows().
- {'clwin_window_len', 'CleanWindowsWindowLength'}, [], ...
- {'window_overlap', 'WindowOverlap'}, [], ...
- {'max_dropout_fraction', 'MaxDropoutFraction'}, [], ...
- {'min_clean_fraction', 'MinCleanFraction'}, [], ...
- {'truncate_quant', 'TruncateQuantile'}, [], ...
- {'clwin_step_sizes', 'CleanWindowsStepSizes'}, [], ...
- {'shape_range', 'ShapeRange'}, []);
-```
-
-### *clean_rawdata()* now stores all the parameters used, including optional inputs, under EEG.etc.clean_rawdata_log
-
-To replicate a result, one needs ALL the input parameters, particularly
-'availableRAM_GB'. To make it easier, I decide to make each data set
-carry how they are processed with *clean_rawdata()* so that even the
-data sets are processed in a batch mode, they can be reproduced without
-the original code.
-
-Support
-------
-
-clean_rawdata(), which contains the offline version of ASR, was
-developed for a project for a study on chronic tic disorder (PI Sandra
-Loo) that was supported by NINDS 80160 and 97484.
-
-Author: Makoto Miyakoshi, Swartz Center for Computational Neuroscience
-(SCCN), Institute for Neural Computation, UC San Diego
+# Clean_rawdata EEGLAB plug-in
+
+The Clean Rawdata plug-in (version 2.0) interface has been redesigned and will soon become the default EEGLAB method for removing artifacts from EEG and related data. The plug-in detects and can separate low-frequency drifts, flatline and noisy channels from the data. It can also apply ASR (Artifact Subspace Reconstruction) to detect and reject or remove high-amplitude non-brain ('artifact') activity (produced by eye blinks, muscle activity, sensor motion, etc.) by comparing its structure to that of known artifact-free reference data, thereby revealing and recovering (possibly smaller) EEG background activity that lies outside the subspace spanned by the artifact processes.
+
+**Note:** This plug-in uses the Signal Processing toolbox for pre- and post-processing of the data (removing drifts, channels and time windows); the core ASR method (clean_asr) does not require this toolbox, but you will need high-pass filtered data if you use it directly.
+
+# This project needs you
+
+We need community help to maintain this project. Please review existing issues and submit pull requests. A section in this documentation with links to all the existing methodological papers is also needed.
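+
+# Quick command-line sketch
+
+For orientation, the hedged sketch below shows how the cleaning steps described in this README might be run from a script rather than from the GUI. The option names follow the clean_artifacts parameter list reproduced earlier in this document, and the values are the defaults discussed in the GUI section below; check the help of clean_artifacts in your EEGLAB installation before relying on this exact call.
+
+``` matlab
+% Illustrative only: detect flat and noisy channels, remove slow drifts,
+% and apply ASR with the conservative standard-deviation cutoff of 20.
+EEG = clean_artifacts(EEG, ...
+    'FlatlineCriterion',  5, ...           % maximum tolerated flatline duration (s)
+    'LineNoiseCriterion', 4, ...           % line-noise criterion (standard deviations)
+    'ChannelCriterion',   0.8, ...         % minimum channel correlation
+    'Highpass',           [0.25 0.75], ... % high-pass transition band (Hz)
+    'BurstCriterion',     20, ...          % ASR standard-deviation cutoff
+    'WindowCriterion',    0.25);           % additional bad-window rejection
+```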
+
+# Credit
+
+This plug-in, clean_rawdata, uses methods (e.g., Artifact Subspace
+Reconstruction, ASR) by Christian Kothe from the BCILAB Toolbox
+(Kothe & Makeig, 2013), first wrapped into an EEGLAB plug-in by
+Makoto Miyakoshi and further developed by Arnaud Delorme with
+Scott Makeig.
+
+The private folder contains third-party utilities, including:
+- findjobj.m Copyright (C) 2007-2010 Yair M. Altman
+- asr_calibrate.m and asr_process.m
+  Copyright (C) 2013 The Regents of the University of California
+  Note that these functions are not free for commercial use.
+- sperhicalSplineInterpolate.m Copyright (C) 2009 Jason Farquhar
+- oct_fftfilt Copyright (C) 1996, 1997 John W. Eaton
+- utility functions from the BCILAB toolbox Copyright (C) 2010-2014 Christian Kothe
+
+The folder "manopt" contains the Matlab toolbox for optimization on manifolds.
+
+# Graphic interface
+
+Below we detail the GUI interface. Individual functions contain additional help information.
+
+![](gui_interface.png)
+
+## High pass filter the data
+
+Check checkbox **(1)** if the data have not yet been high-pass filtered. If you use this option, the edit box in **(2)** allows setting the transition band for the high-pass filter in Hz. This is formatted as [transition-start, transition-end]. The default is 0.25 to 0.75 Hz.
+
+## Reject bad channels
+
+Check checkbox **(3)** to reject bad channels. Option **(4)** allows removal of flat channels. The edit box sets the maximum tolerated (non-rejected) flatline duration in seconds. If a channel has a longer flatline than this, it will be considered abnormal and rejected. The default is 5 seconds. Option **(5)** sets the line-noise criterion: if a channel has more line noise relative to its signal than this value (in standard deviations based on the total channel signal), it is considered abnormal. The default is 4 standard deviations. Option **(6)** sets the minimum channel correlation. If a channel is correlated at less than this value to an estimate based on other nearby channels, it is considered abnormal in the given time window. This method requires that channel locations be available and roughly correct; otherwise a fallback criterion will be used. The default is a correlation of 0.8.
+
+## Artifact Subspace Reconstruction
+
+Check checkbox **(7)** to use Artifact Subspace Reconstruction (ASR). ASR is described in this [publication](https://www.ncbi.nlm.nih.gov/pubmed/26415149). In edit box **(8)** you may change the standard-deviation cutoff for removal of bursts (via ASR). Data portions whose variance is larger than this threshold relative to the calibration data are considered missing data and will be removed. The most aggressive value that can be used without losing much EEG is 3. New users are advised to first visually inspect the difference between the raw and the cleaned data (using eegplot) to get a sense of the content that is removed at various levels of this input variable. A quite conservative value is 20; this is the current default. Use edit box **(9)** to use the Riemannian distance instead of the Euclidean distance. This is a beta option, as the advantage of this method has not yet been clearly demonstrated.
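+
+If you prefer to script this step, the core ASR routine mentioned in the note at the top of this README can also be called directly on data that have already been high-pass filtered. The fragment below is a sketch only: it assumes a call of the form clean_asr(data, cutoff), so verify the actual signature with the function help in your installation.
+
+``` matlab
+% Illustrative only: run core ASR with the conservative cutoff of 20
+% discussed above, on data that have already been high-pass filtered.
+EEG = clean_asr(EEG, 20);   % assumed form: clean_asr(data, cutoff)
+```
+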
Checkbox **(10)** allows removal instead of correction of artifact-laden portions of data identified by ASR. One of the strengths of ASR is its ability to detect stretches of 'bad data' before correcting them. This option allows use of ASR for data-period rejection instead of correction, and is the default for offline data processing. ASR was originally designed as an online data-cleaning algorithm, in which case 'bad data' correction may be used.
+
+## Additional removal of 'bad data' periods
+
+Check checkbox **(11)** to perform additional removal of bad-data periods. Edit box **(12)** sets the maximum percentage of contaminated channels that is tolerated in the final output data for each considered window. Edit box **(13)** sets the noise threshold for labeling a channel as contaminated.
+
+## Display rejected and corrected regions
+
+Check checkbox **(14)** to plot rejection results overlaid on the original data. This option is useful to visually assess the performance of a given ASR method.
+
+Additional parameters are accessible through the command-line interface of the clean_artifacts function.
+
+## Additional documentation
+
+Makoto Miyakoshi wrote a page in the [wiki section](https://github.com/sccn/clean_rawdata/wiki) of this repository discussing ASR.
+
+# Version history
+v0.34 and earlier - original versions
+
+v1.0 - new default values for some of the rejection tools, new GUI
+
+v2.0 - new improved GUI, compatibility with studies
+
+v2.1 - fix issue with 'distance' variable for burst detection
+
+v2.2 - fix history call for pop_clean_rawdata
+
+v2.3 - add maxmem to asr_calibrate to ensure reproducibility of results
+
+v2.4 - fix issue with running the function in parallel in Matlab 2020a
+
+v2.5 - move asr_calibrate out of the private folder so it can be used directly
+
+v2.6 - allow excluding channels, plus a variety of small bug fixes
+
+v2.7 - allow fusing channel rejection for datasets with the same subject and session (STUDY processing)
+
+v2.8 - better error messages, and fix excluding channels (there was a rare crash)
+
+v2.9 - fix bug when ignoring channels and removing channels at the same time, fix plotting issue with vis_artifact
+
+v2.91 - add support for fractional sampling rates; fix too many splits with high sampling frequencies
diff --git a/plugins/dipfit/index.md b/plugins/dipfit/index.md
index 0480bf21..eb694244 100644
--- a/plugins/dipfit/index.md
+++ b/plugins/dipfit/index.md
@@ -3,6 +3,7 @@ layout: default
title: dipfit
long_title: dipfit
parent: Plugins
+has_children: true
nav_order: 14
---
To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/dipfit).
diff --git a/plugins/eegstats/index.md b/plugins/eegstats/index.md
index 22a3c6aa..b5350c75 100644
--- a/plugins/eegstats/index.md
+++ b/plugins/eegstats/index.md
@@ -3,6 +3,7 @@ layout: default
title: eegstats
long_title: eegstats
parent: Plugins
+has_children: true
nav_order: 15
---
To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/eegstats).
diff --git a/plugins/fMRIb/index.md b/plugins/fMRIb/index.md
index e28dca4c..014fef89 100644
--- a/plugins/fMRIb/index.md
+++ b/plugins/fMRIb/index.md
@@ -3,6 +3,7 @@ layout: default
title: fMRIb
long_title: fMRIb
parent: Plugins
+has_children: true
nav_order: 20
---
To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/fMRIb).
diff --git a/plugins/firfilt/index.md b/plugins/firfilt/index.md
index d826352a..5fb41a46 100644
--- a/plugins/firfilt/index.md
+++ b/plugins/firfilt/index.md
@@ -3,6 +3,7 @@ layout: default
title: firfilt
long_title: firfilt
parent: Plugins
+has_children: true
nav_order: 27
---
To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/firfilt).
diff --git a/plugins/get_chanlocs/README.md b/plugins/get_chanlocs/README.md
new file mode 100644
index 00000000..3ab51843
--- /dev/null
+++ b/plugins/get_chanlocs/README.md
@@ -0,0 +1,11 @@
+---
+layout: default
+title: README
+long_title: README
+parent: get_chanlocs
+grand_parent: Plugins
+---
+# get_chanlocs
+*get_chanlocs* performs electrode localization using a 3D head image. It is an EEGLAB plug-in that also uses some functions from the FieldTrip toolbox.
+
+See [documentation](https://github.com/sccn/get_chanlocs/wiki) for more information.
diff --git a/plugins/get_chanlocs/index.md b/plugins/get_chanlocs/index.md
index 01e091a4..8ee73c9b 100644
--- a/plugins/get_chanlocs/index.md
+++ b/plugins/get_chanlocs/index.md
@@ -3,245 +3,12 @@ layout: default
title: get_chanlocs
long_title: get_chanlocs
parent: Plugins
-categories: plugins
has_children: true
nav_order: 2
---
To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/get_chanlocs).
-

- -*get_chanlocs*: Compute 3-D electrode positions from a 3-D head image -==\> [Download the *get_chanlocs* User Guide](https://sccn.ucsd.edu/eeglab/download/Get_chanlocs_userguide.pdf) - -

- -![](Get_chanlocs.jpg) - -### What is *get_chanlocs*? - -The *get_chanlocs* EEGLAB plug-in is built on functions in -[FieldTrip](http://www.fieldtriptoolbox.org/) to locate 3-D electrode -positions from a 3-D scanned head image. Robert Oostenveld, originator -of the FieldTrip toolbox, alerted us in 2017 that he and his students in -Nijmegen had put functions into FieldTrip to compute positions of scalp -electrodes from the recorded 3-D images for one 3-D camera, the -[Structure scanner](https://structure.io/) mounted to an Apple iPad. -(Read [Homölle and Oostenveld -(2019)](https://doi.org/10.1016/j.jneumeth.2019.108378) and [notes on -the incorporated FieldTrip -functions](http://www.fieldtriptoolbox.org/tutorial/electrode/)). We at -SCCN have created an EEGLAB plug-in extension, *get_chanlocs*, to ease -the process of digitizing the positions of the electrodes from the -acquired 3-D and entering them into the *EEG.chanlocs* data structure -for use with other EEGLAB (plotting and source localization) functions -that require electrode position information. - -The major advantages of using get_chanlocs to measure -electrode positions are that: 1) the 3D image can be recorded quickly -(\<1 min), thereby saving precious subject time (and attention -capacity) better used to record EEG data! The researchers who have been -most enthusiastic to hear about get_chanlocs are those -collecting data from children and infants -- though even normal adult -participants must feel less cognitive capacity for the experimental -tasks after sitting, wearing the EEG montage, for 20 min while research -assistants record the 3D location of each scalp electrode. 2) The 3D -image connects the electrode locations to the head fidicuals in a very -concrete and permanent way; future improved head modeling will be -able to use the 3D head surface scans to fit to subject MR images or to -warp template head models to the actual subject head. 3) Unlike with -wand-based electrode localizing (neurologists call this electrode -'digitizing'), retaining the 3D head image allows rechecking the -electrode positions (e.g., if some human error occurs on first -readout). - -In brief, the process is as follows: - -Scanning the head surface: A 3-D head image (3-D head ‘scan’) is -acquired using the Structure scanner showing the subject wearing the -electrode cap; this image acquisition typically requires a minute or -less to perform. The resulting 3-D *.obj* image file is stored along -with the EEG data. *get_chanlocs* also supports use of *.obj* 3D image -files obtained using the [itSeez3D scanning app](https://itseez3d.com/), -which we have found to be easier to capture good 3D images with than the -Structure scanner's native app (Suggestion: Ask iSeez3D about a -non-commercial license). - -Localizing the electrodes in the 3D scan: When the data are to be -analyzed, the *get_chanlocs* plug-in, called from the Matlab command -line or EEGLAB menu, guides the data analyst through the process of -loading the recorded 3-D head image and then clicking on each of the -electrodes in the image in a pre-planned order to compute and store -their 3-D positions relative to 3 fidicual points on the head (bridge of -nose and ears). (Note: in future, this digitizing step may be automated -at some point in the future using a machine vision approach). The -electrode labels and their 3-D positions relative to the three skull -landmarks (‘fiducial points’) are then written directly into the dataset -*EEG.chanlocs* structure. 
During this process, a montage template -created for the montage used in the recorded experiment can be shown by -*get_chanlocs* as a convenient visual reference to speed and minimize -human error in the electrode digitization process. - -User Guide See the illustrated [*get_chanlocs* User -Guide](https://sccn.ucsd.edu/mediawiki/images/5/5f/Get_chanlocs_userguide.pdf) for details. - -Uses: Once the digitized electrode positions have been stored in -the dataset, further (scalp field plotting and source localization) -processes can use the digitized positions. - -Ethical considerations: An institutional review board (or -equivalent ethics review body) will likely consider head images as -personally identifiable information. Here is the IRB-approved [UCSD -subject Consent -form](/Media:Get_chanlocs_sampleConsent.pdf "wikilink"), allowing -participants to consent to different degrees of use of their 3D head -image, that we use at SCCN. - -### Why *get_chanlocs*? - -To achieve high-resolution EEG (effective) source imaging -requires (a) an accurate 3-D electrical head model, and (b) -accurate co-registration of the 3-D scalp electrode positions to the -head model. Several packages are available for fashioning a -geometrically accurate head model from an anatomic MR head image. We use -Zeynep Akalin Acar's [Neuromagnetic Forward problem Toolbox -(NFT)](https://sccn.ucsd.edu/wiki/NFT), which she is now coupling to the -first non-invasive, universally applicable method (SCALE) for estimating -individual skull conductivity from EEG data (Akalin Acar et al., 2016; -more news of this soon!). When a subject MR head image is *not* -available, equivalent dipole models for independent component brain -sources can use a template head model. Zeynep has shown that the dipole -position fitting process is more accurate when the template head is -warped to fit the actual 3-D positions of the electrodes -- IF these are -recorded accurately. This kind of warping is performed in Zeynep's -[**NFT** toolbox for EEGLAB](https://sccn.ucsd.edu/wiki/NFT). - -For too long, it has been expensive and/or time consuming (for both -experimenter and subject) to record (or 'digitize') the 3-D positions of -the scalp electrodes for each subject. In recent years, however, cameras -capable of recording images in 3-D have appeared and are now becoming -cheaper and more prevalent. Robert Oostenveld, originator of the -FieldTrip toolbox, alerted us that he and his students in Nijmegen had -added functions to FieldTrip to compute the 3-D positions of scalp -electrodes from scanned 3-D images acquired by one such camera, the -[Structure scanner](https://store.structure.io/store) mounted to an -Apple iPad. - -Recording the actual electrode positions in a 3-D head image minimizes -the time spent by the experimenter and subject on electrode position -recording during the recording session to a minute or less, while also -minimizing position digitizing system cost (to near $1000) and the space -required (to an iPad-sized scanner plus enough space to walk around the -seated subject holding the scanner). Digitizing the imaged electrode -positions during data preprocessing is made convenient in *get_chanlocs* -by using a montage template. In future, we anticipate an automated -template-matching app will reduce time required to simply checking the -results of an automated procedure. - -Required Resources ------------------- - -The *get_chanlocs* plug-in has been tested under Matlab 9.1 (R2016b) on -Windows 10 as well as OS X 10.10.5. 
Please provide feedback concerning -any incompatibilities, bugs, or feature suggestions using the [GitHub -issue tracker](https://github.com/cll008/get_chanlocs/issues/). - -Scanning software: In theory, any combination of 3-D scanning -hardware and software that produces a Wavefront OBJ file (.obj) with the -corresponding material texture library (.mtl) and JPEG (.jpg) files can -be used for the plug-in. *get_chanlocs* has only been tested with head -models produced by the [Structure Sensor -camera](https://store.structure.io/store) attached to an iPad Air (model -A1474). We use the default [calibrator -app](https://itunes.apple.com/us/app/structure-sensor-calibrator/id914275485?mt=8) -to align the Sensor camera and the tablet camera, and both the default -scanning software -([Scanner](https://itunes.apple.com/us/app/scanner-structure-sensor-sample/id891169722?mt=8)) -and a third-party scanning software ([itSeez3D](https://itseez3d.com/)). - -Scanner vs. itSeez3D: While the default scanning app -([Scanner](https://itunes.apple.com/us/app/scanner-structure-sensor-sample/id891169722?mt=8)) -is free and produces models that are of high enough quality for the -plug-in, we find the third-party app ([itSeez3D](https://itseez3d.com/)) -easier to use. It seems to be more robust, providing better tracking and -faster scans while minimizing the effects of adverse lighting -conditions. itSeez3D features a user friendly online account system for -accessing high-resolution models that are processed on their cloud -servers. Users may contact [itSeez3D](mailto:support@itseez3d.com) to -change processing parameters; for *get_chanlocs*, we found that -increasing the model polygon count beyond 400,000 results in longer -processing time without providing an appreciable increase in resolution. -Unfortunately, while scanning is free, exporting models (required for -*get_chanlocs*) has a [per export or subscription -cost](https://itseez3d.com/pricing.html). Please contact -[itSeez3D](mailto:support@itseez3d.com) regarding discounts for -educational institutions and other non-commercial purposes. - -Common Issues -------------- - -Incorrect units in resulting electrode locations: 3-D .obj model -units are estimated by relating the range of the recorded vertex -coordinates to an average-sized head: a captured model that is much -larger or smaller than average will cause errors. If your project -requires scanning an atypically-sized model (e.g. large bust scan -including ECG electrode, arm scan for EMG sleeve, etc.), manually set -obj.unit - [instead of using -*ft_determine_units*](https://github.com/cll008/get_chanlocs/blob/master/private/ft_convert_units.m#L86) -- to the correct unit used by your scanner {'m','dm','cm','mm'} to avoid -complications. - -Keyboard settings: Key presses are used to rotate 3-D head models -when selecting electrode locations in *get_chanlocs*. Key press -parameters should be adjusted per user discretion: macOS and Windows -systems have adjustable Keyboard Properties, where 'Repeat delay' and -'Repeat rate' may be modified. For some versions of macOS, long key -presses will instead bring up an accent selection menu; in such cases, -repeated single key presses can be used to control MATLAB, or users may -disable the accent selection menu and enable repeating keys by typing -(or pasting) the following in the terminal: -`defaults write -g ApplePressAndHoldEnabled -bool false` - -One way to circumvent this issue is to use the 3-D figure rotation tool -in MATLAB. 
First select the rotation tool, then mark electrodes by -clicking as normal; to rotate the model, hold the click after selecting -an electrode and drag the mouse; else, be sure to press 'r' to remove -points as necessary. - -Low resolution in head model: Models will have lowered resolution -in MATLAB due to how 3-D .obj are imported and handled, even if they -have show a reasonable resolution in other 3-D modeling software (e.g. -Paint 3D). Increase the polygon count of the model to circumvent this -issue (we recommend 400,000 uniform polygons for itSeez3D). - -Download --------- - -To download *get_chanlocs*, use the extension manager within EEGLAB. -Alternatively, plug-ins are available for manual download from the -[EEGLAB plug-in -list](https://sccn.ucsd.edu/eeglab/plugin_uploader/plugin_list_all.php). - -Revision History ----------------- - -Please check the [commit -history](https://github.com/cll008/get_chanlocs/commits/master) of the -plug-in's GitHub repository. - -*get_chanlocs* User Guide -------------------------- - -View/download the [*get_chanlocs* User -Guide](https://sccn.ucsd.edu/eeglab/download/Get_chanlocs_userguide.pdf) - -
- -Creation and documentation by: - -**Clement Lee**, Applications Programmer, SCCN/INC/UCSD, - -**Scott Makeig**, Director, SCCN/INC/UCSD, - -
+# get_chanlocs
+*get_chanlocs* performs electrode localization using a 3D head image. It is an EEGLAB plug-in that also uses some functions from the FieldTrip toolbox.
+See [documentation](https://github.com/sccn/get_chanlocs/wiki) for more information.
diff --git a/plugins/groupSIFT/index.md b/plugins/groupSIFT/index.md
index d1e662cb..2332ab43 100644
--- a/plugins/groupSIFT/index.md
+++ b/plugins/groupSIFT/index.md
@@ -3,6 +3,7 @@ layout: default
title: groupSIFT
long_title: groupSIFT
parent: Plugins
+has_children: true
nav_order: 11
---
To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/groupSIFT).
diff --git a/plugins/imat/index.md b/plugins/imat/index.md
index 3253ad16..be95646f 100644
--- a/plugins/imat/index.md
+++ b/plugins/imat/index.md
@@ -3,6 +3,7 @@ layout: default
title: imat
long_title: imat
parent: Plugins
+has_children: true
nav_order: 23
---
To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/imat).
diff --git a/plugins/nsgportal/README.md b/plugins/nsgportal/README.md
new file mode 100644
index 00000000..b9507177
--- /dev/null
+++ b/plugins/nsgportal/README.md
@@ -0,0 +1,27 @@
+---
+layout: default
+title: README
+long_title: README
+parent: nsgportal
+grand_parent: Plugins
+---
+# EEGLAB on NSG and nsgportal
+An Open EEGLAB Portal to High-Performance Computing: As of late 2018, EEGLAB scripts may now be run on high-performance computing resources via the freely available Neuroscience Gateway Portal to the NSF-sponsored [Comet supercomputer](https://ucsdnews.ucsd.edu/pressrelease/sdsc_to_double_comet_supercomputers_graphic_processor_count/) of the [San Diego Supercomputer Center](https://sdsc.edu/). The home page of the Neuroscience Gateway is shown below. NSG accounts are free and are not limited to US users, but the portal may only be used for non-commercial purposes (see the [NSG Terms of Use](http://www.nsgportal.org/policy.html)).
+
+![Screenshot 2024-07-11 at 14 45 33](https://github.com/user-attachments/assets/ddccba01-f5f4-4337-ae08-2fd4cf96f916)
+
+Like all (except personal!) supercomputers, Comet typically runs jobs in batch mode rather than in the interactive style of Matlab. However, Comet has all Matlab functions as well as EEGLAB functions and many plug-in extensions installed and ready to be called from scripts. When a job submitted through the NSG portal is run, you will receive an email from NSG alerting you to download the results. This means that the best uses of the Open EEGLAB Portal are for computationally intensive processes and/or for parallel, automated processing of large EEG studies. In the first category, we are now installing the most computationally intensive EEGLAB functions on Comet: AMICA, RELICA, time/frequency analysis, SCALE-optimized individual subject head modeling via NFT, etc. We will give more information here about using these installed capabilities as they become available.
+
+To read a detailed overview of the Open EEGLAB Portal, browse a [conference paper submitted to the IEEE/EMBS Neural Engineering Conference](https://sccn.ucsd.edu/~scott/pdf/Delorme_Open_EEGLAB_Portal_NER18.pdf) in San Francisco (March, 2019), and our later [Neuroimage](https://www.sciencedirect.com/science/article/pii/S1053811920302652) article.
+
+This repository contains the code for the EEGLAB plug-in interfacing the NSG portal through the REST API: nsgportal.
The core functions of the plug-in were initially drafted by Arnaud Delorme and further modified and reworked by Ramon Martinez-Cancino, Dung Truong and Scott Makeig (The EEGLAB Team) with substantial contributions from the NSG team at the SDSC.
+
+# Versions
+v1.0 - version used for Neuroimage 2020 article
+
+v2.0 - version used for Nov. 2020 NSG online tutorial (increased robustness and command line calls)
+
+v2.1 - Use EEGLAB on Expanse. Make job submission non-blocking
+
+**Explore the NSGPORTAL Wiki [here](https://github.com/sccn/nsgportal/wiki)**
+
diff --git a/plugins/nsgportal/index.md b/plugins/nsgportal/index.md
index 90325270..5de7881b 100644
--- a/plugins/nsgportal/index.md
+++ b/plugins/nsgportal/index.md
@@ -3,19 +3,28 @@ layout: default
title: nsgportal
long_title: nsgportal
parent: Plugins
-categories: plugins
has_children: true
-nav_order: 1
+nav_order: 5
---
To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/nsgportal).
-# EEGLAB on NSG
-An Open EEGLAB Portal to High-Performance Computing: As of late 2018, EEGLAB scripts may now be run on high-performance computing resources via the freely available Neuroscience Gateway Portal to the NSF-sponsored [Comet supercomputer](https://ucsdnews.ucsd.edu/pressrelease/sdsc_to_double_comet_supercomputers_graphic_processor_count/) of the [San Diego Supercomputer Center](https://sdsc.edu/). The home page of the Neuroscience Gateway is shown below. NSG accounts are free and are not limited to US users, but the portal may only be used for non-commercial purposes (see the [NSG Terms of Use](http://www.nsgportal.org/policy.html)). We also recommend you to watch the [NSG tutorial videos](https://www.nsgportal.org/tutorial.html).
-
-drawing -
+# EEGLAB on NSG and nsgportal
+An Open EEGLAB Portal to High-Performance Computing: As of late 2018, EEGLAB scripts may now be run on high-performance computing resources via the freely available Neuroscience Gateway Portal to the NSF-sponsored [Comet supercomputer](https://ucsdnews.ucsd.edu/pressrelease/sdsc_to_double_comet_supercomputers_graphic_processor_count/) of the [San Diego Supercomputer Center](https://sdsc.edu/). The home page of the Neuroscience Gateway is shown below. NSG accounts are free and are not limited to US users, but the portal may only be used for non-commercial purposes (see the [NSG Terms of Use](http://www.nsgportal.org/policy.html)).
+
+![Screenshot 2024-07-11 at 14 45 33](https://github.com/user-attachments/assets/ddccba01-f5f4-4337-ae08-2fd4cf96f916)

Like all (except personal!) supercomputers, Comet typically runs jobs in batch mode rather than in the interactive style of Matlab. However, Comet has all Matlab functions as well as EEGLAB functions and many plug-in extensions installed and ready to be called from scripts. When a job submitted through the NSG portal is run, you will receive an email from NSG alerting you to download the results. This means that the best uses of the Open EEGLAB Portal are for computationally intensive processes and/or for parallel, automated processing of large EEG studies. In the first category, we are now installing the most computationally intensive EEGLAB functions on Comet: AMICA, RELICA, time/frequency analysis, SCALE-optimized individual subject head modeling via NFT, etc. We will give more information here about using these installed capabilities as they become available.

-To read a detailed overview of the Open EEGLAB Portal, browse a [conference paper submitted the IEEE/EMBS Neural Engineering Conference](https://sccn.ucsd.edu/~scott/pdf/Delorme_Open_EEGLAB_Portal_NER18.pdf) in San Francisco (March, 2019) and our [Neuroimage](https://www.sciencedirect.com/science/article/pii/S1053811920302652) article.
+To read a detailed overview of the Open EEGLAB Portal, browse a [conference paper submitted to the IEEE/EMBS Neural Engineering Conference](https://sccn.ucsd.edu/~scott/pdf/Delorme_Open_EEGLAB_Portal_NER18.pdf) in San Francisco (March, 2019), and our later [Neuroimage](https://www.sciencedirect.com/science/article/pii/S1053811920302652) article.
+
+This repository contains the code for the EEGLAB plug-in interfacing the NSG portal through the REST API: nsgportal. The core functions of the plug-in were initially drafted by Arnaud Delorme and further modified and reworked by Ramon Martinez-Cancino, Dung Truong and Scott Makeig (The EEGLAB Team) with substantial contributions from the NSG team at the SDSC.
+
+# Versions
+v1.0 - version used for Neuroimage 2020 article
+
+v2.0 - version used for Nov. 2020 NSG online tutorial (increased robustness and command line calls)
+
+v2.1 - Use EEGLAB on Expanse. Make job submission non-blocking
+
+**Explore the NSGPORTAL Wiki [here](https://github.com/sccn/nsgportal/wiki)**
diff --git a/plugins/nwbio/index.md b/plugins/nwbio/index.md
index c28676fb..82c9522b 100644
--- a/plugins/nwbio/index.md
+++ b/plugins/nwbio/index.md
@@ -3,6 +3,7 @@ layout: default
title: nwbio
long_title: nwbio
parent: Plugins
+has_children: true
nav_order: 12
---
To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/nwbio).
diff --git a/plugins/relica/index.md b/plugins/relica/index.md index 8789a9e5..ccc5756d 100644 --- a/plugins/relica/index.md +++ b/plugins/relica/index.md @@ -3,6 +3,7 @@ layout: default title: relica long_title: relica parent: Plugins +has_children: true nav_order: 21 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/relica). diff --git a/plugins/roiconnect/index.md b/plugins/roiconnect/index.md index f9a3c511..1aafe600 100644 --- a/plugins/roiconnect/index.md +++ b/plugins/roiconnect/index.md @@ -3,6 +3,7 @@ layout: default title: roiconnect long_title: roiconnect parent: Plugins +has_children: true nav_order: 8 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/roiconnect). diff --git a/plugins/std_dipoleDensity/index.md b/plugins/std_dipoleDensity/index.md index 36fec6bd..b146abea 100644 --- a/plugins/std_dipoleDensity/index.md +++ b/plugins/std_dipoleDensity/index.md @@ -3,6 +3,7 @@ layout: default title: std_dipoleDensity long_title: std_dipoleDensity parent: Plugins +has_children: true nav_order: 22 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/std_dipoleDensity). diff --git a/plugins/trimOutlier/index.md b/plugins/trimOutlier/index.md index 7496518d..5f2c732d 100644 --- a/plugins/trimOutlier/index.md +++ b/plugins/trimOutlier/index.md @@ -3,6 +3,7 @@ layout: default title: trimOutlier long_title: trimOutlier parent: Plugins +has_children: true nav_order: 10 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/trimOutlier). diff --git a/plugins/viewprops/index.md b/plugins/viewprops/index.md index e2c4ec79..b7e84bb0 100644 --- a/plugins/viewprops/index.md +++ b/plugins/viewprops/index.md @@ -3,6 +3,7 @@ layout: default title: viewprops long_title: viewprops parent: Plugins +has_children: true nav_order: 24 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/viewprops). diff --git a/plugins/zapline-plus/index.md b/plugins/zapline-plus/index.md index 00fd1c83..ceeb74cf 100644 --- a/plugins/zapline-plus/index.md +++ b/plugins/zapline-plus/index.md @@ -3,6 +3,7 @@ layout: default title: zapline-plus long_title: zapline-plus parent: Plugins +has_children: true nav_order: 18 --- To view the plugin source code, please visit the plugin's [GitHub repository](https://github.com/sccn/zapline-plus).