From 34d439045f483111f92b3516e71e98a0a3fbb72e Mon Sep 17 00:00:00 2001
From: haozhu233 adj_matrix (np.ndarray) – A 2D adjacency matrix to save. gene_names (np.ndarray) – A 1D numpy array with all the target gene names. – names. tf_names (np.ndarray, optional) – A 1D numpy array with all the TF gene names. – names. top_gene_percentile (int) – If this value is set, only the top k absolute values (values in the adjacency matrix will be kept. All the other) – zero. (will be set to) – values (values in the adjacency matrix will be kept. All the other) zero. (will be set to) genes (str, List(str)) – A single gene or a list of genes to inspect. k (int) – Top-k edges to inspect on each node. If k=-1, export all hops (str) – Number of hops to explore. We can either do a “2.5” or "2.5". ("1.5" hop travesal around selected genes. Default is) – "2.5". ("1.5" hop travesal around selected genes. Default is) genes (str, List(str)) – A single gene or a list of genes to inspect. k (int) – Top-k edges to inspect on each node. If k=-1, export all. hops (str) – Number of hops of the neighborhood to explore. Default "2.5". (is) – "2.5". (is) edge_widths (List) – The widths for edges for different edge width levels. – levels. plot_engine (str) – Choose which network plot engine to use. Default "pyvis". (is) – "pyvis". (is) **kwargs – Keyword arguments to be passed to x_0 (torch.FloatTensor) – Torch tensor for expression data. Rows are genes (cells and columns are) – genes (cells and columns are) t (torch.LongTensor) – Torch tensor for diffusion time steps. data_dir (str) – Parent directory to save and load the data. If the path exist (does not) – a (it will be created. Data will be saved in) – path. (subdirectory under the provided) – exist (does not) a (it will be created. Data will be saved in) path. (subdirectory under the provided) data_dir (str) – Parent directory to save and load the data. If the path exist (does not) – a (it will be created. Data will be saved in) – path. (subdirectory under the provided) – exist (does not) a (it will be created. 
Data will be saved in) path. (subdirectory under the provided) benchmark_data (str) – Benchmark datasets. Choose among “hESC”, “hHep”, "mDC" – "mESC" – "mHSC" – "mHSC-GM" – "mHSC-L". (and) – "mDC" "mESC" "mHSC" "mHSC-GM" "mHSC-L". (and) benchmark_setting (str) – Benchmark settings. Choose among “500_STRING”, "1000_STRING" – "500_Non-ChIP" – "1000_Non-ChIP" – "500_ChIP-seq" – "1000_STRING" "500_Non-ChIP" "1000_Non-ChIP" "500_ChIP-seq" data_dir (str) – Parent directory to save and load the data. If the path exist (does not) – a (it will be created. Data will be saved in) – path. (subdirectory under the provided) – exist (does not) a (it will be created. Data will be saved in) path. (subdirectory under the provided) Warning If GRN#
GRN#
GRN#
plot_pyvis
.RegDiffusionTrainerParameters:
regdiffusion.data.load_atlas_microgliaParameters:
regdiffusion.data.load_beelineParameters:
regdiffusion.data.load_hammond_microgliaParameters:
RegDiffusion
assign
is True
the optimizer must be created after
-the call to load_state_dict
.load_state_dict
unless
+get_swap_module_params_on_conversion()
is True
.
state_dict
match the keys returned by this module’s
state_dict()
function. Default: True
-assign (bool, optional) – whether to assign items in the state
-dictionary to their corresponding keys in the module instead
-of copying them inplace into the module’s current parameters and buffers.
-When False
, the properties of the tensors in the current
-module are preserved while when True
, the properties of the
-Tensors in the state dict are preserved.
-Default: False
assign (bool, optional) – When False
, the properties of the tensors
+in the current module are preserved while when True
, the
+properties of the Tensors in the state dict are preserved. The only
+exception is the requires_grad
field of
+Default: ``False``
Register a pre-hook for the load_state_dict()
method.
Register a pre-hook for the state_dict()
method.
These hooks will be called with arguments: self
, prefix
,
and keep_vars
before calling state_dict
on self
. The registered
hooks can be used to perform pre-processing before the state_dict
@@ -1375,7 +1374,7 @@
Set extra state contained in the loaded state_dict.
This function is called from load_state_dict()
to handle any extra state
found within the state_dict. Implement this function and a corresponding
diff --git a/_modules/index.html b/_modules/index.html
index 24c3b99..17e6326 100644
--- a/_modules/index.html
+++ b/_modules/index.html
@@ -37,7 +37,7 @@
-
+
diff --git a/_modules/regdiffusion/data/beeline.html b/_modules/regdiffusion/data/beeline.html
index 85569b0..b1a0233 100644
--- a/_modules/regdiffusion/data/beeline.html
+++ b/_modules/regdiffusion/data/beeline.html
@@ -37,7 +37,7 @@
-
+
diff --git a/_modules/regdiffusion/data/microglia.html b/_modules/regdiffusion/data/microglia.html
index 6d621fe..3a0e659 100644
--- a/_modules/regdiffusion/data/microglia.html
+++ b/_modules/regdiffusion/data/microglia.html
@@ -37,7 +37,7 @@
-
+
diff --git a/_modules/regdiffusion/evaluator.html b/_modules/regdiffusion/evaluator.html
index 29a99ff..c7516e7 100644
--- a/_modules/regdiffusion/evaluator.html
+++ b/_modules/regdiffusion/evaluator.html
@@ -37,7 +37,7 @@
-
+
diff --git a/_modules/regdiffusion/grn.html b/_modules/regdiffusion/grn.html
index b2dc4be..b8f9c86 100644
--- a/_modules/regdiffusion/grn.html
+++ b/_modules/regdiffusion/grn.html
@@ -37,7 +37,7 @@
-
+
diff --git a/_modules/regdiffusion/models/regdiffusion.html b/_modules/regdiffusion/models/regdiffusion.html
index cdbc502..c7b14b7 100644
--- a/_modules/regdiffusion/models/regdiffusion.html
+++ b/_modules/regdiffusion/models/regdiffusion.html
@@ -37,7 +37,7 @@
-
+
diff --git a/_modules/regdiffusion/trainer.html b/_modules/regdiffusion/trainer.html
index 5d8d850..ab23ab0 100644
--- a/_modules/regdiffusion/trainer.html
+++ b/_modules/regdiffusion/trainer.html
@@ -37,7 +37,7 @@
-
+
diff --git a/_sources/index.rst b/_sources/index.rst
index 0ad1576..5f46e20 100644
--- a/_sources/index.rst
+++ b/_sources/index.rst
@@ -35,7 +35,7 @@ consists of 4 components: the ``RegDiffusionTrainer`` class, the ``GRN`` class,
export or visualize local regions. For example, you can use the
``.visualize_local_neighborhood()`` to generate a similar plot as used in
the RegDiffusion paper. You can also extract the underlying adjacency list
- using the ``.extract_node_2hop_neighborhood()`` method.
+ using the ``.extract_local_neighborhood()`` method.
- ``GRNEvaluator``: The ground truth of regulatory relationship often exist as
list of edges but the values to be evaluated are often in adjacency matrix.
The ``GRNEvaluator`` class is designed to fill the gap. Right now it supports
@@ -43,6 +43,15 @@ consists of 4 components: the ``RegDiffusionTrainer`` class, the ``GRN`` class,
- ``data`` module: Right now, the ``data`` module includes quick access to BEELINE
benchmarks and our preprocessed single cell datasets on mouse microglia.
+Model Structure
+---------------
+
+RegDiffusion includes an innovative model structure to estimate the added noise. Here is a high-level illustration. Please refer to our paper for details.
+
+.. image:: https://github.com/TuftsBCB/RegDiffusion/blob/master/resources/regdiffusion_structure.png?raw=true
+ :width: 700
+ :alt: RegDiffusion Structure
+
Understanding the Inferred Networks
-----------------------------------
After the ``RegDiffusion`` model converges, what you get is simply an
@@ -53,7 +62,7 @@ the most. Check out the tutorials on the left side for how to perform a similar
network analysis like the one we did in the paper. We are also working on an
interactive tool to analyze saved GRN object.
-.. image:: https://github.com/TuftsBCB/RegDiffusion/blob/master/resources/apoe_reg.png?raw=true
+.. image:: https://github.com/TuftsBCB/RegDiffusion/blob/master/resources/apoe_net.png?raw=true
:width: 700
:alt: Inferred network around ApoE
diff --git a/_sources/quick_tour.md b/_sources/quick_tour.md
index b7cb1ba..be162ee 100644
--- a/_sources/quick_tour.md
+++ b/_sources/quick_tour.md
@@ -134,7 +134,7 @@ Here we have a fairly obvious bipartisan graph. It also makes sense to use some
>>> from sklearn.cluster import KMeans
>>> from node2vec import Node2Vec
>>>
->>> adj_table = grn.extract_node_2hop_neighborhood('HIST1H1D', 40)
+>>> adj_table = grn.extract_local_neighborhood('HIST1H1D', 40)
>>> nxg = nx.from_pandas_edgelist(adj_table)
>>>
>>> node2vec = Node2Vec(nxg, dimensions=64, walk_length=30, num_walks=200,
diff --git a/_static/basic.css b/_static/basic.css
index e760386..2af6139 100644
--- a/_static/basic.css
+++ b/_static/basic.css
@@ -4,7 +4,7 @@
*
* Sphinx stylesheet -- basic theme.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
diff --git a/_static/doctools.js b/_static/doctools.js
index d06a71d..4d67807 100644
--- a/_static/doctools.js
+++ b/_static/doctools.js
@@ -4,7 +4,7 @@
*
* Base JavaScript utilities for all Sphinx HTML documentation.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
diff --git a/_static/language_data.js b/_static/language_data.js
index 250f566..367b8ed 100644
--- a/_static/language_data.js
+++ b/_static/language_data.js
@@ -5,7 +5,7 @@
* This script contains the language-specific data used by searchtools.js,
* namely the list of stopwords, stemmer, scorer and splitter.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -13,7 +13,7 @@
var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
-/* Non-minified version is copied as a separate JS file, is available */
+/* Non-minified version is copied as a separate JS file, if available */
/**
* Porter Stemmer
diff --git a/_static/searchtools.js b/_static/searchtools.js
index 7918c3f..92da3f8 100644
--- a/_static/searchtools.js
+++ b/_static/searchtools.js
@@ -4,7 +4,7 @@
*
* Sphinx JavaScript utilities for the full-text search.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -99,7 +99,7 @@ const _displayItem = (item, searchTerms, highlightTerms) => {
.then((data) => {
if (data)
listItem.appendChild(
- Search.makeSearchSummary(data, searchTerms)
+ Search.makeSearchSummary(data, searchTerms, anchor)
);
// highlight search terms in the summary
if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
@@ -116,8 +116,8 @@ const _finishSearch = (resultCount) => {
);
else
Search.status.innerText = _(
- `Search finished, found ${resultCount} page(s) matching the search query.`
- );
+ "Search finished, found ${resultCount} page(s) matching the search query."
+ ).replace('${resultCount}', resultCount);
};
const _displayNextItem = (
results,
@@ -137,6 +137,22 @@ const _displayNextItem = (
// search finished, update title and status message
else _finishSearch(resultCount);
};
+// Helper function used by query() to order search results.
+// Each input is an array of [docname, title, anchor, descr, score, filename].
+// Order the results by score (in opposite order of appearance, since the
+// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically.
+const _orderResultsByScoreThenName = (a, b) => {
+ const leftScore = a[4];
+ const rightScore = b[4];
+ if (leftScore === rightScore) {
+ // same score: sort alphabetically
+ const leftTitle = a[1].toLowerCase();
+ const rightTitle = b[1].toLowerCase();
+ if (leftTitle === rightTitle) return 0;
+ return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
+ }
+ return leftScore > rightScore ? 1 : -1;
+};
/**
* Default splitQuery function. Can be overridden in ``sphinx.search`` with a
@@ -160,13 +176,26 @@ const Search = {
_queued_query: null,
_pulse_status: -1,
- htmlToText: (htmlString) => {
+ htmlToText: (htmlString, anchor) => {
const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
- htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
+ for (const removalQuery of [".headerlinks", "script", "style"]) {
+ htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() });
+ }
+ if (anchor) {
+ const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`);
+ if (anchorContent) return anchorContent.textContent;
+
+ console.warn(
+ `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.`
+ );
+ }
+
+ // if anchor not specified or not found, fall back to main content
const docContent = htmlElement.querySelector('[role="main"]');
- if (docContent !== undefined) return docContent.textContent;
+ if (docContent) return docContent.textContent;
+
console.warn(
- "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template."
+ "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template."
);
return "";
},
@@ -239,16 +268,7 @@ const Search = {
else Search.deferQuery(query);
},
- /**
- * execute search (requires search index to be loaded)
- */
- query: (query) => {
- const filenames = Search._index.filenames;
- const docNames = Search._index.docnames;
- const titles = Search._index.titles;
- const allTitles = Search._index.alltitles;
- const indexEntries = Search._index.indexentries;
-
+ _parseQuery: (query) => {
// stem the search terms and add them to the correct list
const stemmer = new Stemmer();
const searchTerms = new Set();
@@ -284,16 +304,32 @@ const Search = {
// console.info("required: ", [...searchTerms]);
// console.info("excluded: ", [...excludedTerms]);
- // array of [docname, title, anchor, descr, score, filename]
- let results = [];
+ return [query, searchTerms, excludedTerms, highlightTerms, objectTerms];
+ },
+
+ /**
+ * execute search (requires search index to be loaded)
+ */
+ _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+ const allTitles = Search._index.alltitles;
+ const indexEntries = Search._index.indexentries;
+
+ // Collect multiple result groups to be sorted separately and then ordered.
+ // Each is an array of [docname, title, anchor, descr, score, filename].
+ const normalResults = [];
+ const nonMainIndexResults = [];
+
_removeChildren(document.getElementById("search-progress"));
- const queryLower = query.toLowerCase();
+ const queryLower = query.toLowerCase().trim();
for (const [title, foundTitles] of Object.entries(allTitles)) {
- if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
+ if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) {
for (const [file, id] of foundTitles) {
let score = Math.round(100 * queryLower.length / title.length)
- results.push([
+ normalResults.push([
docNames[file],
titles[file] !== title ? `${titles[file]} > ${title}` : title,
id !== null ? "#" + id : "",
@@ -308,46 +344,47 @@ const Search = {
// search for explicit entries in index directives
for (const [entry, foundEntries] of Object.entries(indexEntries)) {
if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
- for (const [file, id] of foundEntries) {
- let score = Math.round(100 * queryLower.length / entry.length)
- results.push([
+ for (const [file, id, isMain] of foundEntries) {
+ const score = Math.round(100 * queryLower.length / entry.length);
+ const result = [
docNames[file],
titles[file],
id ? "#" + id : "",
null,
score,
filenames[file],
- ]);
+ ];
+ if (isMain) {
+ normalResults.push(result);
+ } else {
+ nonMainIndexResults.push(result);
+ }
}
}
}
// lookup as object
objectTerms.forEach((term) =>
- results.push(...Search.performObjectSearch(term, objectTerms))
+ normalResults.push(...Search.performObjectSearch(term, objectTerms))
);
// lookup as search terms in fulltext
- results.push(...Search.performTermsSearch(searchTerms, excludedTerms));
+ normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms));
// let the scorer override scores with a custom scoring function
- if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));
-
- // now sort the results by score (in opposite order of appearance, since the
- // display function below uses pop() to retrieve items) and then
- // alphabetically
- results.sort((a, b) => {
- const leftScore = a[4];
- const rightScore = b[4];
- if (leftScore === rightScore) {
- // same score: sort alphabetically
- const leftTitle = a[1].toLowerCase();
- const rightTitle = b[1].toLowerCase();
- if (leftTitle === rightTitle) return 0;
- return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
- }
- return leftScore > rightScore ? 1 : -1;
- });
+ if (Scorer.score) {
+ normalResults.forEach((item) => (item[4] = Scorer.score(item)));
+ nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item)));
+ }
+
+ // Sort each group of results by score and then alphabetically by name.
+ normalResults.sort(_orderResultsByScoreThenName);
+ nonMainIndexResults.sort(_orderResultsByScoreThenName);
+
+ // Combine the result groups in (reverse) order.
+ // Non-main index entries are typically arbitrary cross-references,
+ // so display them after other results.
+ let results = [...nonMainIndexResults, ...normalResults];
// remove duplicate search results
// note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
@@ -361,7 +398,12 @@ const Search = {
return acc;
}, []);
- results = results.reverse();
+ return results.reverse();
+ },
+
+ query: (query) => {
+ const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query);
+ const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms);
// for debugging
//Search.lastresults = results.slice(); // a copy
@@ -466,14 +508,18 @@ const Search = {
// add support for partial matches
if (word.length > 2) {
const escapedWord = _escapeRegExp(word);
- Object.keys(terms).forEach((term) => {
- if (term.match(escapedWord) && !terms[word])
- arr.push({ files: terms[term], score: Scorer.partialTerm });
- });
- Object.keys(titleTerms).forEach((term) => {
- if (term.match(escapedWord) && !titleTerms[word])
- arr.push({ files: titleTerms[word], score: Scorer.partialTitle });
- });
+ if (!terms.hasOwnProperty(word)) {
+ Object.keys(terms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: terms[term], score: Scorer.partialTerm });
+ });
+ }
+ if (!titleTerms.hasOwnProperty(word)) {
+ Object.keys(titleTerms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
+ });
+ }
}
// no match but word was a required one
@@ -496,9 +542,8 @@ const Search = {
// create the mapping
files.forEach((file) => {
- if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
- fileMap.get(file).push(word);
- else fileMap.set(file, [word]);
+ if (!fileMap.has(file)) fileMap.set(file, [word]);
+ else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word);
});
});
@@ -549,8 +594,8 @@ const Search = {
* search summary for a given text. keywords is a list
* of stemmed words.
*/
- makeSearchSummary: (htmlText, keywords) => {
- const text = Search.htmlToText(htmlText);
+ makeSearchSummary: (htmlText, keywords, anchor) => {
+ const text = Search.htmlToText(htmlText, anchor);
if (text === "") return null;
const textLower = text.toLowerCase();
diff --git a/data_module.html b/data_module.html
index 7575ab4..6fd1615 100644
--- a/data_module.html
+++ b/data_module.html
@@ -38,7 +38,7 @@
-
+
diff --git a/genindex.html b/genindex.html
index a22c5fc..5d4dfdf 100644
--- a/genindex.html
+++ b/genindex.html
@@ -37,7 +37,7 @@
-
+
diff --git a/index.html b/index.html
index 30fd41a..8f59d2e 100644
--- a/index.html
+++ b/index.html
@@ -38,7 +38,7 @@
-
+
@@ -331,6 +331,7 @@
.extract_local_neighborhood()
method.
GRNEvaluator
: The ground truth of regulatory relationship often exist as
list of edges but the values to be evaluated are often in adjacency matrix.
The GRNEvaluator
class is designed to fill the gap. Right now it supports
@@ -390,6 +391,12 @@
RegDiffusion includes an innovative model structure to estimate the added noise. Here is a high-level illustration. Please refer to our paper for details.
+ + +After the RegDiffusion
model converges, what you get is simply an
@@ -399,7 +406,8 @@