From 76d413ea767c4db45b6dd251b4914f5b8c45efda Mon Sep 17 00:00:00 2001 From: "Yu-Hang \"Maxin\" Tang" Date: Mon, 24 Jan 2022 15:31:30 -0800 Subject: [PATCH] Update to description. --- README.md | 14 ++------------ docs/index.md | 14 ++------------ mkdocs.yml | 11 +++++------ 3 files changed, 9 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index 580507cb..3fb8d7c3 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Welcome to the documentation of FunFact! +# FunFact: Build Your Own Tensor Decomposition Model in a Breeze [![CI](https://github.com/yhtang/FunFact/actions/workflows/ci.yml/badge.svg?branch=develop)](https://github.com/yhtang/FunFact/actions/workflows/ci.yml) [![Coverage](https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/yhtang/839011f3f7a6bab680b18cbd9a45d2d3/raw/coverage-develop.json)](https://badge.fury.io/py/funfact) @@ -6,17 +6,7 @@ [![Documentation Status](https://readthedocs.org/projects/funfact/badge/?version=latest)](https://funfact.readthedocs.io/en/latest/?badge=latest) [![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) -## Overview - -[FunFact](https://github.com/yhtang/FunFact.git) is a Python package that -enables flexible and concise expressions of tensor algebra through an Einstein -notation-based syntax. A particular emphasis is on automating the design of -matrix and tensor factorization models. It’s areas of applications include -quantum circuit synthesis, tensor decomposition, and neural network -compression. It is GPU- and parallelization-ready thanks to modern numerical linear algebra backends such as JAX/TensorFlow and PyTorch. - +[FunFact](https://github.com/yhtang/FunFact.git) is a Python package for accelerating the design of matrix and tensor factorization algorithms. It features a powerful programming interface that augments the NumPy APIs with Einstein notations for writing very concise tensor expressions. Given an arbitrary forward calculation scheme, the package will solve the corresponding inverse problem using stochastic gradient descent, automatic differentiation, and multi-replica vectorization. Its application areas include quantum circuit synthesis, tensor decomposition, and neural network compression. It is GPU- and parallelization-ready thanks to modern numerical linear algebra backends such as JAX/TensorFlow and PyTorch. ## Quick start guide diff --git a/docs/index.md b/docs/index.md index 580507cb..3fb8d7c3 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,4 +1,4 @@ -# Welcome to the documentation of FunFact! +# FunFact: Build Your Own Tensor Decomposition Model in a Breeze [![CI](https://github.com/yhtang/FunFact/actions/workflows/ci.yml/badge.svg?branch=develop)](https://github.com/yhtang/FunFact/actions/workflows/ci.yml) [![Coverage](https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/yhtang/839011f3f7a6bab680b18cbd9a45d2d3/raw/coverage-develop.json)](https://badge.fury.io/py/funfact) @@ -6,17 +6,7 @@ [![Documentation Status](https://readthedocs.org/projects/funfact/badge/?version=latest)](https://funfact.readthedocs.io/en/latest/?badge=latest) [![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) -## Overview - -[FunFact](https://github.com/yhtang/FunFact.git) is a Python package that -enables flexible and concise expressions of tensor algebra through an Einstein -notation-based syntax. A particular emphasis is on automating the design of -matrix and tensor factorization models. It’s areas of applications include -quantum circuit synthesis, tensor decomposition, and neural network -compression. It is GPU- and parallelization-ready thanks to modern numerical -linear algebra backends such as JAX/TensorFlow and PyTorch. - +[FunFact](https://github.com/yhtang/FunFact.git) is a Python package for accelerating the design of matrix and tensor factorization algorithms. It features a powerful programming interface that augments the NumPy APIs with Einstein notations for writing very concise tensor expressions. Given an arbitrary forward calculation scheme, the package will solve the corresponding inverse problem using stochastic gradient descent, automatic differentiation, and multi-replica vectorization. Its application areas include quantum circuit synthesis, tensor decomposition, and neural network compression. It is GPU- and parallelization-ready thanks to modern numerical linear algebra backends such as JAX/TensorFlow and PyTorch. ## Quick start guide diff --git a/mkdocs.yml b/mkdocs.yml index 45e10b18..d6ad56bf 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,5 +1,4 @@ -site_name: "FunFact: Tensor Algebra and Deep Learning via Einstein Notations" -# site_description: "Automatic documentation from sources, for MkDocs." +site_name: "FunFact: Tensor Decomposition, Your Way" site_url: "https://funfact.readthedocs.io/" repo_url: "https://github.com/yhtang/FunFact/" edit_uri: "blob/develop/docs/" @@ -30,8 +29,9 @@ theme: - navigation.top plugins: - search - # - gen-files: - # scripts: + - gen-files: + scripts: + - docs/copy_readme.py # - docs/gen_ref_nav.py # - docs/gen_credits.py - section-index @@ -72,10 +72,9 @@ extra_javascript: - https://polyfill.io/v3/polyfill.min.js?features=es6 - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js nav: - - index.md + - Home: index.md - pages/installation.md - Examples: - # - Nonlinear matrix approximation: pages/examples/nma.md - Nonlinear matrix approximation: examples/matrix-approximation.ipynb - pages/cheatsheet.md - User Guide: