diff --git a/.anima/.gitignore b/.anima/.gitignore
new file mode 100644
index 00000000..5e465967
--- /dev/null
+++ b/.anima/.gitignore
@@ -0,0 +1 @@
+cache
\ No newline at end of file
diff --git a/src/documentation/.anima/.gitignore b/src/documentation/.anima/.gitignore
new file mode 100644
index 00000000..5e465967
--- /dev/null
+++ b/src/documentation/.anima/.gitignore
@@ -0,0 +1 @@
+cache
\ No newline at end of file
diff --git a/src/documentation/.gitignore b/src/documentation/.gitignore
new file mode 100644
index 00000000..f40fbd8b
--- /dev/null
+++ b/src/documentation/.gitignore
@@ -0,0 +1,5 @@
+_site
+.sass-cache
+.jekyll-cache
+.jekyll-metadata
+vendor
diff --git a/src/documentation/404.html b/src/documentation/404.html
new file mode 100644
index 00000000..086a5c9e
--- /dev/null
+++ b/src/documentation/404.html
@@ -0,0 +1,25 @@
+---
+permalink: /404.html
+layout: default
+---
+
+<style type="text/css" media="screen">
+  .container {
+    margin: 10px auto;
+    max-width: 600px;
+    text-align: center;
+  }
+  h1 {
+    margin: 30px 0;
+    font-size: 4em;
+    line-height: 1;
+    letter-spacing: -1px;
+  }
+</style>
+
+<div class="container">
+  <h1>404</h1>
+
+  <p><strong>Page not found :(</strong></p>
+  <p>The requested page could not be found.</p>
+</div>
diff --git a/src/documentation/Gemfile b/src/documentation/Gemfile
new file mode 100644
index 00000000..fcc45e8a
--- /dev/null
+++ b/src/documentation/Gemfile
@@ -0,0 +1,35 @@
+source "https://rubygems.org"
+# Hello! This is where you manage which Jekyll version is used to run.
+# When you want to use a different version, change it below, save the
+# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
+#
+# bundle exec jekyll serve
+#
+# This will help ensure the proper Jekyll version is running.
+# Happy Jekylling!
+gem "jekyll", "~> 4.3.3"
+# This is the default theme for new Jekyll sites. You may change this to anything you like.
+gem "minima", "~> 2.5"
+# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
+# uncomment the line below. To upgrade, run `bundle update github-pages`.
+# gem "github-pages", group: :jekyll_plugins
+# If you have any plugins, put them here!
+group :jekyll_plugins do
+ gem "jekyll-feed", "~> 0.12"
+ gem 'jekyll-seo-tag', '2.8.0'
+ gem 'jekyll-sitemap', '1.4.0'
+end
+
+# Windows and JRuby do not include zoneinfo files, so bundle the tzinfo-data gem
+# and associated library.
+platforms :mingw, :x64_mingw, :mswin, :jruby do
+ gem "tzinfo", ">= 1", "< 3"
+ gem "tzinfo-data"
+end
+
+# Performance-booster for watching directories on Windows
+gem "wdm", "~> 0.1.1", :platforms => [:mingw, :x64_mingw, :mswin]
+
+# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
+# do not have a Java counterpart.
+gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]
diff --git a/src/documentation/Gemfile.lock b/src/documentation/Gemfile.lock
new file mode 100644
index 00000000..451fefe5
--- /dev/null
+++ b/src/documentation/Gemfile.lock
@@ -0,0 +1,95 @@
+GEM
+ remote: https://rubygems.org/
+ specs:
+ addressable (2.8.6)
+ public_suffix (>= 2.0.2, < 6.0)
+ colorator (1.1.0)
+ concurrent-ruby (1.2.3)
+ em-websocket (0.5.3)
+ eventmachine (>= 0.12.9)
+ http_parser.rb (~> 0)
+ eventmachine (1.2.7)
+ ffi (1.16.3-x64-mingw-ucrt)
+ forwardable-extended (2.6.0)
+ google-protobuf (4.26.1-x64-mingw-ucrt)
+ rake (>= 13)
+ http_parser.rb (0.8.0)
+ i18n (1.14.5)
+ concurrent-ruby (~> 1.0)
+ jekyll (4.3.3)
+ addressable (~> 2.4)
+ colorator (~> 1.0)
+ em-websocket (~> 0.5)
+ i18n (~> 1.0)
+ jekyll-sass-converter (>= 2.0, < 4.0)
+ jekyll-watch (~> 2.0)
+ kramdown (~> 2.3, >= 2.3.1)
+ kramdown-parser-gfm (~> 1.0)
+ liquid (~> 4.0)
+ mercenary (>= 0.3.6, < 0.5)
+ pathutil (~> 0.9)
+ rouge (>= 3.0, < 5.0)
+ safe_yaml (~> 1.0)
+ terminal-table (>= 1.8, < 4.0)
+ webrick (~> 1.7)
+ jekyll-feed (0.17.0)
+ jekyll (>= 3.7, < 5.0)
+ jekyll-sass-converter (3.0.0)
+ sass-embedded (~> 1.54)
+ jekyll-seo-tag (2.8.0)
+ jekyll (>= 3.8, < 5.0)
+ jekyll-sitemap (1.4.0)
+ jekyll (>= 3.7, < 5.0)
+ jekyll-watch (2.2.1)
+ listen (~> 3.0)
+ kramdown (2.4.0)
+ rexml
+ kramdown-parser-gfm (1.1.0)
+ kramdown (~> 2.0)
+ liquid (4.0.4)
+ listen (3.9.0)
+ rb-fsevent (~> 0.10, >= 0.10.3)
+ rb-inotify (~> 0.9, >= 0.9.10)
+ mercenary (0.4.0)
+ minima (2.5.1)
+ jekyll (>= 3.5, < 5.0)
+ jekyll-feed (~> 0.9)
+ jekyll-seo-tag (~> 2.1)
+ pathutil (0.16.2)
+ forwardable-extended (~> 2.6)
+ public_suffix (5.0.5)
+ rake (13.2.1)
+ rb-fsevent (0.11.2)
+ rb-inotify (0.10.1)
+ ffi (~> 1.0)
+ rexml (3.2.6)
+ rouge (4.2.1)
+ safe_yaml (1.0.5)
+ sass-embedded (1.77.1-x64-mingw-ucrt)
+ google-protobuf (>= 3.25, < 5.0)
+ terminal-table (3.0.2)
+ unicode-display_width (>= 1.1.1, < 3)
+ tzinfo (2.0.6)
+ concurrent-ruby (~> 1.0)
+ tzinfo-data (1.2024.1)
+ tzinfo (>= 1.0.0)
+ unicode-display_width (2.5.0)
+ wdm (0.1.1)
+ webrick (1.8.1)
+
+PLATFORMS
+ x64-mingw-ucrt
+
+DEPENDENCIES
+ http_parser.rb (~> 0.6.0)
+ jekyll (~> 4.3.3)
+ jekyll-feed (~> 0.12)
+ jekyll-seo-tag (= 2.8.0)
+ jekyll-sitemap (= 1.4.0)
+ minima (~> 2.5)
+ tzinfo (>= 1, < 3)
+ tzinfo-data
+ wdm (~> 0.1.1)
+
+BUNDLED WITH
+ 2.5.10
diff --git a/src/documentation/_config.yml b/src/documentation/_config.yml
new file mode 100644
index 00000000..797fc35c
--- /dev/null
+++ b/src/documentation/_config.yml
@@ -0,0 +1,78 @@
+# ----
+# Site
+
+title: DiCRA Documents
+url:
+baseurl: /dicra-documents
+google_analytics_key:
+show_full_navigation: true
+
+logo:
+description:
+author:
+ name:
+ email:
+ twitter: # twitter username without the @ symbol
+social:
+ name: DiCRA Documents
+ links:
+
+
+# -----
+# Build
+
+timezone: Etc/UTC
+
+permalink: pretty
+
+plugins:
+ - jekyll-sitemap
+ - jekyll-seo-tag
+ - jekyll-feed
+
+exclude:
+ - Gemfile
+ - Gemfile.lock
+ - README.md
+ - LICENCE
+
+collections:
+ docs:
+ title: Documentation
+ permalink: /:path/
+ output: true
+
+defaults:
+ -
+ scope:
+ path: ""
+ values:
+ layout: default
+ -
+ scope:
+ path: ""
+ type: "docs"
+ values:
+ seo:
+ type: Article
+ _comments:
+ category: Group navigation links with this field
+ order: Used to sort links in the navigation
+ _options:
+ content:
+ width: 800
+ height: 2000
+ -
+ scope:
+ path: ""
+ type: "docs"
+ values:
+ _comments:
+ type: Marks the impact of this release
+
+# -----------
+# CloudCannon
+
+types:
+ - minor
+ - major
diff --git a/src/documentation/_docs/How to Use/analytics.md b/src/documentation/_docs/How to Use/analytics.md
new file mode 100644
index 00000000..04c12489
--- /dev/null
+++ b/src/documentation/_docs/How to Use/analytics.md
@@ -0,0 +1,10 @@
+---
+title: Site Analytics
+category: How to Use
+order: 6
+---
+The Analytics page shows information related to website traffic, such as New Users,
+Sessions, Views, Visitors Geography, and Engagements.
+
+
+ ![Image]({{ site.baseurl }}/images/image15.png)
diff --git a/src/documentation/_docs/How to Use/downloads.md b/src/documentation/_docs/How to Use/downloads.md
new file mode 100644
index 00000000..0d4770d7
--- /dev/null
+++ b/src/documentation/_docs/How to Use/downloads.md
@@ -0,0 +1,20 @@
+---
+title: Downloads
+category: How to Use
+order: 5
+---
+### You will be able to download all the layers used in the portal.
+
+![Image]({{ site.baseurl }}/images/image13.png)
+![Image]({{ site.baseurl }}/images/image14.png)
+
+#### Steps to download a layer
+
+
+1. Select the layer
+2. Select the date – applicable only if the layer has temporal data
+3. Select the type – Raster / Vector
+4. Select the boundary – applicable only if you select Vector as the layer type
+5. Enter Name – the name of the user downloading the data
+6. Enter Email ID
+7. Select Usage Type – Commercial / Non-Commercial
diff --git a/src/documentation/_docs/How to Use/getting-started.md b/src/documentation/_docs/How to Use/getting-started.md
new file mode 100644
index 00000000..2a5b0fb5
--- /dev/null
+++ b/src/documentation/_docs/How to Use/getting-started.md
@@ -0,0 +1,12 @@
+---
+title: Getting Started
+category: How to Use
+order: 2
+---
+
+1. Open any installed browser on your desktop
+2. Go to https://dicra.undp.org.in/
+3. You will enter the portal and the dashboard will appear.
+
+![Image]({{ site.baseurl }}/images/image1.png)
diff --git a/src/documentation/_docs/How to Use/help.md b/src/documentation/_docs/How to Use/help.md
new file mode 100644
index 00000000..bd06dc65
--- /dev/null
+++ b/src/documentation/_docs/How to Use/help.md
@@ -0,0 +1,9 @@
+
diff --git a/src/documentation/_docs/How to Use/introduction.md b/src/documentation/_docs/How to Use/introduction.md
new file mode 100644
index 00000000..0b8e20c3
--- /dev/null
+++ b/src/documentation/_docs/How to Use/introduction.md
@@ -0,0 +1,18 @@
+---
+title: Introduction
+category: How to Use
+order: 1
+---
+
+Data in Climate Resilient Agriculture (DiCRA) is a collaborative digital public good
+which provides open access to key geospatial datasets pertinent to climate resilient
+agriculture. These datasets are curated and validated through collaborative efforts of
+hundreds of data scientists and citizen scientists across the world. The pattern
+detection and data insights emerging from DiCRA are aimed towards strengthening
+evidence-driven policy making for climate resilient food systems. DiCRA is guided by
+the digital public good principles of open access, open software, open code, and open
+APIs.
+
+The platform is facilitated by the Government of Telangana and UNDP, in collaboration
+with Zero Hunger Lab (Netherlands), JADS (Netherlands), ICRISAT, PJTSAU, and RICH. It
+is part of UNDP’s ‘Data for Policy’ initiative supported by the Rockefeller Foundation.
diff --git a/src/documentation/_docs/How to Use/layers.md b/src/documentation/_docs/How to Use/layers.md
new file mode 100644
index 00000000..76358e46
--- /dev/null
+++ b/src/documentation/_docs/How to Use/layers.md
@@ -0,0 +1,85 @@
+---
+title: Layers
+category: How to Use
+order: 4
+---
+
+The main landing page has the option to select layers. Based on the selection, you will
+be able to see a visualisation of the spatial layer on top of the map. The layers are
+listed under the categories they belong to.
+
+Three icons are provided next to each layer name.
+
+![Image]({{ site.baseurl }}/images/image2.png)
+
+1. Visibility icon – turns the layer visibility on/off
+2. Download icon – takes you directly to the download page for the selected layer
+3. Info icon – shows a brief description of the layer
+
+![Image]({{ site.baseurl }}/images/image3.png)
+
+### Map
+
+This is the major module where the data visualisation happens. Depending on the data
+layer, raster and vector visualisations are available.
+
+![Image]({{ site.baseurl }}/images/image4.png)
+
+### Map Controls
+
+The following map controls are available:
+
+1. Zoom-in, Zoom-out & Home
+
+ ![Image]({{ site.baseurl }}/images/image5.png)
+
+2. Admin boundary selection
+District, Mandal & Custom boundary selections are available. Based on the
+selection, you will be able to see the boundary change on the map.
+
+ ![Image]({{ site.baseurl }}/images/image6.png)
+
+3. The ‘Custom’ option can be selected for drawing a custom shape, such as a farm
+boundary. You can search for the location using the search bar. The custom drawing toolbox will be visible when you select the ‘Custom’ option.
+
+ ![Image]({{ site.baseurl }}/images/image7.png)
+
+ * Select ‘Draw a polygon’ or ‘Draw a rectangle’.
+ * Draw a polygon on the map
+
+ ![Image]({{ site.baseurl }}/images/image8.png)
+
+ * A Details section will open with relevant information about the layer & the selected shape.
+
+4. Layer type selection
+
+ A layer type toggle between Raster & Vector is available (depending on the layer)
+
+ ![Image]({{ site.baseurl }}/images/image9.png)
+
+5. Base map style selection
+
+ You can select the base map as Dark or Satellite
+
+ ![Image]({{ site.baseurl }}/images/image10.png)
+
+
+### Details Section
+
+ When you click on any shape on the map, a ‘Details’ section opens. It contains
+ information such as:
+
+ * Shape of the boundary
+ * District / Mandal name
+ * Area
+ * Value of a selected layer
+ * Description
+ * Source
+ * Trend (not applicable for all the layers)
+
+
+ ![Image]({{ site.baseurl }}/images/image12.png)
\ No newline at end of file
diff --git a/src/documentation/_docs/How to Use/menu.md b/src/documentation/_docs/How to Use/menu.md
new file mode 100644
index 00000000..450e7828
--- /dev/null
+++ b/src/documentation/_docs/How to Use/menu.md
@@ -0,0 +1,11 @@
+---
+title: Menu
+category: How to Use
+order: 3
+---
+### The portal has menu options as listed below.
+
+1. Layers
+2. Downloads
+3. Site Analytics
diff --git a/src/documentation/_docs/Installation Guide/backend-installation.md b/src/documentation/_docs/Installation Guide/backend-installation.md
new file mode 100644
index 00000000..bfd2ed54
--- /dev/null
+++ b/src/documentation/_docs/Installation Guide/backend-installation.md
@@ -0,0 +1,84 @@
+---
+title: Backend Installation
+category: Installation Guide
+order: 2
+---
+
+Operating System: Ubuntu
+
+Technology: Python
+
+Database: PostgreSQL with the PostGIS extension
+
+### Running Backend Server using uvicorn (Development server)
+
+Steps:
+
+1. Clone the GitHub repo containing the backend API `git clone https://github.com/undpindia/dicra.git`
+2. Navigate to api folder `cd dicra/src/api`
+3. Install all required packages using the command `pip install -r requirements.txt`
+4. Create a file with the name `config.ini` inside `/config/`. The content of the file should be in the given format.
+
+
+{% highlight shell %}
+[paths]
+Temporaryfiles=temporary file path
+[azureblob]
+Accounturlazure=account url
+Containername=azure container name
+Filepath=parameter path in blob
+Lulcpath=lulc raster path
+[boundaries]
+Districtboundary=district_boundarypath
+[database]
+Sqlalchemyurl=postgresql://username:password@host/dbname
+[gunicorn]
+Accesslogpath=accesslogpath
+Errorlogpath=errorlogpath
+{% endhighlight %}
+
+5. Change `sqlalchemy.url` inside `alembic.ini` (a sample line is shown below)
+6. To run all database migrations, run `alembic upgrade head`. It will create all the necessary tables
+7. Finally, we can run the uvicorn development server using the command `python main.py`. It will start a uvicorn development server at `http://localhost:5004`
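+
+For reference, the `sqlalchemy.url` entry in `alembic.ini` uses the same format as the `Sqlalchemyurl` value in `config.ini` above, for example (placeholder credentials):
+
+{% highlight shell %}
+sqlalchemy.url = postgresql://username:password@host/dbname
+{% endhighlight %}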
+
+
+### Running Backend Server using gunicorn (systemd managed unit service) and Caddy
+Steps:
+
+1. Clone the GitHub repo containing the backend API `git clone https://github.com/undpindia/dicra.git`
+2. Create a conda virtual environment using the command `conda create -n environmentname python=3`
+3. Activate the conda virtual environment using the command `conda activate envname`
+4. Install all the required packages using `pip install -r requirements.txt`
+5. Change `User`, `Group`, `WorkingDirectory`, and `Environment` in the `gunicorn.service` file from the repo (a sketch of such a unit file is given below)
+6. Create the gunicorn service by running `sudo nano /etc/systemd/system/gunicorn.service`
+7. Register the unit file `gunicorn.service` with systemd by executing the following commands.
+
+{% highlight shell %}
+sudo systemctl daemon-reload
+sudo systemctl enable gunicorn.service
+sudo systemctl start gunicorn.service
+{% endhighlight %}
+
+The `systemctl enable` command configures our gunicorn service to resume running when the VM reboots.
+
+The `systemctl start` command starts the gunicorn service immediately by invoking the `ExecStart` command.
+
+To check the status of our gunicorn.service at any point in time, run `sudo systemctl status gunicorn.service`.
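+
+For orientation, a gunicorn unit file for a FastAPI app typically looks like the sketch below. The values here (user, paths, worker count, and the `main:app` module) are placeholders rather than the exact contents of the repo's `gunicorn.service`; adapt them to your environment:
+
+{% highlight shell %}
+[Unit]
+Description=Gunicorn daemon serving the DiCRA backend API
+After=network.target
+
+[Service]
+User=ubuntu
+Group=ubuntu
+WorkingDirectory=/home/ubuntu/dicra/src/api
+# Point PATH at the conda environment created earlier
+Environment="PATH=/home/ubuntu/miniconda3/envs/envname/bin"
+ExecStart=/home/ubuntu/miniconda3/envs/envname/bin/gunicorn main:app \
+    --workers 4 \
+    --worker-class uvicorn.workers.UvicornWorker \
+    --bind 0.0.0.0:8000
+
+[Install]
+WantedBy=multi-user.target
+{% endhighlight %}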
+
+8. Install the Caddy 2 web server. We can install the Caddy web server using the following commands.
+
+
+{% highlight shell %}
+echo "deb [trusted=yes] https://apt.fury.io/caddy/ /" | sudo tee -a /etc/apt/sources.list.d/caddy-fury.list
+sudo apt update
+sudo apt install -y caddy
+{% endhighlight %}
+
+We can check the caddy server status by running `systemctl status caddy`
+
+9. Now we will configure our Caddy 2 web server to serve the FastAPI app running on port 8000 via a reverse proxy. To do so, let's edit the Caddyfile by running `sudo nano /etc/caddy/Caddyfile`.
+
+Replace the contents of the Caddyfile so that it looks like the following:
+
+{% highlight shell %}
+:80
+reverse_proxy 0.0.0.0:8000
+{% endhighlight %}
+
+Restart the Caddy server by running `sudo systemctl restart caddy`.
diff --git a/src/documentation/_docs/Installation Guide/frontend-installation.md b/src/documentation/_docs/Installation Guide/frontend-installation.md
new file mode 100644
index 00000000..6a66f6cd
--- /dev/null
+++ b/src/documentation/_docs/Installation Guide/frontend-installation.md
@@ -0,0 +1,69 @@
+---
+title: Front End Installation
+category: Installation Guide
+order: 1
+---
+
+Operating System: Ubuntu
+
+Technology: ReactJS
+
+### Running Web App using Development Server
+
+Steps:
+
+1. Clone the GitHub repo containing the frontend app `git clone https://github.com/undpindia/dicra.git`
+2. Navigate to the web_portal folder `cd dicra/src/web_portal`
+3. Unzip package-lock.zip `unzip package-lock.zip`
+4. Create a `.env` file and add `REACT_APP_API_KEY=google_map_api_key`
+5. Run the command `npm install` to install the packages required for running the application
+6. After the installation, we can run the web application on the development server using the command `npm start`
+
+
+### Running Web App Production Build In a web Server(Nginx)
+
+Steps:
+
+1. Clone the GitHub repo containing the frontend app `git clone https://github.com/undpindia/dicra.git`
+2. Navigate to the web_portal folder `cd dicra/src/web_portal`
+3. Unzip package-lock.zip `unzip package-lock.zip`
+4. Create a `.env` file and add `REACT_APP_API_KEY=google_map_api_key`
+5. Run the command `npm install` to install the packages required for running the application.
+6. To create the production build, run the command `npm run build`. After the command completes successfully, it will create a folder called `build` containing all the build files
+7. Upload all the build files to the Nginx website deployment location
+8. Make changes to the web server configuration (a sketch is given below)
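+
+As a rough sketch, an Nginx server block for serving the React build could look like the following. The server name and root path are placeholders, and the `try_files` fallback to `index.html` is the usual pattern for single-page React apps; adapt it to your deployment:
+
+{% highlight shell %}
+server {
+    listen 80;
+    server_name dicra.example.com;
+
+    # Directory where the build files were uploaded
+    root /var/www/dicra;
+    index index.html;
+
+    location / {
+        # Fall back to index.html so client-side routes resolve
+        try_files $uri $uri/ /index.html;
+    }
+}
+{% endhighlight %}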
+
+
+### Running Web App Production Build in Azure Blob
+Steps:
+
+1. Clone the GitHub repo containing the frontend app `git clone https://github.com/undpindia/dicra.git`
+2. Navigate to the web_portal folder `cd dicra/src/web_portal`
+3. Unzip package-lock.zip `unzip package-lock.zip`
+4. Create a `.env` file and add `REACT_APP_API_KEY=google_map_api_key`
+5. Run the command `npm install` to install the packages required for running the application
+6. To create the production build, run the command `npm run build`. After the command completes successfully, it will create a folder called `build` containing all the build files
+7. To deploy the React production build in Azure, we need to create a storage account in Azure
+8. After the storage account is deployed, go to the Static website menu, enable the static website option, set the index document name to `index.html`, and leave the error document path empty (it is optional). After saving this, it will provide us a primary endpoint.
+
+ A screenshot of the same is given below. We can use the primary endpoint to test our React production build deployed in the storage account; the same can be done after the completion of step 6.
+
+9. After the static website is enabled, it will create two containers called `$logs` & `$web`, and we need to upload the build files created in step 6 to the `$web` container. We can upload the build files to the `$web` container in multiple ways, e.g., Azure Storage Explorer or the Azure Storage extension for Visual Studio Code by Microsoft
+
+ The steps to upload the build files to `$web` using the Azure Storage extension are:
+
+ * Go to the Azure Storage extension in Visual Studio Code
+ * Sign in using your Azure credentials
+ * Expand the storage account we created for the web app deployment
+ * Under the Blob Containers menu you can see the `$web` container; right-click on it, choose the option ‘Deploy to Static Website via Azure Storage’, browse to the build folder, and complete the deployment.
+
+ After completing the above steps, we can test the deployment using the primary endpoint.
+
+ ![Image]({{ site.baseurl }}/images/image16.png)
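+
+If you prefer the command line, the Azure CLI offers an alternative to the extension-based upload. This is a sketch, assuming the Azure CLI is installed and you are logged in; the storage account name is a placeholder:
+
+{% highlight shell %}
+# Upload the contents of the build folder to the $web container
+az storage blob upload-batch --account-name <storage-account> \
+    --source build --destination '$web'
+{% endhighlight %}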
+
+10. Create a CDN profile for the front end. Create CDN profiles from the Home -> CDN profiles menu. After the CDN profile is deployed successfully, create a CDN endpoint; in the endpoint creation menu, specify any meaningful name, set the origin type to ‘Storage static website’, and set the origin hostname to the hostname generated when the static website was enabled in step 8.
+
+ We can add a custom domain for our CDN here.
+
+11. Create Web Application Firewall policies (WAF). Go to the Home -> Web application Firewall Policies (WAF) menu and click the Create button. On the Basics tab, under the Project details section, select Azure CDN. Under the Instance details section, set the policy mode to Prevention. Add the necessary custom rules under the Custom rules tab. Finally, associate the CDN endpoint on the Association tab and create the WAF policy.
\ No newline at end of file
diff --git a/src/documentation/_docs/Installation Guide/pipeline.md b/src/documentation/_docs/Installation Guide/pipeline.md
new file mode 100644
index 00000000..552c9bd8
--- /dev/null
+++ b/src/documentation/_docs/Installation Guide/pipeline.md
@@ -0,0 +1,183 @@
+---
+title: Pipeline
+category: Installation Guide
+order: 3
+---
+### Installation using Docker Framework
+
+This document captures the steps involved in installing Docker and setting up
+a Docker Swarm cluster to install and configure the DiCRA Data Automation
+Platform components.
+
+The setup primarily addresses a 4-node production configuration - 2
+manager nodes and 2 worker nodes. The Swarm cluster is
+designed to scale up or scale down based on the workload requirements.
+
+### Installation Prerequisites
+
+#### Hardware Requirements
+
+The recommended hardware configuration for installing the DiCRA platform
+is Linux VMs with the below configuration:
+
+
+| VM Spec (Production, 4 Instances) | Value |
+|----|-----|
+| RAM | 16 GB |
+| vCPU | 4 Core |
+| Hard Disk | 32 GB (System) + 100 GB (Application) – 2 mount points |
+| OS | Ubuntu Linux |
+
+
+#### Software Requirements
+
+For Docker-based installation of DiCRA components, the below
+software needs to be installed:
+
+
+| Software & Version | Description | Installation File Information |
+|----|-----|-----|
+| Docker version 18.0 or above | For Docker-based deployment | Open Source Software |
+
+
+### Pre-Deployment Steps
+
+#### Docker Installation (with Internet Connection)
+
+Before starting the installation of DiCRA components, Docker should be
+installed in all the nodes where DiCRA components are to be deployed. All
+the steps in this document should be executed by a user with Root or sudo
+privileges.
+
+Login to each of the 4 Linux nodes and check whether Docker is pre-installed
+and a supported version exists.
+
+Verification Step - Type the below command in the terminal.
+
+`docker --version`
+
+It should output the version of Docker installed, which should be 18.0 or
+above.
+
+`Docker version 20.10.16, build aa7e414`
+
+If a supported Docker is not installed, then install Docker using the steps
+mentioned in the Docker setup instructions given in the below link, or refer to the
+Appendix as per the OS image you have selected for these VMs.
+
+
+[https://docs.docker.com/install/linux/docker-ce/ubuntu/#docker-ee-customers/](https://docs.docker.com/install/linux/docker-ce/ubuntu/#docker-ee-customers/)
+
+
+
+Note: The Docker Swarm needs to be started in one of the nodes
+(Designated Manager Node) and then all the other nodes (Designated
+Worker Nodes or Additional Manager nodes) should join the Swarm
+cluster before starting the deployment of the DiCRA Components.
+
+### Create Docker Swarm Manager
+
+Login to the Manager Node (VM1), or select the first VM as the master
+node among the production VM instances.
+
+In the OS / Linux terminal type the below command:
+
+`#Initialize Swarm`
+
+`docker swarm init --advertise-addr <MANAGER-IP>`
+
+Use the `ifconfig` command to get the IP address of the manager
+node. If multiple IP addresses are found, use an IP address which is accessible
+from all the other 3 Linux nodes. All 4 Linux nodes of the production
+server should be able to communicate with each other. Use `ping <MANAGER-IP>` in all the 3 nodes to confirm the communication and
+connectivity.
+
+Verification Step: #Listing the nodes participating in the swarm
+
+`docker node ls`
+
+### Join Docker Swarm as WORKER
+
+Tokens are required for the worker to join the swarm. To get the worker
+joining token, login to the Manager Swarm node (VM1) and in the
+terminal, type the below command:
+
+`docker swarm join-token -q worker`
+
+Then use the token given by the above command in the worker nodes to
+join the Swarm. Login to the Worker nodes (VM 2 & VM3) and in the
+terminal, type the below command:
+
+`docker swarm join --token <WORKER-TOKEN> <MANAGER-IP>:2377`
+
+Verification Step: In the Manager Node (VM1) terminal, type the below
+command. #Listing the nodes participating in the swarm
+
+`docker node ls`
+
+### Join Docker Swarm as MANAGER
+
+Tokens are required for the manager to join the swarm. To get the
+manager joining token, login to the Manager Swarm node (VM1) and in
+the terminal, type the below command:
+
+`docker swarm join-token -q manager`
+
+Then use the token given by the above command to join the Swarm. Login
+to the Manager (VM4) node and type the below command in the terminal.
+
+`docker swarm join --token <MANAGER-TOKEN> <MANAGER-IP>:2377`
+
+Verification Step: In the Manager Node (VM1) terminal, type the below
+command. # Listing the nodes participating in the swarm
+5
+
+`docker node ls`
+
+The above command should list 4 nodes: VM1 & VM4 would be listed as
+Manager / Leader nodes, and VM2 & VM3 should be listed as worker
+nodes.
+
+#### Docker Swarm Visualiser
+
+Install a Swarm visualizer to manage the containers within the Swarm
+cluster.
+
+{% highlight shell %}
+docker run -it --rm \
+  --name swarmpit-installer \
+  --volume /var/run/docker.sock:/var/run/docker.sock \
+  swarmpit/install:1.9
+{% endhighlight %}
+
+This will install the Swarmpit visualizer; provide a username and password for
+Swarmpit access during the setup.
+
+### Verify the swarm visualiser
+
+Access the Swarmpit web console at http://localhost:888. Use the
+admin credentials configured during the setup. The below screenshot is from
+the single-node setup for development and testing.
+
+![Image]({{ site.baseurl }}/images/image18.png)
+
+### Fail-over Scenarios
+
+#### Manager Fail-over Scenario
+
+All the managers need to have access to the yml files and Docker image
+files so that they can restart any of the services. Ensure that all the
+relevant yml files are copied, or the shared drive is mounted, on both
+master nodes. Both master nodes should be configured with the same
+Docker registry to pull the relevant container images.
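+
+For orientation, services are deployed to the Swarm from such yml files with `docker stack deploy`. The sketch below is a hypothetical minimal stack file; the service name, image, and replica count are placeholders, not the actual DiCRA component definitions:
+
+{% highlight shell %}
+# stack.yml (hypothetical example)
+version: "3.8"
+services:
+  api:
+    image: registry.example.com/dicra/api:latest
+    ports:
+      - "8000:8000"
+    deploy:
+      replicas: 2
+      restart_policy:
+        condition: on-failure
+
+# Deploy (or update) the stack from a manager node with:
+#   docker stack deploy -c stack.yml dicra
+{% endhighlight %}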
+
+#### Worker Node Fail-over scenario
+
+If any of the worker nodes fails, then the manager node will take care of
+starting additional services on the currently available nodes based on the
+load distribution among them. No manual intervention is required in this
+case.
\ No newline at end of file
diff --git a/src/documentation/_docs/_defaults.md b/src/documentation/_docs/_defaults.md
new file mode 100644
index 00000000..7a4919ad
--- /dev/null
+++ b/src/documentation/_docs/_defaults.md
@@ -0,0 +1,5 @@
+---
+title:
+category:
+order: 1
+---
diff --git a/src/documentation/_layouts/default.html b/src/documentation/_layouts/default.html
new file mode 100644
index 00000000..8e140ddf
--- /dev/null
+++ b/src/documentation/_layouts/default.html
@@ -0,0 +1,77 @@
+
+
+
+
+
+
+
+ {% seo %}
+ {% feed_meta %}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Terms of Service and Use
+
+The DiCRA Platform is an open data platform managed by the United Nations Development Programme. These Terms of Service (hereafter ‘Terms’ or ‘these Terms’) describe how DiCRA is managed and how the platform should be used. UNDP will update these Terms as needed, and will post notice of significant updates on our GitHub Page and through the DiCRA Platform. All organizations and individuals using this platform are bound by these Terms. If you do not agree with the Terms, you should discontinue use of DiCRA. If you have any questions or comments about these Terms or DiCRA, please leave a comment on the Discussions tab of our GitHub repository or send an email to acceleratorlab.in@undp.org.
+
+The DiCRA Platform encourages collaboration from the open source community to improve our datasets, analytics, and software. All contributors must adhere to the Code of Conduct detailed [here](https://github.com/undpindia/dicra/blob/main/CODE_OF_CONDUCT.md).
+
+#### Disclaimer
+
+The DiCRA online platform and its content are made available by the UNDP team. The content providers make no warranty, express or implied, nor assume any legal liability or responsibility for the accuracy, completeness, or usefulness of this content or its online services, nor represent that its use may not potentially infringe property rights. UNDP makes no warranty that DiCRA online services and its content will be uninterrupted or error-free, nor that any defects will be corrected, nor that its online services or content will be free of viruses or other harmful components. UNDP assumes no liability for possible damages or implications which occur by direct or indirect use of DiCRA services or content.
+
+#### Privacy
+
+User contact details are only shared with the administrator of the DiCRA Platform if the user needs to download datasets.
+
+UNDP upholds the highest standard of data protection for the personal data of DiCRA users and organization administrators. In case such personal data is exposed, UNDP will notify all affected individuals and remedy the incident.
+
+UNDP continually seeks to understand the behavior of users on the DiCRA platform in order to make improvements. To do so, UNDP uses third-party analytics services, such as Google Analytics. These services use cookies stored on users’ devices to send encrypted information to Google Analytics about how users arrived at DiCRA, what pages they visited on DiCRA, and their actions within those pages. UNDP does not send identifying information (including names, usernames, or email addresses) to Google Analytics. Google Analytics’ use of the data collected from the DiCRA platform is governed by their respective Terms of Use. If you would like to disable the tracking described above, you can install the Google Analytics Opt-out Browser Add-on to disable Google Analytics tracking. The data collected by these tracking systems will be retained indefinitely in order to understand how user behavior is changing over time.
+
+#### Data Licensing
+
+Each of the datasets made available through the DiCRA Platform has its own data use licensing and citation requirements. More information regarding this is available at the following [link](https://github.com/undpindia/dicra/blob/main/analytics/datasets_metadata.pdf). By downloading data through the DiCRA Platform, the user acknowledges the individual data source's data use agreements and must cite the data source as per the citation format of the dataset.
\ No newline at end of file
diff --git a/src/documentation/robots.txt b/src/documentation/robots.txt
new file mode 100644
index 00000000..e31728c8
--- /dev/null
+++ b/src/documentation/robots.txt
@@ -0,0 +1,7 @@
+---
+layout: null
+sitemap: false
+---
+User-agent: *
+Sitemap: {{ site.url }}/sitemap.xml
+Disallow: /search/
\ No newline at end of file
diff --git a/src/documentation/scripts/lunr.min.js b/src/documentation/scripts/lunr.min.js
new file mode 100644
index 00000000..884d1f2a
--- /dev/null
+++ b/src/documentation/scripts/lunr.min.js
@@ -0,0 +1,7 @@
+/**
+ * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 0.7.0
+ * Copyright (C) 2016 Oliver Nightingale
+ * MIT Licensed
+ * @license
+ */
+!function(){var t=function(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};t.version="0.7.0",t.utils={},t.utils.warn=function(t){return function(e){t.console&&console.warn&&console.warn(e)}}(this),t.utils.asString=function(t){return void 0===t||null===t?"":t.toString()},t.EventEmitter=function(){this.events={}},t.EventEmitter.prototype.addListener=function(){var t=Array.prototype.slice.call(arguments),e=t.pop(),n=t;if("function"!=typeof e)throw new TypeError("last argument must be a function");n.forEach(function(t){this.hasHandler(t)||(this.events[t]=[]),this.events[t].push(e)},this)},t.EventEmitter.prototype.removeListener=function(t,e){if(this.hasHandler(t)){var n=this.events[t].indexOf(e);this.events[t].splice(n,1),this.events[t].length||delete this.events[t]}},t.EventEmitter.prototype.emit=function(t){if(this.hasHandler(t)){var e=Array.prototype.slice.call(arguments,1);this.events[t].forEach(function(t){t.apply(void 0,e)})}},t.EventEmitter.prototype.hasHandler=function(t){return t in this.events},t.tokenizer=function(e){return arguments.length&&null!=e&&void 0!=e?Array.isArray(e)?e.map(function(e){return t.utils.asString(e).toLowerCase()}):e.toString().trim().toLowerCase().split(t.tokenizer.seperator):[]},t.tokenizer.seperator=/[\s\-]+/,t.tokenizer.load=function(t){var e=this.registeredFunctions[t];if(!e)throw new Error("Cannot load un-registered function: "+t);return e},t.tokenizer.label="default",t.tokenizer.registeredFunctions={"default":t.tokenizer},t.tokenizer.registerFunction=function(e,n){n in this.registeredFunctions&&t.utils.warn("Overwriting existing tokenizer: "+n),e.label=n,this.registeredFunctions[n]=e},t.Pipeline=function(){this._stack=[]},t.Pipeline.registeredFunctions={},t.Pipeline.registerFunction=function(e,n){n in this.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+n),e.label=n,t.Pipeline.registeredFunctions[e.label]=e},t.Pipeline.warnIfFunctionNotRegistered=function(e){var n=e.label&&e.label in this.registeredFunctions;n||t.utils.warn("Function is not registered with pipeline. 
This may cause problems when serialising the index.\n",e)},t.Pipeline.load=function(e){var n=new t.Pipeline;return e.forEach(function(e){var i=t.Pipeline.registeredFunctions[e];if(!i)throw new Error("Cannot load un-registered function: "+e);n.add(i)}),n},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(e){t.Pipeline.warnIfFunctionNotRegistered(e),this._stack.push(e)},this)},t.Pipeline.prototype.after=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._stack.indexOf(e);if(-1==i)throw new Error("Cannot find existingFn");i+=1,this._stack.splice(i,0,n)},t.Pipeline.prototype.before=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._stack.indexOf(e);if(-1==i)throw new Error("Cannot find existingFn");this._stack.splice(i,0,n)},t.Pipeline.prototype.remove=function(t){var e=this._stack.indexOf(t);-1!=e&&this._stack.splice(e,1)},t.Pipeline.prototype.run=function(t){for(var e=[],n=t.length,i=this._stack.length,r=0;n>r;r++){for(var o=t[r],s=0;i>s&&(o=this._stack[s](o,r,t),void 0!==o&&""!==o);s++);void 0!==o&&""!==o&&e.push(o)}return e},t.Pipeline.prototype.reset=function(){this._stack=[]},t.Pipeline.prototype.toJSON=function(){return this._stack.map(function(e){return t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Vector=function(){this._magnitude=null,this.list=void 0,this.length=0},t.Vector.Node=function(t,e,n){this.idx=t,this.val=e,this.next=n},t.Vector.prototype.insert=function(e,n){this._magnitude=void 0;var i=this.list;if(!i)return this.list=new t.Vector.Node(e,n,i),this.length++;if(en.idx?n=n.next:(i+=e.val*n.val,e=e.next,n=n.next);return i},t.Vector.prototype.similarity=function(t){return this.dot(t)/(this.magnitude()*t.magnitude())},t.SortedSet=function(){this.length=0,this.elements=[]},t.SortedSet.load=function(t){var e=new this;return e.elements=t,e.length=t.length,e},t.SortedSet.prototype.add=function(){var t,e;for(t=0;t1;){if(o===t)return r;t>o&&(e=r),o>t&&(n=r),i=n-e,r=e+Math.floor(i/2),o=this.elements[r]}return o===t?r:-1},t.SortedSet.prototype.locationFor=function(t){for(var e=0,n=this.elements.length,i=n-e,r=e+Math.floor(i/2),o=this.elements[r];i>1;)t>o&&(e=r),o>t&&(n=r),i=n-e,r=e+Math.floor(i/2),o=this.elements[r];return o>t?r:t>o?r+1:void 0},t.SortedSet.prototype.intersect=function(e){for(var n=new t.SortedSet,i=0,r=0,o=this.length,s=e.length,a=this.elements,h=e.elements;;){if(i>o-1||r>s-1)break;a[i]!==h[r]?a[i]h[r]&&r++:(n.add(a[i]),i++,r++)}return n},t.SortedSet.prototype.clone=function(){var e=new t.SortedSet;return e.elements=this.toArray(),e.length=e.elements.length,e},t.SortedSet.prototype.union=function(t){var e,n,i;this.length>=t.length?(e=this,n=t):(e=t,n=this),i=e.clone();for(var r=0,o=n.toArray();rp;p++)c[p]===a&&d++;h+=d/f*l.boost}}this.tokenStore.add(a,{ref:o,tf:h})}n&&this.eventEmitter.emit("add",e,this)},t.Index.prototype.remove=function(t,e){var n=t[this._ref],e=void 0===e?!0:e;if(this.documentStore.has(n)){var i=this.documentStore.get(n);this.documentStore.remove(n),i.forEach(function(t){this.tokenStore.remove(t,n)},this),e&&this.eventEmitter.emit("remove",t,this)}},t.Index.prototype.update=function(t,e){var e=void 0===e?!0:e;this.remove(t,!1),this.add(t,!1),e&&this.eventEmitter.emit("update",t,this)},t.Index.prototype.idf=function(t){var e="@"+t;if(Object.prototype.hasOwnProperty.call(this._idfCache,e))return this._idfCache[e];var n=this.tokenStore.count(t),i=1;return 
n>0&&(i=1+Math.log(this.documentStore.length/n)),this._idfCache[e]=i},t.Index.prototype.search=function(e){var n=this.pipeline.run(this.tokenizerFn(e)),i=new t.Vector,r=[],o=this._fields.reduce(function(t,e){return t+e.boost},0),s=n.some(function(t){return this.tokenStore.has(t)},this);if(!s)return[];n.forEach(function(e,n,s){var a=1/s.length*this._fields.length*o,h=this,u=this.tokenStore.expand(e).reduce(function(n,r){var o=h.corpusTokens.indexOf(r),s=h.idf(r),u=1,l=new t.SortedSet;if(r!==e){var c=Math.max(3,r.length-e.length);u=1/Math.log(c)}o>-1&&i.insert(o,a*s*u);for(var f=h.tokenStore.get(r),d=Object.keys(f),p=d.length,v=0;p>v;v++)l.add(f[d[v]].ref);return n.union(l)},new t.SortedSet);r.push(u)},this);var a=r.reduce(function(t,e){return t.intersect(e)});return a.map(function(t){return{ref:t,score:i.similarity(this.documentVector(t))}},this).sort(function(t,e){return e.score-t.score})},t.Index.prototype.documentVector=function(e){for(var n=this.documentStore.get(e),i=n.length,r=new t.Vector,o=0;i>o;o++){var s=n.elements[o],a=this.tokenStore.get(s)[e].tf,h=this.idf(s);r.insert(this.corpusTokens.indexOf(s),a*h)}return r},t.Index.prototype.toJSON=function(){return{version:t.version,fields:this._fields,ref:this._ref,tokenizer:this.tokenizerFn.label,documentStore:this.documentStore.toJSON(),tokenStore:this.tokenStore.toJSON(),corpusTokens:this.corpusTokens.toJSON(),pipeline:this.pipeline.toJSON()}},t.Index.prototype.use=function(t){var e=Array.prototype.slice.call(arguments,1);e.unshift(this),t.apply(this,e)},t.Store=function(){this.store={},this.length=0},t.Store.load=function(e){var n=new this;return n.length=e.length,n.store=Object.keys(e.store).reduce(function(n,i){return n[i]=t.SortedSet.load(e.store[i]),n},{}),n},t.Store.prototype.set=function(t,e){this.has(t)||this.length++,this.store[t]=e},t.Store.prototype.get=function(t){return this.store[t]},t.Store.prototype.has=function(t){return t in this.store},t.Store.prototype.remove=function(t){this.has(t)&&(delete this.store[t],this.length--)},t.Store.prototype.toJSON=function(){return{store:this.store,length:this.length}},t.stemmer=function(){var t={ational:"ate",tional:"tion",enci:"ence",anci:"ance",izer:"ize",bli:"ble",alli:"al",entli:"ent",eli:"e",ousli:"ous",ization:"ize",ation:"ate",ator:"ate",alism:"al",iveness:"ive",fulness:"ful",ousness:"ous",aliti:"al",iviti:"ive",biliti:"ble",logi:"log"},e={icate:"ic",ative:"",alize:"al",iciti:"ic",ical:"ic",ful:"",ness:""},n="[^aeiou]",i="[aeiouy]",r=n+"[^aeiouy]*",o=i+"[aeiou]*",s="^("+r+")?"+o+r,a="^("+r+")?"+o+r+"("+o+")?$",h="^("+r+")?"+o+r+o+r,u="^("+r+")?"+i,l=new RegExp(s),c=new RegExp(h),f=new RegExp(a),d=new RegExp(u),p=/^(.+?)(ss|i)es$/,v=/^(.+?)([^s])s$/,g=/^(.+?)eed$/,m=/^(.+?)(ed|ing)$/,y=/.$/,S=/(at|bl|iz)$/,w=new RegExp("([^aeiouylsz])\\1$"),k=new RegExp("^"+r+i+"[^aeiouwxy]$"),x=/^(.+?[^aeiou])y$/,b=/^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/,E=/^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/,F=/^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/,_=/^(.+?)(s|t)(ion)$/,z=/^(.+?)e$/,O=/ll$/,P=new RegExp("^"+r+i+"[^aeiouwxy]$"),T=function(n){var i,r,o,s,a,h,u;if(n.length<3)return n;if(o=n.substr(0,1),"y"==o&&(n=o.toUpperCase()+n.substr(1)),s=p,a=v,s.test(n)?n=n.replace(s,"$1$2"):a.test(n)&&(n=n.replace(a,"$1$2")),s=g,a=m,s.test(n)){var T=s.exec(n);s=l,s.test(T[1])&&(s=y,n=n.replace(s,""))}else if(a.test(n)){var 
T=a.exec(n);i=T[1],a=d,a.test(i)&&(n=i,a=S,h=w,u=k,a.test(n)?n+="e":h.test(n)?(s=y,n=n.replace(s,"")):u.test(n)&&(n+="e"))}if(s=x,s.test(n)){var T=s.exec(n);i=T[1],n=i+"i"}if(s=b,s.test(n)){var T=s.exec(n);i=T[1],r=T[2],s=l,s.test(i)&&(n=i+t[r])}if(s=E,s.test(n)){var T=s.exec(n);i=T[1],r=T[2],s=l,s.test(i)&&(n=i+e[r])}if(s=F,a=_,s.test(n)){var T=s.exec(n);i=T[1],s=c,s.test(i)&&(n=i)}else if(a.test(n)){var T=a.exec(n);i=T[1]+T[2],a=c,a.test(i)&&(n=i)}if(s=z,s.test(n)){var T=s.exec(n);i=T[1],s=c,a=f,h=P,(s.test(i)||a.test(i)&&!h.test(i))&&(n=i)}return s=O,a=c,s.test(n)&&a.test(n)&&(s=y,n=n.replace(s,"")),"y"==o&&(n=o.toLowerCase()+n.substr(1)),n};return T}(),t.Pipeline.registerFunction(t.stemmer,"stemmer"),t.generateStopWordFilter=function(t){var e=t.reduce(function(t,e){return t[e]=e,t},{});return function(t){return t&&e[t]!==t?t:void 0}},t.stopWordFilter=t.generateStopWordFilter(["a","able","about","across","after","all","almost","also","am","among","an","and","any","are","as","at","be","because","been","but","by","can","cannot","could","dear","did","do","does","either","else","ever","every","for","from","get","got","had","has","have","he","her","hers","him","his","how","however","i","if","in","into","is","it","its","just","least","let","like","likely","may","me","might","most","must","my","neither","no","nor","not","of","off","often","on","only","or","other","our","own","rather","said","say","says","she","should","since","so","some","than","that","the","their","them","then","there","these","they","this","tis","to","too","twas","us","wants","was","we","were","what","when","where","which","while","who","whom","why","will","with","would","yet","you","your"]),t.Pipeline.registerFunction(t.stopWordFilter,"stopWordFilter"),t.trimmer=function(t){return t.replace(/^\W+/,"").replace(/\W+$/,"")},t.Pipeline.registerFunction(t.trimmer,"trimmer"),t.TokenStore=function(){this.root={docs:{}},this.length=0},t.TokenStore.load=function(t){var e=new this;return e.root=t.root,e.length=t.length,e},t.TokenStore.prototype.add=function(t,e,n){var n=n||this.root,i=t.charAt(0),r=t.slice(1);return i in n||(n[i]={docs:{}}),0===r.length?(n[i].docs[e.ref]=e,void(this.length+=1)):this.add(r,e,n[i])},t.TokenStore.prototype.has=function(t){if(!t)return!1;for(var e=this.root,n=0;n= 0) {
+ break;
+ }
+
+ match = content.toLowerCase().indexOf(parts[i].toLowerCase());
+ matchLength = parts[i].length;
+ }
+
+ // Create preview
+ if (match >= 0) {
+ var start = match - (previewLength / 2),
+ end = start > 0 ? match + matchLength + (previewLength / 2) : previewLength;
+
+ preview = content.substring(start, end).trim();
+
+ if (start > 0) {
+ preview = "..." + preview;
+ }
+
+ if (end < content.length) {
+ preview = preview + "...";
+ }
+
+ // Highlight query parts
+ preview = preview.replace(new RegExp("(" + parts.join("|") + ")", "gi"), "<strong>$1</strong>");
+ } else {
+ // Use start of content if no match found
+ preview = content.substring(0, previewLength).trim() + (content.length > previewLength ? "..." : "");
+ }
+
+ return preview;
+ }
+
+ function displaySearchResults(results, query) {
+ var searchResultsEl = document.getElementById("search-results"),
+ searchProcessEl = document.getElementById("search-process");
+
+ if (results.length) {
+ var resultsHTML = "";
+ results.forEach(function (result) {
+ var item = window.data[result.ref],
+ contentPreview = getPreview(query, item.content, 170),
+ titlePreview = getPreview(query, item.title);
+
+ resultsHTML += "