From 7289a9db74b5450b038694fa84a8aeb3e368e096 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 16:27:44 +0000 Subject: [PATCH] deploy: 304d42daeca9eb9e6c4b79b5dc6169c2245637c1 --- 404.html | 4 ++-- acknowledgement/index.html | 4 ++-- assets/js/{09722083.c9a62a66.js => 09722083.ab4a93ab.js} | 2 +- assets/js/{09f56f30.1eba0a1e.js => 09f56f30.bc3e8a3f.js} | 2 +- assets/js/{0e4359fd.41168bc6.js => 0e4359fd.ba9a5169.js} | 2 +- assets/js/{11da9ee4.1303f746.js => 11da9ee4.0594975c.js} | 2 +- assets/js/{1dc85e61.b9965823.js => 1dc85e61.9b1a218e.js} | 2 +- assets/js/{21dd1498.f137dea3.js => 21dd1498.353c9298.js} | 2 +- assets/js/{2406662c.ab664a38.js => 2406662c.eb0d4ab5.js} | 2 +- assets/js/{2b1adfae.ba3c5060.js => 2b1adfae.65f6cb6c.js} | 2 +- assets/js/{2b7d82ba.e100cab7.js => 2b7d82ba.7e2c4a40.js} | 2 +- assets/js/{2e2e5152.33980a74.js => 2e2e5152.e9751cc7.js} | 2 +- assets/js/{36f9137d.4437e9f9.js => 36f9137d.af2cc1b8.js} | 2 +- assets/js/{389e2b0f.7b1734df.js => 389e2b0f.d07c3e77.js} | 2 +- assets/js/{391576c4.9b8ecdf8.js => 391576c4.8129e6e7.js} | 2 +- assets/js/{3a93ea81.afbe68e0.js => 3a93ea81.ba42fafc.js} | 2 +- assets/js/{3cfdff65.fb0bd44a.js => 3cfdff65.fcef3080.js} | 2 +- assets/js/{3f4d8b80.1b019b6c.js => 3f4d8b80.e31fb42b.js} | 2 +- assets/js/{47a4a695.c2e3bbad.js => 47a4a695.1d355b94.js} | 2 +- assets/js/{4ba7e5a3.07c00a5e.js => 4ba7e5a3.942b72bd.js} | 2 +- assets/js/{521adc3d.4acaee7f.js => 521adc3d.4741c575.js} | 2 +- assets/js/{5514662e.9a79f6b2.js => 5514662e.7f729865.js} | 2 +- assets/js/{5890eb41.464e3bf2.js => 5890eb41.bc2091d8.js} | 2 +- assets/js/{5db33872.14f5bb11.js => 5db33872.75d53e63.js} | 2 +- assets/js/{613f0a4f.a0904496.js => 613f0a4f.af857580.js} | 2 +- assets/js/{63528e5f.07c1d667.js => 63528e5f.1f846c29.js} | 2 +- assets/js/{682467b2.681b6d9e.js => 682467b2.94ea0f7b.js} | 2 +- assets/js/{6b741ffd.a3e8248b.js => 6b741ffd.2028c3a5.js} | 2 +- assets/js/{707d3f57.95128426.js => 707d3f57.6bb3e3d6.js} | 2 +- assets/js/{7a61fedb.8ef3b0cc.js => 7a61fedb.b40e5606.js} | 2 +- assets/js/{8cf96c0d.7f6201a0.js => 8cf96c0d.4e15a67b.js} | 2 +- assets/js/{94db8302.cce39f08.js => 94db8302.6b3b63a5.js} | 2 +- assets/js/{97d64f0a.285b90ae.js => 97d64f0a.9ca2404d.js} | 2 +- assets/js/{99b0f44e.48e96a60.js => 99b0f44e.eb4f1c72.js} | 2 +- assets/js/{9bfda053.12c4ad81.js => 9bfda053.43b3657e.js} | 2 +- assets/js/{9e298cf7.254832af.js => 9e298cf7.379a0bd6.js} | 2 +- assets/js/{9f389101.a1aff6ed.js => 9f389101.1c470da1.js} | 2 +- assets/js/{a09c2993.d90872b2.js => a09c2993.ccaeace5.js} | 2 +- assets/js/{a1c3d222.14eb0f1e.js => a1c3d222.4714947a.js} | 2 +- assets/js/{b258fc9c.612ce544.js => b258fc9c.ab7ec905.js} | 2 +- assets/js/{b6e2013e.571c43af.js => b6e2013e.16157cdc.js} | 2 +- assets/js/{bbb26d62.ee73a795.js => bbb26d62.1c3dafe1.js} | 2 +- assets/js/{bd7c3f6d.f9421184.js => bd7c3f6d.64962f6e.js} | 2 +- assets/js/{cdd0b013.6d4e6062.js => cdd0b013.54b1a2a7.js} | 2 +- assets/js/{cfec30f8.e51dd3e9.js => cfec30f8.4752e40c.js} | 2 +- assets/js/{d013d563.7bbc8097.js => d013d563.3c7e8cb7.js} | 2 +- assets/js/{d306cda8.b590a76f.js => d306cda8.5dd22227.js} | 2 +- assets/js/{d8f096f7.0dc3596e.js => d8f096f7.ae3fb799.js} | 2 +- assets/js/{dbeba2b5.075f10f0.js => dbeba2b5.84064520.js} | 2 +- assets/js/{dca73612.7d8098f0.js => dca73612.61ddb0b9.js} | 2 +- assets/js/{de77a223.3f497115.js => de77a223.d065e143.js} | 2 +- assets/js/{e53f82ff.cb5ff291.js => e53f82ff.ed442302.js} | 2 +- assets/js/{e747ec83.7fef5581.js => 
e747ec83.75332ce7.js} | 2 +- assets/js/{e9a2555b.9a06c52f.js => e9a2555b.9f6e4e96.js} | 2 +- assets/js/{e9bae93a.e2db0242.js => e9bae93a.48910b99.js} | 2 +- assets/js/{ecdc8e44.a365e563.js => ecdc8e44.6e432d61.js} | 2 +- assets/js/{ed5ef82a.2bf7b169.js => ed5ef82a.9e89411d.js} | 2 +- assets/js/{f704770b.9a92e5fb.js => f704770b.3ea04af8.js} | 2 +- assets/js/{f8d48938.d768df8f.js => f8d48938.a5c11f2a.js} | 2 +- assets/js/{f931684d.db345f4e.js => f931684d.35ed4819.js} | 2 +- assets/js/{f97cc2c7.2c1cd784.js => f97cc2c7.0cec9eeb.js} | 2 +- assets/js/{fdea7475.78a5ce30.js => fdea7475.a3583034.js} | 2 +- .../{runtime~main.50131110.js => runtime~main.97c8b5aa.js} | 2 +- blog/2016/03/11/blog-post/index.html | 4 ++-- blog/2017/04/10/blog-post-two/index.html | 4 ++-- blog/2017/09/25/testing-rss/index.html | 4 ++-- blog/2017/09/26/adding-rss/index.html | 4 ++-- blog/2017/10/24/new-version-1.0.0/index.html | 4 ++-- blog/archive/index.html | 4 ++-- blog/index.html | 4 ++-- contact/index.html | 4 ++-- docs/access-dsri/index.html | 6 +++--- docs/access-um-servers/index.html | 6 +++--- docs/anatomy-of-an-application/index.html | 6 +++--- docs/catalog-data-streaming/index.html | 6 +++--- docs/catalog-genomics/index.html | 6 +++--- docs/catalog-imaging/index.html | 6 +++--- docs/catalog-opendatahub/index.html | 6 +++--- docs/catalog-utilities/index.html | 6 +++--- docs/checkpointing-ml-training/index.html | 6 +++--- docs/contribute/index.html | 6 +++--- docs/dask-cluster/index.html | 6 +++--- docs/dask-tutorial/index.html | 6 +++--- docs/deploy-database/index.html | 6 +++--- docs/deploy-from-docker/index.html | 6 +++--- docs/deploy-gitlab-runner/index.html | 6 +++--- docs/deploy-jupyter/index.html | 6 +++--- docs/deploy-jupyterhub/index.html | 6 +++--- docs/deploy-matlab/index.html | 6 +++--- docs/deploy-on-gpu/index.html | 6 +++--- docs/deploy-rstudio/index.html | 6 +++--- docs/deploy-spark/index.html | 6 +++--- docs/deploy-vscode/index.html | 6 +++--- docs/enabling-vpn-wsl/index.html | 6 +++--- docs/glossary/index.html | 6 +++--- docs/guide-dockerfile-to-openshift/index.html | 6 +++--- docs/guide-known-issues/index.html | 6 +++--- docs/guide-local-install/index.html | 6 +++--- docs/guide-monitoring/index.html | 6 +++--- docs/guide-publish-image/index.html | 6 +++--- docs/guide-vpn/index.html | 6 +++--- docs/guide-workshop/index.html | 6 +++--- docs/helm/index.html | 6 +++--- docs/increase-process-speed/index.html | 6 +++--- docs/index.html | 6 +++--- docs/jupyterhub-spark/index.html | 6 +++--- docs/jupyterhub-workspace/index.html | 6 +++--- docs/login-docker-registry/index.html | 6 +++--- docs/mpi-jobs/index.html | 6 +++--- docs/neuroscience/index.html | 6 +++--- docs/openshift-commands/index.html | 6 +++--- docs/openshift-delete-objects/index.html | 6 +++--- docs/openshift-delete-services/index.html | 6 +++--- docs/openshift-install/index.html | 6 +++--- docs/openshift-load-data/index.html | 6 +++--- docs/openshift-storage/index.html | 6 +++--- docs/operators/index.html | 6 +++--- docs/prepare-project-for-dsri/index.html | 6 +++--- docs/profile-pytorch-code/index.html | 6 +++--- docs/project-management/index.html | 6 +++--- docs/sensible-data/index.html | 6 +++--- docs/speeding-tensorflow-dl/index.html | 6 +++--- docs/start-workspace/index.html | 6 +++--- docs/surf-offerings/index.html | 6 +++--- docs/tools-machine-learning/index.html | 6 +++--- docs/workflows-airflow/index.html | 6 +++--- docs/workflows-argo/index.html | 6 +++--- docs/workflows-cwl/index.html | 6 +++--- 
docs/workflows-github-actions/index.html | 6 +++--- docs/workflows-introduction/index.html | 6 +++--- docs/workflows-nextflow/index.html | 6 +++--- gpu-booking/index.html | 4 ++-- help/index.html | 4 ++-- index.html | 4 ++-- register/index.html | 4 ++-- search-index.json | 2 +- search/index.html | 4 ++-- ticket/index.html | 4 ++-- training/index.html | 4 ++-- 139 files changed, 276 insertions(+), 276 deletions(-) rename assets/js/{09722083.c9a62a66.js => 09722083.ab4a93ab.js} (98%) rename assets/js/{09f56f30.1eba0a1e.js => 09f56f30.bc3e8a3f.js} (99%) rename assets/js/{0e4359fd.41168bc6.js => 0e4359fd.ba9a5169.js} (99%) rename assets/js/{11da9ee4.1303f746.js => 11da9ee4.0594975c.js} (99%) rename assets/js/{1dc85e61.b9965823.js => 1dc85e61.9b1a218e.js} (98%) rename assets/js/{21dd1498.f137dea3.js => 21dd1498.353c9298.js} (99%) rename assets/js/{2406662c.ab664a38.js => 2406662c.eb0d4ab5.js} (99%) rename assets/js/{2b1adfae.ba3c5060.js => 2b1adfae.65f6cb6c.js} (99%) rename assets/js/{2b7d82ba.e100cab7.js => 2b7d82ba.7e2c4a40.js} (99%) rename assets/js/{2e2e5152.33980a74.js => 2e2e5152.e9751cc7.js} (99%) rename assets/js/{36f9137d.4437e9f9.js => 36f9137d.af2cc1b8.js} (99%) rename assets/js/{389e2b0f.7b1734df.js => 389e2b0f.d07c3e77.js} (99%) rename assets/js/{391576c4.9b8ecdf8.js => 391576c4.8129e6e7.js} (98%) rename assets/js/{3a93ea81.afbe68e0.js => 3a93ea81.ba42fafc.js} (99%) rename assets/js/{3cfdff65.fb0bd44a.js => 3cfdff65.fcef3080.js} (99%) rename assets/js/{3f4d8b80.1b019b6c.js => 3f4d8b80.e31fb42b.js} (99%) rename assets/js/{47a4a695.c2e3bbad.js => 47a4a695.1d355b94.js} (99%) rename assets/js/{4ba7e5a3.07c00a5e.js => 4ba7e5a3.942b72bd.js} (99%) rename assets/js/{521adc3d.4acaee7f.js => 521adc3d.4741c575.js} (99%) rename assets/js/{5514662e.9a79f6b2.js => 5514662e.7f729865.js} (98%) rename assets/js/{5890eb41.464e3bf2.js => 5890eb41.bc2091d8.js} (99%) rename assets/js/{5db33872.14f5bb11.js => 5db33872.75d53e63.js} (99%) rename assets/js/{613f0a4f.a0904496.js => 613f0a4f.af857580.js} (99%) rename assets/js/{63528e5f.07c1d667.js => 63528e5f.1f846c29.js} (99%) rename assets/js/{682467b2.681b6d9e.js => 682467b2.94ea0f7b.js} (99%) rename assets/js/{6b741ffd.a3e8248b.js => 6b741ffd.2028c3a5.js} (98%) rename assets/js/{707d3f57.95128426.js => 707d3f57.6bb3e3d6.js} (99%) rename assets/js/{7a61fedb.8ef3b0cc.js => 7a61fedb.b40e5606.js} (99%) rename assets/js/{8cf96c0d.7f6201a0.js => 8cf96c0d.4e15a67b.js} (99%) rename assets/js/{94db8302.cce39f08.js => 94db8302.6b3b63a5.js} (99%) rename assets/js/{97d64f0a.285b90ae.js => 97d64f0a.9ca2404d.js} (99%) rename assets/js/{99b0f44e.48e96a60.js => 99b0f44e.eb4f1c72.js} (99%) rename assets/js/{9bfda053.12c4ad81.js => 9bfda053.43b3657e.js} (99%) rename assets/js/{9e298cf7.254832af.js => 9e298cf7.379a0bd6.js} (98%) rename assets/js/{9f389101.a1aff6ed.js => 9f389101.1c470da1.js} (98%) rename assets/js/{a09c2993.d90872b2.js => a09c2993.ccaeace5.js} (99%) rename assets/js/{a1c3d222.14eb0f1e.js => a1c3d222.4714947a.js} (99%) rename assets/js/{b258fc9c.612ce544.js => b258fc9c.ab7ec905.js} (99%) rename assets/js/{b6e2013e.571c43af.js => b6e2013e.16157cdc.js} (98%) rename assets/js/{bbb26d62.ee73a795.js => bbb26d62.1c3dafe1.js} (99%) rename assets/js/{bd7c3f6d.f9421184.js => bd7c3f6d.64962f6e.js} (98%) rename assets/js/{cdd0b013.6d4e6062.js => cdd0b013.54b1a2a7.js} (99%) rename assets/js/{cfec30f8.e51dd3e9.js => cfec30f8.4752e40c.js} (99%) rename assets/js/{d013d563.7bbc8097.js => d013d563.3c7e8cb7.js} (99%) rename assets/js/{d306cda8.b590a76f.js => 
d306cda8.5dd22227.js} (99%) rename assets/js/{d8f096f7.0dc3596e.js => d8f096f7.ae3fb799.js} (99%) rename assets/js/{dbeba2b5.075f10f0.js => dbeba2b5.84064520.js} (99%) rename assets/js/{dca73612.7d8098f0.js => dca73612.61ddb0b9.js} (99%) rename assets/js/{de77a223.3f497115.js => de77a223.d065e143.js} (99%) rename assets/js/{e53f82ff.cb5ff291.js => e53f82ff.ed442302.js} (99%) rename assets/js/{e747ec83.7fef5581.js => e747ec83.75332ce7.js} (98%) rename assets/js/{e9a2555b.9a06c52f.js => e9a2555b.9f6e4e96.js} (99%) rename assets/js/{e9bae93a.e2db0242.js => e9bae93a.48910b99.js} (98%) rename assets/js/{ecdc8e44.a365e563.js => ecdc8e44.6e432d61.js} (99%) rename assets/js/{ed5ef82a.2bf7b169.js => ed5ef82a.9e89411d.js} (99%) rename assets/js/{f704770b.9a92e5fb.js => f704770b.3ea04af8.js} (99%) rename assets/js/{f8d48938.d768df8f.js => f8d48938.a5c11f2a.js} (99%) rename assets/js/{f931684d.db345f4e.js => f931684d.35ed4819.js} (99%) rename assets/js/{f97cc2c7.2c1cd784.js => f97cc2c7.0cec9eeb.js} (99%) rename assets/js/{fdea7475.78a5ce30.js => fdea7475.a3583034.js} (99%) rename assets/js/{runtime~main.50131110.js => runtime~main.97c8b5aa.js} (81%) diff --git a/404.html b/404.html index cbf6b2de6..339e62912 100644 --- a/404.html +++ b/404.html @@ -16,13 +16,13 @@ - +

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/acknowledgement/index.html b/acknowledgement/index.html index 5db0d1cd3..e9a41ef26 100644 --- a/acknowledgement/index.html +++ b/acknowledgement/index.html @@ -16,13 +16,13 @@ - +

Acknowledging the DSRI in your publications

If you are planning to present or publish work that was made possible by using the DSRI, we encourage you to acknowledge the use of the DSRI. For this purpose, we propose adding the following sentence to your publication:

"This research was made possible, in part, using the Data Science Research Infrastructure (DSRI) hosted at Maastricht University."

Citations

- + \ No newline at end of file diff --git a/assets/js/09722083.c9a62a66.js b/assets/js/09722083.ab4a93ab.js similarity index 98% rename from assets/js/09722083.c9a62a66.js rename to assets/js/09722083.ab4a93ab.js index b9899889d..7974d1955 100644 --- a/assets/js/09722083.c9a62a66.js +++ b/assets/js/09722083.ab4a93ab.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9676],{5680:(e,t,r)=>{r.d(t,{xA:()=>l,yg:()=>g});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var c=n.createContext({}),p=function(e){var t=n.useContext(c),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},l=function(e){var t=p(e.components);return n.createElement(c.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,l=s(e,["components","mdxType","originalType","parentName"]),m=p(r),g=a,y=m["".concat(c,".").concat(g)]||m[g]||u[g]||o;return r?n.createElement(y,i(i({ref:t},l),{},{components:r})):n.createElement(y,i({ref:t},l))}));function g(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=m;var s={};for(var c in t)hasOwnProperty.call(t,c)&&(s[c]=t[c]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>l,contentTitle:()=>c,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],s={id:"catalog-genomics",title:"Genomics"},c=void 0,p={unversionedId:"catalog-genomics",id:"catalog-genomics",title:"Genomics",description:"Feel free to propose new services using pull requests, or to request them by creating new issues.",source:"@site/docs/catalog-genomics.md",sourceDirName:".",slug:"/catalog-genomics",permalink:"/docs/catalog-genomics",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-genomics.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-genomics",title:"Genomics"},sidebar:"docs",previous:{title:"Neuroscience research",permalink:"/docs/neuroscience"},next:{title:"Imaging softwares",permalink:"/docs/catalog-imaging"}},l={},u=[{value:"Trinity RNA Seq",id:"trinity-rna-seq",level:2}],m={toc:u};function g(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"Feel free to propose new services using ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/pulls"},"pull requests"),", or to request them by creating ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"new issues"),"."),(0,o.yg)("h2",{id:"trinity-rna-seq"},"Trinity RNA Seq"),(0,o.yg)("p",null,"Trinity assembles transcript sequences from Illumina RNA-Seq data. 
It represents a novel method for the efficient and robust de novo reconstruction of transcriptomes from RNA-seq data. See ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/trinityrnaseq/trinityrnaseq/wiki"},"their documentation"),"."),(0,o.yg)("p",null,"You can start a container using the ",(0,o.yg)("strong",{parentName:"p"},"Trinity RNA-Seq")," template in the ",(0,o.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,o.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,o.yg)("img",{src:"/img/screenshot_trinityrnaseq.png",alt:"Deploy Trinity RNA Seq",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"This template uses the Trinity RNA-Seq image hosted in the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/orgs/maastrichtu-ids/packages/container/package/trinityrnaseq"},"UM IDS GitHub Container Registry")," "),(0,o.yg)("admonition",{title:"Persistent data folder",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"\ud83d\udcc2 Use the ",(0,o.yg)("inlineCode",{parentName:"p"},"/usr/local/src/work")," folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),".")),(0,o.yg)("p",null,"We enabled the port ",(0,o.yg)("inlineCode",{parentName:"p"},"8787")," in the container, if you need to deploy applications."))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9676],{5680:(e,t,r)=>{r.d(t,{xA:()=>l,yg:()=>g});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var c=n.createContext({}),p=function(e){var t=n.useContext(c),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},l=function(e){var t=p(e.components);return n.createElement(c.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,l=s(e,["components","mdxType","originalType","parentName"]),m=p(r),g=a,y=m["".concat(c,".").concat(g)]||m[g]||u[g]||o;return r?n.createElement(y,i(i({ref:t},l),{},{components:r})):n.createElement(y,i({ref:t},l))}));function g(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=m;var s={};for(var c in t)hasOwnProperty.call(t,c)&&(s[c]=t[c]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>l,contentTitle:()=>c,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],s={id:"catalog-genomics",title:"Genomics"},c=void 0,p={unversionedId:"catalog-genomics",id:"catalog-genomics",title:"Genomics",description:"Feel free to propose 
new services using pull requests, or to request them by creating new issues.",source:"@site/docs/catalog-genomics.md",sourceDirName:".",slug:"/catalog-genomics",permalink:"/docs/catalog-genomics",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-genomics.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-genomics",title:"Genomics"},sidebar:"docs",previous:{title:"Neuroscience research",permalink:"/docs/neuroscience"},next:{title:"Imaging softwares",permalink:"/docs/catalog-imaging"}},l={},u=[{value:"Trinity RNA Seq",id:"trinity-rna-seq",level:2}],m={toc:u};function g(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"Feel free to propose new services using ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/pulls"},"pull requests"),", or to request them by creating ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"new issues"),"."),(0,o.yg)("h2",{id:"trinity-rna-seq"},"Trinity RNA Seq"),(0,o.yg)("p",null,"Trinity assembles transcript sequences from Illumina RNA-Seq data. It represents a novel method for the efficient and robust de novo reconstruction of transcriptomes from RNA-seq data. See ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/trinityrnaseq/trinityrnaseq/wiki"},"their documentation"),"."),(0,o.yg)("p",null,"You can start a container using the ",(0,o.yg)("strong",{parentName:"p"},"Trinity RNA-Seq")," template in the ",(0,o.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,o.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,o.yg)("img",{src:"/img/screenshot_trinityrnaseq.png",alt:"Deploy Trinity RNA Seq",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"This template uses the Trinity RNA-Seq image hosted in the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/orgs/maastrichtu-ids/packages/container/package/trinityrnaseq"},"UM IDS GitHub Container Registry")," "),(0,o.yg)("admonition",{title:"Persistent data folder",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"\ud83d\udcc2 Use the ",(0,o.yg)("inlineCode",{parentName:"p"},"/usr/local/src/work")," folder (home of the root user) to store your data in the existing persistent storage. 
You can find the persistent volumes in the DSRI web UI, go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),".")),(0,o.yg)("p",null,"We enabled the port ",(0,o.yg)("inlineCode",{parentName:"p"},"8787")," in the container, if you need to deploy applications."))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/09f56f30.1eba0a1e.js b/assets/js/09f56f30.bc3e8a3f.js similarity index 99% rename from assets/js/09f56f30.1eba0a1e.js rename to assets/js/09f56f30.bc3e8a3f.js index 914e5d55e..1f532c079 100644 --- a/assets/js/09f56f30.1eba0a1e.js +++ b/assets/js/09f56f30.bc3e8a3f.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3360],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>d});var a=n(6540);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var s=a.createContext({}),p=function(e){var t=a.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},c=function(e){var t=p(e.components);return a.createElement(s.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},m=a.forwardRef((function(e,t){var n=e.components,o=e.mdxType,i=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),m=p(n),d=o,y=m["".concat(s,".").concat(d)]||m[d]||u[d]||i;return n?a.createElement(y,r(r({ref:t},c),{},{components:n})):a.createElement(y,r({ref:t},c))}));function d(e,t){var n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var i=n.length,r=new Array(i);r[0]=m;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:o,r[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>d,frontMatter:()=>l,metadata:()=>p,toc:()=>u});var a=n(9668),o=n(1367),i=(n(6540),n(5680)),r=["components"],l={id:"anatomy-of-an-application",title:"Anatomy of a DSRI application"},s=void 0,p={unversionedId:"anatomy-of-an-application",id:"anatomy-of-an-application",title:"Anatomy of a DSRI application",description:"This page will present you how an applications is typically built using an OpenShift template. This will also help you understand more in general the different objects that needs to be defined when deploying an application on a Kubernetes cluster. 
Even if OpenShift templates can only be deployed to OpenShift, the objects they define are the same as in Kubernetes (apart from the Route which becomes Ingress).",source:"@site/docs/anatomy-of-an-application.md",sourceDirName:".",slug:"/anatomy-of-an-application",permalink:"/docs/anatomy-of-an-application",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/anatomy-of-an-application.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"anatomy-of-an-application",title:"Anatomy of a DSRI application"},sidebar:"docs",previous:{title:"Deploy from a Docker image",permalink:"/docs/deploy-from-docker"},next:{title:"Install from Helm charts",permalink:"/docs/helm"}},c={},u=[{value:"Application walkthrough",id:"application-walkthrough",level:2},{value:"Parameters",id:"parameters",level:3},{value:"Image",id:"image",level:3},{value:"Create storage",id:"create-storage",level:3},{value:"Secret",id:"secret",level:3},{value:"Deployment",id:"deployment",level:3},{value:"Pod spec",id:"pod-spec",level:3},{value:"Environment variables in the container",id:"environment-variables-in-the-container",level:3},{value:"Mount storage",id:"mount-storage",level:3},{value:"Security context",id:"security-context",level:3},{value:"Service",id:"service",level:3},{value:"Route",id:"route",level:3},{value:"The complete application",id:"the-complete-application",level:2},{value:"Add a configuration file",id:"add-a-configuration-file",level:2},{value:"Add automated health checks",id:"add-automated-health-checks",level:2},{value:"Define resource limits",id:"define-resource-limits",level:2},{value:"Build your own application template",id:"build-your-own-application-template",level:2}],m={toc:u};function d(e){var t=e.components,n=(0,o.A)(e,r);return(0,i.yg)("wrapper",(0,a.A)({},m,n,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("p",null,"This page will present you how an applications is typically built using an ",(0,i.yg)("strong",{parentName:"p"},"OpenShift template"),". This will also help you understand more in general the different objects that needs to be defined when ",(0,i.yg)("strong",{parentName:"p"},"deploying an application on a Kubernetes cluster"),". Even if OpenShift templates can only be deployed to OpenShift, the objects they define are the same as in Kubernetes (apart from the Route which becomes Ingress)."),(0,i.yg)("p",null,"There are other ways to describe applications on OpenShift cluster (here the DSRI), such as Helm or Operators. But OpenShift templates are the easiest and quickest way to build an application that can be deployed from the DSRI web UI catalog in a few clicks, and by providing a few parameters."),(0,i.yg)("p",null,"It is better to have a basic understanding of what a docker container is to fully understand this walkthrough, but it should already gives a good idea of the different objects deployed with each DSRI application."),(0,i.yg)("p",null,"We will use the template used to deploy JupyterLab as example, and we will describe the goal, importance and caveats of each parts of the application definition. 
But the same template and instructions can be easily reused for other applications with a web UI to access."),(0,i.yg)("p",null,"Checkout the ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/blob/master/applications/templates/template-jupyterlab-root.yml"},"complete JupyterLab template here")," (it will be slightly different with a bit more comments, but there are globally the same)"),(0,i.yg)("p",null,"You will see that deploying on Kubernetes (and by extension, here OpenShift), is just about defining objects in a YAML file, like a complex ",(0,i.yg)("inlineCode",{parentName:"p"},"docker-compose.yml")," file. "),(0,i.yg)("admonition",{title:"Do you got what it takes?",type:"info"},(0,i.yg)("p",{parentName:"admonition"},"The amount of objects might seems a bit overwhelming at first, but this is what it takes to automatically deploy a complex application on a large cluster, automatically available through a generated URL, with ",(0,i.yg)("inlineCode",{parentName:"p"},"HTTPS")," encryption to protect your passwords when you log to a web UI!")),(0,i.yg)("h2",{id:"application-walkthrough"},"Application walkthrough"),(0,i.yg)("p",null,"First, you need to create your ",(0,i.yg)("strong",{parentName:"p"},"Template")," objects, this will be the main object we will create here as all other objects defined will be deployed by this template. "),(0,i.yg)("p",null,"In this part we mainly just provide the description and information that will be shown to users when deploying the application from the DSRI web UI catalog."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"---\nkind: Template\napiVersion: template.openshift.io/v1\nlabels:\n template: jupyterlab-root\nmetadata:\n name: jupyterlab-root\n annotations:\n openshift.io/display-name: JupyterLab\n description: |-\n Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need. \n \ud83d\udcc2 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created\n You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims\n You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks\n - jupyter/tensorflow-notebook\n - jupyter/r-notebook\n - jupyter/all-spark-notebook\n - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels)\n Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image\n Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages.\n iconClass: icon-python\n tags: python,jupyter,notebook\n openshift.io/provider-display-name: Institute of Data Science, UM\n openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter\n openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help\n")),(0,i.yg)("h3",{id:"parameters"},"Parameters"),(0,i.yg)("p",null,"Then define the ",(0,i.yg)("strong",{parentName:"p"},"parameters")," the user will be able to define in the DSRI catalog web UI when instantiating the application. 
",(0,i.yg)("inlineCode",{parentName:"p"},"APPLICATION_NAME")," is the most important as it will be used everywhere to create the objects and identify the application."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"parameters:\n- name: APPLICATION_NAME\n displayName: Name for the application\n description: Must be without spaces (use -), and unique in the project.\n value: jupyterlab\n required: true\n- name: PASSWORD\n displayName: JupyterLab UI Password\n description: The password/token to access the JupyterLab web UI\n required: true\n- name: APPLICATION_IMAGE\n displayName: Jupyter notebook Docker image\n value: ghcr.io/maastrichtu-ids/jupyterlab:latest\n required: true\n description: You can use any image based on https://github.com/jupyter/docker-stacks\n- name: STORAGE_SIZE\n displayName: Storage size\n description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`.\n value: 5Gi\n required: true\n")),(0,i.yg)("p",null,"We can then refer to those parameters value (filled by the users of the template) in the rest of the template using this syntax: ",(0,i.yg)("inlineCode",{parentName:"p"},"${APPLICATION_NAME}")),(0,i.yg)("p",null,"We will now ",(0,i.yg)("strong",{parentName:"p"},"describe all objects deployed")," when we instantiate this template (to start an application). "),(0,i.yg)("h3",{id:"image"},"Image"),(0,i.yg)("p",null,"First we define the ",(0,i.yg)("strong",{parentName:"p"},"ImageStream")," object to import the Docker image(s) of your application(s) on the DSRI cluster"),(0,i.yg)("p",null,"Setting the ",(0,i.yg)("inlineCode",{parentName:"p"},"importPolicy: scheduled")," to ",(0,i.yg)("inlineCode",{parentName:"p"},"true")," will have the DSRI to automatically check for new version of this image, which can be useful if you want to always have the latest published version of an applications. Visit the ",(0,i.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/openshift_images/image-streams-manage.html"},"OpenShift ImageStreams documentation")," for more details. Be careful as enabling this feature without real need will cause the DSRI to query DockerHub more, which might require you to login to DockerHub to increase your pull request quota."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'objects:\n- kind: "ImageStream"\n apiVersion: image.openshift.io/v1\n metadata:\n name: ${APPLICATION_NAME}\n labels:\n app: ${APPLICATION_NAME}\n spec:\n tags:\n - name: latest\n from:\n kind: DockerImage\n name: ${APPLICATION_IMAGE}\n importPolicy:\n scheduled: true\n lookupPolicy:\n local: true\n')),(0,i.yg)("h3",{id:"create-storage"},"Create storage"),(0,i.yg)("p",null,"Then we define the ",(0,i.yg)("strong",{parentName:"p"},"PersistentVolumeClaim"),", which is a persistent storage on which we will mount the ",(0,i.yg)("inlineCode",{parentName:"p"},"/home/jovyan")," folder to avoid loosing data if our application is restarted."),(0,i.yg)("p",null,"Any file outside of a persistent volume can be lost at any moment if the pod restart, usually it only consists in temporary file if you are properly working in the persistent volume folder. 
This can be useful also if your application is crashing, stopping and restarting your pod (application) might fix it."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "PersistentVolumeClaim"\n apiVersion: "v1"\n metadata:\n name: ${APPLICATION_NAME}\n labels:\n app: ${APPLICATION_NAME}\n spec:\n accessModes:\n - "ReadWriteMany"\n resources:\n requests:\n storage: ${STORAGE_SIZE}\n')),(0,i.yg)("h3",{id:"secret"},"Secret"),(0,i.yg)("p",null,"Then the ",(0,i.yg)("strong",{parentName:"p"},"Secret")," to store the password"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "Secret"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n stringData:\n application-password: "${PASSWORD}"\n')),(0,i.yg)("h3",{id:"deployment"},"Deployment"),(0,i.yg)("p",null,"Then the ",(0,i.yg)("strong",{parentName:"p"},"DeploymentConfig")," (aka. Deployment) define how to deploy the JupyterLab image, if you want to deploy another application alongside JupyterLab you can do it by adding as many deployments as you want! (and use the same, or different, persistent volume claims for storage). Checkout the ",(0,i.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/applications/deployments/what-deployments-are.html"},"OpenShift Deployments documentation")," for more details."),(0,i.yg)("p",null,"In this first block we will define the strategy to update and recreate our applications if you change the YAML configuration, or when a new latest docker image is updated, allowing your service to always use the latest up-to-date version of a software without any intervention from you. "),(0,i.yg)("p",null,"We chose the ",(0,i.yg)("inlineCode",{parentName:"p"},"Recreate")," release option to make sure the container is properly recreated and avoid unnecessary resources consumption, but you can also use ",(0,i.yg)("inlineCode",{parentName:"p"},"Rolling")," to have a downtime free transition between deployments."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "DeploymentConfig"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: "${APPLICATION_NAME}"\n spec:\n replicas: 1\n strategy:\n type: "Recreate"\n triggers:\n - type: "ConfigChange"\n - type: "ImageChange"\n imageChangeParams:\n automatic: true\n containerNames:\n - jupyter-notebook\n from:\n kind: ImageStreamTag\n name: ${APPLICATION_NAME}:latest\n selector:\n app: "${APPLICATION_NAME}"\n deploymentconfig: "${APPLICATION_NAME}"\n')),(0,i.yg)("h3",{id:"pod-spec"},"Pod spec"),(0,i.yg)("p",null,"Then we define the spec of the ",(0,i.yg)("strong",{parentName:"p"},"pod")," that will be deployed by this DeploymentConfig."),(0,i.yg)("p",null,"Setting the ",(0,i.yg)("inlineCode",{parentName:"p"},"serviceAccountName: anyuid")," is required for most Docker containers as it allows to run a container using any user ID (e.g. root). Otherwise OpenShift expect to use a random user ID, which is require to build the Docker image especially to work with random user IDs."),(0,i.yg)("p",null,"We then create the ",(0,i.yg)("inlineCode",{parentName:"p"},"containers:")," array which is where we will define the containers deployed in the pod. It is recommended to deploy 1 container per pod, as it enables a better separation and management of the applications, apart if you know what you are doing. 
You can also provide the command to run at the start of the container to overwrite the default one, and define the exposed ports (here 8080)."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' template:\n metadata:\n labels:\n app: "${APPLICATION_NAME}"\n deploymentconfig: "${APPLICATION_NAME}"\n spec:\n serviceAccountName: "anyuid"\n containers:\n - name: "jupyter-notebook"\n image: "${APPLICATION_NAME}:latest"\n command:\n - "start-notebook.sh"\n - "--no-browser"\n - "--ip=0.0.0.0"\n ports:\n - containerPort: 8888\n protocol: TCP\n')),(0,i.yg)("h3",{id:"environment-variables-in-the-container"},"Environment variables in the container"),(0,i.yg)("p",null,"Then define the ",(0,i.yg)("strong",{parentName:"p"},"environment variables")," used in your container, usually the password and most parameters are set here, such as enabling ",(0,i.yg)("inlineCode",{parentName:"p"},"sudo")," in the container."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' env:\n - name: JUPYTER_TOKEN\n valueFrom:\n secretKeyRef:\n key: "application-password"\n name: "${APPLICATION_NAME}"\n - name: JUPYTER_ENABLE_LAB\n value: "yes"\n - name: GRANT_SUDO\n value: "yes"\n')),(0,i.yg)("h3",{id:"mount-storage"},"Mount storage"),(0,i.yg)("p",null,"Then we need to mount the previously created ",(0,i.yg)("strong",{parentName:"p"},"PersistentVolume")," on ",(0,i.yg)("inlineCode",{parentName:"p"},"/home/jovyan")," , the workspace of JupyterLab. Be careful: ",(0,i.yg)("inlineCode",{parentName:"p"},"volumeMounts")," is in the ",(0,i.yg)("inlineCode",{parentName:"p"},"containers:")," object, and ",(0,i.yg)("inlineCode",{parentName:"p"},"volumes")," is defined in the ",(0,i.yg)("inlineCode",{parentName:"p"},"spec:")," object"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' volumeMounts:\n - name: data\n mountPath: "/home/jovyan"\n volumes:\n - name: data\n persistentVolumeClaim:\n claimName: "${APPLICATION_NAME}"\n')),(0,i.yg)("h3",{id:"security-context"},"Security context"),(0,i.yg)("p",null,"Then we define the ",(0,i.yg)("strong",{parentName:"p"},"securityContext")," to allow JupyterLab to run as root, this is ",(0,i.yg)("strong",{parentName:"p"},"not required for most applications"),", just a specificity of the official Jupyter images to run with root privileges."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"}," securityContext:\n runAsUser: 0\n supplementalGroups:\n - 100\n automountServiceAccountToken: false\n")),(0,i.yg)("h3",{id:"service"},"Service"),(0,i.yg)("p",null,"Then we create the ",(0,i.yg)("strong",{parentName:"p"},"Service")," to expose the port 8888 of our JupyterLab container on the project network. This means that the JupyterLab web UI will reachable by all other application deployed in your project using its application name as hostname (e.g. 
",(0,i.yg)("inlineCode",{parentName:"p"},"jupyterlab"),")"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "Service"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n spec:\n ports:\n - name: 8888-tcp\n protocol: TCP\n port: 8888\n targetPort: 8888\n selector:\n app: ${APPLICATION_NAME}\n deploymentconfig: "${APPLICATION_NAME}"\n type: ClusterIP\n')),(0,i.yg)("h3",{id:"route"},"Route"),(0,i.yg)("p",null,"Finally, we define the ",(0,i.yg)("strong",{parentName:"p"},"Route")," which will automatically generate a URL for the service of your application based following this template: ",(0,i.yg)("inlineCode",{parentName:"p"},"APPLICATION_NAME-PROJECT_ID-DSRI_URL")),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "Route"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n spec:\n host: \'\'\n to:\n kind: Service\n name: "${APPLICATION_NAME}"\n weight: 100\n port:\n targetPort: 8888-tcp\n tls:\n termination: edge\n insecureEdgeTerminationPolicy: Redirect\n')),(0,i.yg)("h2",{id:"the-complete-application"},"The complete application"),(0,i.yg)("p",null,"Here is a complete file to describe the JupyterLab deployment template, you can add it to your project catalog by going to ",(0,i.yg)("strong",{parentName:"p"},"+Add")," in the DSRI web UI, then click on the option to add a ",(0,i.yg)("strong",{parentName:"p"},"YAML")," file content, and copy paste the template YAML."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'---\nkind: Template\napiVersion: template.openshift.io/v1\nlabels:\n template: jupyterlab-root\nmetadata:\n name: jupyterlab-root\n annotations:\n openshift.io/display-name: JupyterLab\n description: |-\n Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need. \n \ud83d\udcc2 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created\n You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims\n You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks\n - jupyter/tensorflow-notebook\n - jupyter/r-notebook\n - jupyter/all-spark-notebook\n - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels)\n Or build your own! 
Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image\n Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages.\n iconClass: icon-python\n tags: python,jupyter,notebook\n openshift.io/provider-display-name: Institute of Data Science, UM\n openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter\n openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help\n \nparameters:\n- name: APPLICATION_NAME\n displayName: Name for the application\n description: Must be without spaces (use -), and unique in the project.\n value: jupyterlab\n required: true\n- name: PASSWORD\n displayName: JupyterLab UI Password\n description: The password/token to access the JupyterLab web UI\n required: true\n- name: APPLICATION_IMAGE\n displayName: Jupyter notebook Docker image\n value: ghcr.io/maastrichtu-ids/jupyterlab:latest\n required: true\n description: You can use any image based on https://github.com/jupyter/docker-stacks\n- name: STORAGE_SIZE\n displayName: Storage size\n description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`.\n value: 5Gi\n required: true\n \nobjects:\n- kind: "ImageStream"\n apiVersion: image.openshift.io/v1\n metadata:\n name: ${APPLICATION_NAME}\n labels:\n app: ${APPLICATION_NAME}\n spec:\n tags:\n - name: latest\n from:\n kind: DockerImage\n name: ${APPLICATION_IMAGE}\n lookupPolicy:\n local: true\n\n- kind: "PersistentVolumeClaim"\n apiVersion: "v1"\n metadata:\n name: ${APPLICATION_NAME}\n labels:\n app: ${APPLICATION_NAME}\n spec:\n accessModes:\n - "ReadWriteMany"\n resources:\n requests:\n storage: ${STORAGE_SIZE}\n\n- kind: "Secret"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n stringData:\n application-password: "${PASSWORD}"\n\n- kind: "DeploymentConfig"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: "${APPLICATION_NAME}"\n spec:\n replicas: 1\n strategy:\n type: Recreate\n triggers:\n - type: ConfigChange\n - type: ImageChange\n imageChangeParams:\n automatic: true\n containerNames:\n - jupyter-notebook\n from:\n kind: ImageStreamTag\n name: ${APPLICATION_NAME}:latest\n selector:\n app: "${APPLICATION_NAME}"\n deploymentconfig: "${APPLICATION_NAME}"\n\n template:\n metadata:\n labels:\n app: "${APPLICATION_NAME}"\n deploymentconfig: "${APPLICATION_NAME}"\n spec:\n serviceAccountName: "anyuid"\n containers:\n - name: jupyter-notebook\n image: "${APPLICATION_NAME}:latest"\n command:\n - "start-notebook.sh"\n - "--no-browser"\n - "--ip=0.0.0.0"\n ports:\n - containerPort: 8888\n protocol: TCP\n\n env:\n - name: "JUPYTER_TOKEN"\n valueFrom:\n secretKeyRef:\n key: application-password\n name: "${APPLICATION_NAME}"\n - name: JUPYTER_ENABLE_LAB\n value: "yes"\n - name: GRANT_SUDO\n value: "yes"\n\n volumeMounts:\n - name: data\n mountPath: "/home/jovyan"\n volumes:\n - name: data\n persistentVolumeClaim:\n claimName: "${APPLICATION_NAME}"\n\n securityContext:\n runAsUser: 0\n supplementalGroups:\n - 100\n automountServiceAccountToken: false\n\n- kind: "Service"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n spec:\n ports:\n - name: 8888-tcp\n protocol: TCP\n port: 8888\n targetPort: 8888\n selector:\n app: ${APPLICATION_NAME}\n deploymentconfig: "${APPLICATION_NAME}"\n type: ClusterIP\n\n- kind: "Route"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n 
labels:\n app: ${APPLICATION_NAME}\n spec:\n host: \'\'\n to:\n kind: Service\n name: "${APPLICATION_NAME}"\n weight: 100\n port:\n targetPort: 8888-tcp\n tls:\n termination: edge\n insecureEdgeTerminationPolicy: Redirect\n')),(0,i.yg)("h2",{id:"add-a-configuration-file"},"Add a configuration file"),(0,i.yg)("p",null,"This practice is more advanced, and is not required for most deployments, but you can easily create a ",(0,i.yg)("strong",{parentName:"p"},"ConfigMap")," object to define any file to be provided at runtime to the application."),(0,i.yg)("p",null,"For example here we are going to define a python script that will be run when starting JupyterLab (",(0,i.yg)("inlineCode",{parentName:"p"},"jupyter_notebook_config.py"),"). It will clone the git repository URL, provided by the user when creating the template, at the start of JupyterLab in the workspace. If this repo contains files with list of packages in the root folder (",(0,i.yg)("inlineCode",{parentName:"p"},"requirements.txt")," and ",(0,i.yg)("inlineCode",{parentName:"p"},"packages.txt"),"), they will be installed at start"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"- kind: ConfigMap\n apiVersion: v1\n metadata:\n name: \"${APPLICATION_NAME}-cfg\"\n labels:\n app: \"${APPLICATION_NAME}\"\n data:\n # Clone git repo, then install requirements.txt and packages.txt\n jupyter_notebook_config.py: |\n import os\n git_url = os.environ.get('GIT_URL')\n home_dir = os.environ.get('HOME')\n os.chdir(home_dir)\n if git_url:\n repo_id = git_url.rsplit('/', 1)[-1]\n os.system('git clone --quiet --recursive ' + git_url)\n os.chdir(repo_id)\n if os.path.exists('packages.txt'):\n os.system('sudo apt-get update')\n os.system('cat packages.txt | xargs sudo apt-get install -y')\n if os.path.exists('requirements.txt'):\n os.system('pip install -r requirements.txt')\n os.chdir(home_dir)\n")),(0,i.yg)("p",null,"We will then need to mount this config file like a persistent volume in the path we want it to be (here ",(0,i.yg)("inlineCode",{parentName:"p"},"/etc/jupyter/openshift"),"), change the ",(0,i.yg)("strong",{parentName:"p"},"volumes")," and ",(0,i.yg)("strong",{parentName:"p"},"volumeMounts")," of your ",(0,i.yg)("strong",{parentName:"p"},"DeploymentConfig"),":"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' volumeMounts:\n - name: data\n mountPath: "/home/jovyan"\n - name: configs\n mountPath: "/etc/jupyter/openshift"\n automountServiceAccountToken: false\n volumes:\n - name: data\n persistentVolumeClaim:\n claimName: "${APPLICATION_NAME}"\n - name: configs\n configMap:\n name: "${APPLICATION_NAME}-cfg"\n')),(0,i.yg)("p",null,"Then change the ",(0,i.yg)("inlineCode",{parentName:"p"},"jupyter-notebook")," container start command to include this config file:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' command:\n - "start-notebook.sh"\n - "--no-browser"\n - "--ip=0.0.0.0"\n - "--config=/etc/jupyter/openshift/jupyter_notebook_config.py"\n')),(0,i.yg)("p",null,"Add the ",(0,i.yg)("strong",{parentName:"p"},"optional parameter")," to get the git URL to clone when the user create the template:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"parameters:\n- name: GIT_URL\n displayName: URL of the git repository to clone (optional)\n required: false\n description: Source code will be automatically cloned, then requirements.txt and packages.txt content will be automatically installed if 
presents\n")),(0,i.yg)("p",null,"Finally, add the git URL parameter provided by the user as ",(0,i.yg)("strong",{parentName:"p"},"environment variable")," of the container, so that it is picked up by the config script when running at the start of JupyterLab:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' env:\n - name: GIT_URL\n value: "${GIT_URL}"\n')),(0,i.yg)("h2",{id:"add-automated-health-checks"},"Add automated health checks"),(0,i.yg)("p",null,"You can add ",(0,i.yg)("strong",{parentName:"p"},"readiness and liveness probes")," to a container to automatically check if the web application is up and ready. This will allow to wait for the JupyterLab web UI to be accessible before showing the application as ready in the Topology. Useful if you are cloning a repository and installing packages, which will take more time to start JupyterLab."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"}," containers:\n - name: jupyter-notebook\n readinessProbe: \n tcpSocket:\n port: 8888\n livenessProbe: \n initialDelaySeconds: 15 \n tcpSocket: \n port: 8888 \n failureThreshold: 40\n periodSeconds: 10\n timeoutSeconds: 2\n")),(0,i.yg)("p",null,"Checkout the ",(0,i.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/applications/application-health.html"},"OpenShift Application health documentation")," for more details."),(0,i.yg)("h2",{id:"define-resource-limits"},"Define resource limits"),(0,i.yg)("p",null,"You can also define resources request and limits for each ",(0,i.yg)("strong",{parentName:"p"},"DeploymentConfig"),", in ",(0,i.yg)("inlineCode",{parentName:"p"},"spec:")),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' spec:\n resources:\n requests: \n cpu: "1"\n memory: "2Gi"\n limits:\n cpu: "128"\n memory: "300Gi"\n')),(0,i.yg)("h2",{id:"build-your-own-application-template"},"Build your own application template"),(0,i.yg)("p",null,"The easiest way to build a template for a new application is to start from this JupyterLab template:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"Replace ",(0,i.yg)("inlineCode",{parentName:"li"},"jupyterlab-root")," by your application name"),(0,i.yg)("li",{parentName:"ul"},"Replace ",(0,i.yg)("inlineCode",{parentName:"li"},"8888")," by your application"),(0,i.yg)("li",{parentName:"ul"},"Change the template and parameters descriptions to match your application"),(0,i.yg)("li",{parentName:"ul"},"Remove the ",(0,i.yg)("inlineCode",{parentName:"li"},"securityContext")," part, and other objects you do not need")),(0,i.yg)("p",null,"If you need to start multiple containers, copy/paste the objects you need to create and edit them"))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3360],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>d});var a=n(6540);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var s=a.createContext({}),p=function(e){var t=a.useContext(s),n=t;return 
e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},c=function(e){var t=p(e.components);return a.createElement(s.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},m=a.forwardRef((function(e,t){var n=e.components,o=e.mdxType,i=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),m=p(n),d=o,y=m["".concat(s,".").concat(d)]||m[d]||u[d]||i;return n?a.createElement(y,r(r({ref:t},c),{},{components:n})):a.createElement(y,r({ref:t},c))}));function d(e,t){var n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var i=n.length,r=new Array(i);r[0]=m;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:o,r[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>d,frontMatter:()=>l,metadata:()=>p,toc:()=>u});var a=n(9668),o=n(1367),i=(n(6540),n(5680)),r=["components"],l={id:"anatomy-of-an-application",title:"Anatomy of a DSRI application"},s=void 0,p={unversionedId:"anatomy-of-an-application",id:"anatomy-of-an-application",title:"Anatomy of a DSRI application",description:"This page will present you how an applications is typically built using an OpenShift template. This will also help you understand more in general the different objects that needs to be defined when deploying an application on a Kubernetes cluster. Even if OpenShift templates can only be deployed to OpenShift, the objects they define are the same as in Kubernetes (apart from the Route which becomes Ingress).",source:"@site/docs/anatomy-of-an-application.md",sourceDirName:".",slug:"/anatomy-of-an-application",permalink:"/docs/anatomy-of-an-application",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/anatomy-of-an-application.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"anatomy-of-an-application",title:"Anatomy of a DSRI application"},sidebar:"docs",previous:{title:"Deploy from a Docker image",permalink:"/docs/deploy-from-docker"},next:{title:"Install from Helm charts",permalink:"/docs/helm"}},c={},u=[{value:"Application walkthrough",id:"application-walkthrough",level:2},{value:"Parameters",id:"parameters",level:3},{value:"Image",id:"image",level:3},{value:"Create storage",id:"create-storage",level:3},{value:"Secret",id:"secret",level:3},{value:"Deployment",id:"deployment",level:3},{value:"Pod spec",id:"pod-spec",level:3},{value:"Environment variables in the container",id:"environment-variables-in-the-container",level:3},{value:"Mount storage",id:"mount-storage",level:3},{value:"Security context",id:"security-context",level:3},{value:"Service",id:"service",level:3},{value:"Route",id:"route",level:3},{value:"The complete application",id:"the-complete-application",level:2},{value:"Add a configuration file",id:"add-a-configuration-file",level:2},{value:"Add automated health checks",id:"add-automated-health-checks",level:2},{value:"Define resource limits",id:"define-resource-limits",level:2},{value:"Build your own application template",id:"build-your-own-application-template",level:2}],m={toc:u};function d(e){var t=e.components,n=(0,o.A)(e,r);return(0,i.yg)("wrapper",(0,a.A)({},m,n,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("p",null,"This page will present you how an applications is typically built using an ",(0,i.yg)("strong",{parentName:"p"},"OpenShift template"),". 
This will also help you understand, more generally, the different objects that need to be defined when ",(0,i.yg)("strong",{parentName:"p"},"deploying an application on a Kubernetes cluster"),". Even if OpenShift templates can only be deployed to OpenShift, the objects they define are the same as in Kubernetes (apart from the Route, which becomes an Ingress)."),(0,i.yg)("p",null,"There are other ways to describe applications on an OpenShift cluster (here the DSRI), such as Helm or Operators. But OpenShift templates are the easiest and quickest way to build an application that can be deployed from the DSRI web UI catalog in a few clicks, just by providing a few parameters."),(0,i.yg)("p",null,"It is better to have a basic understanding of what a Docker container is to fully follow this walkthrough, but it should already give a good idea of the different objects deployed with each DSRI application."),(0,i.yg)("p",null,"We will use the template used to deploy JupyterLab as an example, and we will describe the goal, importance and caveats of each part of the application definition. The same template and instructions can easily be reused for other applications that expose a web UI."),(0,i.yg)("p",null,"Check out the ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/blob/master/applications/templates/template-jupyterlab-root.yml"},"complete JupyterLab template here")," (it is slightly different, with a few more comments, but they are essentially the same)."),(0,i.yg)("p",null,"You will see that deploying on Kubernetes (and, by extension, OpenShift) is just about defining objects in a YAML file, like a complex ",(0,i.yg)("inlineCode",{parentName:"p"},"docker-compose.yml")," file."),(0,i.yg)("admonition",{title:"Do you have what it takes?",type:"info"},(0,i.yg)("p",{parentName:"admonition"},"The number of objects might seem a bit overwhelming at first, but this is what it takes to automatically deploy a complex application on a large cluster, automatically available through a generated URL, with ",(0,i.yg)("inlineCode",{parentName:"p"},"HTTPS")," encryption to protect your passwords when you log in to a web UI!")),(0,i.yg)("h2",{id:"application-walkthrough"},"Application walkthrough"),(0,i.yg)("p",null,"First, you need to create the ",(0,i.yg)("strong",{parentName:"p"},"Template")," object. This is the main object we create here, as all other objects defined will be deployed by this template."),(0,i.yg)("p",null,"In this part we mainly just provide the description and information that will be shown to users when deploying the application from the DSRI web UI catalog."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"---\nkind: Template\napiVersion: template.openshift.io/v1\nlabels:\n template: jupyterlab-root\nmetadata:\n name: jupyterlab-root\n annotations:\n openshift.io/display-name: JupyterLab\n description: |-\n Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need. 
\n \ud83d\udcc2 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created\n You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims\n You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks\n - jupyter/tensorflow-notebook\n - jupyter/r-notebook\n - jupyter/all-spark-notebook\n - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels)\n Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image\n Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages.\n iconClass: icon-python\n tags: python,jupyter,notebook\n openshift.io/provider-display-name: Institute of Data Science, UM\n openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter\n openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help\n")),(0,i.yg)("h3",{id:"parameters"},"Parameters"),(0,i.yg)("p",null,"Then define the ",(0,i.yg)("strong",{parentName:"p"},"parameters")," the user will be able to set in the DSRI catalog web UI when instantiating the application. ",(0,i.yg)("inlineCode",{parentName:"p"},"APPLICATION_NAME")," is the most important, as it will be used everywhere to create the objects and identify the application."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"parameters:\n- name: APPLICATION_NAME\n displayName: Name for the application\n description: Must be without spaces (use -), and unique in the project.\n value: jupyterlab\n required: true\n- name: PASSWORD\n displayName: JupyterLab UI Password\n description: The password/token to access the JupyterLab web UI\n required: true\n- name: APPLICATION_IMAGE\n displayName: Jupyter notebook Docker image\n value: ghcr.io/maastrichtu-ids/jupyterlab:latest\n required: true\n description: You can use any image based on https://github.com/jupyter/docker-stacks\n- name: STORAGE_SIZE\n displayName: Storage size\n description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`.\n value: 5Gi\n required: true\n")),(0,i.yg)("p",null,"We can then refer to those parameter values (filled in by the users of the template) in the rest of the template using this syntax: ",(0,i.yg)("inlineCode",{parentName:"p"},"${APPLICATION_NAME}")),(0,i.yg)("p",null,"We will now ",(0,i.yg)("strong",{parentName:"p"},"describe all objects deployed")," when we instantiate this template (to start an application)."),(0,i.yg)("h3",{id:"image"},"Image"),(0,i.yg)("p",null,"First we define the ",(0,i.yg)("strong",{parentName:"p"},"ImageStream")," object to import the Docker image(s) of your application(s) on the DSRI cluster."),(0,i.yg)("p",null,"Setting the ",(0,i.yg)("inlineCode",{parentName:"p"},"importPolicy: scheduled")," to ",(0,i.yg)("inlineCode",{parentName:"p"},"true")," will have the DSRI automatically check for new versions of this image, which can be useful if you want to always have the latest published version of an application. Visit the ",(0,i.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/openshift_images/image-streams-manage.html"},"OpenShift ImageStreams documentation")," for more details. 
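"),(0,i.yg)("p",null,"If you would rather not enable scheduled imports, you can trigger a re-import of the image tag manually whenever you need the newest version. A minimal sketch, assuming the application is named jupyterlab:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"# Re-import the latest tag of the image stream on demand\noc import-image jupyterlab:latest\n")),(0,i.yg)("p",null,"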
Be careful: enabling this feature without a real need will cause the DSRI to query DockerHub more often, which might require you to log in to DockerHub to increase your image pull quota."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'objects:\n- kind: "ImageStream"\n apiVersion: image.openshift.io/v1\n metadata:\n name: ${APPLICATION_NAME}\n labels:\n app: ${APPLICATION_NAME}\n spec:\n tags:\n - name: latest\n from:\n kind: DockerImage\n name: ${APPLICATION_IMAGE}\n importPolicy:\n scheduled: true\n lookupPolicy:\n local: true\n')),(0,i.yg)("h3",{id:"create-storage"},"Create storage"),(0,i.yg)("p",null,"Then we define the ",(0,i.yg)("strong",{parentName:"p"},"PersistentVolumeClaim"),", a persistent storage on which we will mount the ",(0,i.yg)("inlineCode",{parentName:"p"},"/home/jovyan")," folder to avoid losing data if our application is restarted."),(0,i.yg)("p",null,"Any file outside of a persistent volume can be lost at any moment if the pod restarts; if you are properly working in the persistent volume folder, this usually only concerns temporary files. This can also be useful if your application is crashing: stopping and restarting your pod (application) might fix it."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "PersistentVolumeClaim"\n apiVersion: "v1"\n metadata:\n name: ${APPLICATION_NAME}\n labels:\n app: ${APPLICATION_NAME}\n spec:\n accessModes:\n - "ReadWriteMany"\n resources:\n requests:\n storage: ${STORAGE_SIZE}\n')),(0,i.yg)("h3",{id:"secret"},"Secret"),(0,i.yg)("p",null,"Then the ",(0,i.yg)("strong",{parentName:"p"},"Secret")," to store the password:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "Secret"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n stringData:\n application-password: "${PASSWORD}"\n')),(0,i.yg)("h3",{id:"deployment"},"Deployment"),(0,i.yg)("p",null,"Then the ",(0,i.yg)("strong",{parentName:"p"},"DeploymentConfig")," (aka. Deployment) defines how to deploy the JupyterLab image. If you want to deploy another application alongside JupyterLab, you can do it by adding as many deployments as you want (and use the same, or different, persistent volume claims for storage). Check out the ",(0,i.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/applications/deployments/what-deployments-are.html"},"OpenShift Deployments documentation")," for more details."),(0,i.yg)("p",null,"In this first block we define the strategy used to update and recreate our application when you change the YAML configuration, or when a new latest Docker image is published, allowing your service to always use the most up-to-date version of a software without any intervention from you. 
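"),(0,i.yg)("p",null,"Once the application is running, you can also trigger and watch such updates by hand with the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," client. A minimal sketch, assuming the application is named jupyterlab:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"# Manually trigger a new rollout of the DeploymentConfig\noc rollout latest dc/jupyterlab\n# Follow the rollout until the new pod is ready\noc rollout status dc/jupyterlab\n")),(0,i.yg)("p",null,"The strategy below controls what such a rollout does. 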
"),(0,i.yg)("p",null,"We chose the ",(0,i.yg)("inlineCode",{parentName:"p"},"Recreate")," release option to make sure the container is properly recreated and avoid unnecessary resources consumption, but you can also use ",(0,i.yg)("inlineCode",{parentName:"p"},"Rolling")," to have a downtime free transition between deployments."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "DeploymentConfig"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: "${APPLICATION_NAME}"\n spec:\n replicas: 1\n strategy:\n type: "Recreate"\n triggers:\n - type: "ConfigChange"\n - type: "ImageChange"\n imageChangeParams:\n automatic: true\n containerNames:\n - jupyter-notebook\n from:\n kind: ImageStreamTag\n name: ${APPLICATION_NAME}:latest\n selector:\n app: "${APPLICATION_NAME}"\n deploymentconfig: "${APPLICATION_NAME}"\n')),(0,i.yg)("h3",{id:"pod-spec"},"Pod spec"),(0,i.yg)("p",null,"Then we define the spec of the ",(0,i.yg)("strong",{parentName:"p"},"pod")," that will be deployed by this DeploymentConfig."),(0,i.yg)("p",null,"Setting the ",(0,i.yg)("inlineCode",{parentName:"p"},"serviceAccountName: anyuid")," is required for most Docker containers as it allows to run a container using any user ID (e.g. root). Otherwise OpenShift expect to use a random user ID, which is require to build the Docker image especially to work with random user IDs."),(0,i.yg)("p",null,"We then create the ",(0,i.yg)("inlineCode",{parentName:"p"},"containers:")," array which is where we will define the containers deployed in the pod. It is recommended to deploy 1 container per pod, as it enables a better separation and management of the applications, apart if you know what you are doing. You can also provide the command to run at the start of the container to overwrite the default one, and define the exposed ports (here 8080)."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' template:\n metadata:\n labels:\n app: "${APPLICATION_NAME}"\n deploymentconfig: "${APPLICATION_NAME}"\n spec:\n serviceAccountName: "anyuid"\n containers:\n - name: "jupyter-notebook"\n image: "${APPLICATION_NAME}:latest"\n command:\n - "start-notebook.sh"\n - "--no-browser"\n - "--ip=0.0.0.0"\n ports:\n - containerPort: 8888\n protocol: TCP\n')),(0,i.yg)("h3",{id:"environment-variables-in-the-container"},"Environment variables in the container"),(0,i.yg)("p",null,"Then define the ",(0,i.yg)("strong",{parentName:"p"},"environment variables")," used in your container, usually the password and most parameters are set here, such as enabling ",(0,i.yg)("inlineCode",{parentName:"p"},"sudo")," in the container."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' env:\n - name: JUPYTER_TOKEN\n valueFrom:\n secretKeyRef:\n key: "application-password"\n name: "${APPLICATION_NAME}"\n - name: JUPYTER_ENABLE_LAB\n value: "yes"\n - name: GRANT_SUDO\n value: "yes"\n')),(0,i.yg)("h3",{id:"mount-storage"},"Mount storage"),(0,i.yg)("p",null,"Then we need to mount the previously created ",(0,i.yg)("strong",{parentName:"p"},"PersistentVolume")," on ",(0,i.yg)("inlineCode",{parentName:"p"},"/home/jovyan")," , the workspace of JupyterLab. 
Be careful: ",(0,i.yg)("inlineCode",{parentName:"p"},"volumeMounts")," is in the ",(0,i.yg)("inlineCode",{parentName:"p"},"containers:")," object, and ",(0,i.yg)("inlineCode",{parentName:"p"},"volumes")," is defined in the ",(0,i.yg)("inlineCode",{parentName:"p"},"spec:")," object"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' volumeMounts:\n - name: data\n mountPath: "/home/jovyan"\n volumes:\n - name: data\n persistentVolumeClaim:\n claimName: "${APPLICATION_NAME}"\n')),(0,i.yg)("h3",{id:"security-context"},"Security context"),(0,i.yg)("p",null,"Then we define the ",(0,i.yg)("strong",{parentName:"p"},"securityContext")," to allow JupyterLab to run as root, this is ",(0,i.yg)("strong",{parentName:"p"},"not required for most applications"),", just a specificity of the official Jupyter images to run with root privileges."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"}," securityContext:\n runAsUser: 0\n supplementalGroups:\n - 100\n automountServiceAccountToken: false\n")),(0,i.yg)("h3",{id:"service"},"Service"),(0,i.yg)("p",null,"Then we create the ",(0,i.yg)("strong",{parentName:"p"},"Service")," to expose the port 8888 of our JupyterLab container on the project network. This means that the JupyterLab web UI will reachable by all other application deployed in your project using its application name as hostname (e.g. ",(0,i.yg)("inlineCode",{parentName:"p"},"jupyterlab"),")"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "Service"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n spec:\n ports:\n - name: 8888-tcp\n protocol: TCP\n port: 8888\n targetPort: 8888\n selector:\n app: ${APPLICATION_NAME}\n deploymentconfig: "${APPLICATION_NAME}"\n type: ClusterIP\n')),(0,i.yg)("h3",{id:"route"},"Route"),(0,i.yg)("p",null,"Finally, we define the ",(0,i.yg)("strong",{parentName:"p"},"Route")," which will automatically generate a URL for the service of your application based following this template: ",(0,i.yg)("inlineCode",{parentName:"p"},"APPLICATION_NAME-PROJECT_ID-DSRI_URL")),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'- kind: "Route"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n spec:\n host: \'\'\n to:\n kind: Service\n name: "${APPLICATION_NAME}"\n weight: 100\n port:\n targetPort: 8888-tcp\n tls:\n termination: edge\n insecureEdgeTerminationPolicy: Redirect\n')),(0,i.yg)("h2",{id:"the-complete-application"},"The complete application"),(0,i.yg)("p",null,"Here is a complete file to describe the JupyterLab deployment template, you can add it to your project catalog by going to ",(0,i.yg)("strong",{parentName:"p"},"+Add")," in the DSRI web UI, then click on the option to add a ",(0,i.yg)("strong",{parentName:"p"},"YAML")," file content, and copy paste the template YAML."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},'---\nkind: Template\napiVersion: template.openshift.io/v1\nlabels:\n template: jupyterlab-root\nmetadata:\n name: jupyterlab-root\n annotations:\n openshift.io/display-name: JupyterLab\n description: |-\n Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need. 
\n \ud83d\udcc2 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created\n You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims\n You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks\n - jupyter/tensorflow-notebook\n - jupyter/r-notebook\n - jupyter/all-spark-notebook\n - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels)\n Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image\n Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages.\n iconClass: icon-python\n tags: python,jupyter,notebook\n openshift.io/provider-display-name: Institute of Data Science, UM\n openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter\n openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help\n \nparameters:\n- name: APPLICATION_NAME\n displayName: Name for the application\n description: Must be without spaces (use -), and unique in the project.\n value: jupyterlab\n required: true\n- name: PASSWORD\n displayName: JupyterLab UI Password\n description: The password/token to access the JupyterLab web UI\n required: true\n- name: APPLICATION_IMAGE\n displayName: Jupyter notebook Docker image\n value: ghcr.io/maastrichtu-ids/jupyterlab:latest\n required: true\n description: You can use any image based on https://github.com/jupyter/docker-stacks\n- name: STORAGE_SIZE\n displayName: Storage size\n description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`.\n value: 5Gi\n required: true\n \nobjects:\n- kind: "ImageStream"\n apiVersion: image.openshift.io/v1\n metadata:\n name: ${APPLICATION_NAME}\n labels:\n app: ${APPLICATION_NAME}\n spec:\n tags:\n - name: latest\n from:\n kind: DockerImage\n name: ${APPLICATION_IMAGE}\n lookupPolicy:\n local: true\n\n- kind: "PersistentVolumeClaim"\n apiVersion: "v1"\n metadata:\n name: ${APPLICATION_NAME}\n labels:\n app: ${APPLICATION_NAME}\n spec:\n accessModes:\n - "ReadWriteMany"\n resources:\n requests:\n storage: ${STORAGE_SIZE}\n\n- kind: "Secret"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n stringData:\n application-password: "${PASSWORD}"\n\n- kind: "DeploymentConfig"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: "${APPLICATION_NAME}"\n spec:\n replicas: 1\n strategy:\n type: Recreate\n triggers:\n - type: ConfigChange\n - type: ImageChange\n imageChangeParams:\n automatic: true\n containerNames:\n - jupyter-notebook\n from:\n kind: ImageStreamTag\n name: ${APPLICATION_NAME}:latest\n selector:\n app: "${APPLICATION_NAME}"\n deploymentconfig: "${APPLICATION_NAME}"\n\n template:\n metadata:\n labels:\n app: "${APPLICATION_NAME}"\n deploymentconfig: "${APPLICATION_NAME}"\n spec:\n serviceAccountName: "anyuid"\n containers:\n - name: jupyter-notebook\n image: "${APPLICATION_NAME}:latest"\n command:\n - "start-notebook.sh"\n - "--no-browser"\n - "--ip=0.0.0.0"\n ports:\n - containerPort: 8888\n protocol: TCP\n\n env:\n - name: "JUPYTER_TOKEN"\n valueFrom:\n secretKeyRef:\n key: application-password\n name: "${APPLICATION_NAME}"\n - name: JUPYTER_ENABLE_LAB\n value: "yes"\n - name: GRANT_SUDO\n value: "yes"\n\n volumeMounts:\n - name: data\n mountPath: "/home/jovyan"\n volumes:\n - name: 
data\n persistentVolumeClaim:\n claimName: "${APPLICATION_NAME}"\n\n securityContext:\n runAsUser: 0\n supplementalGroups:\n - 100\n automountServiceAccountToken: false\n\n- kind: "Service"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n spec:\n ports:\n - name: 8888-tcp\n protocol: TCP\n port: 8888\n targetPort: 8888\n selector:\n app: ${APPLICATION_NAME}\n deploymentconfig: "${APPLICATION_NAME}"\n type: ClusterIP\n\n- kind: "Route"\n apiVersion: v1\n metadata:\n name: "${APPLICATION_NAME}"\n labels:\n app: ${APPLICATION_NAME}\n spec:\n host: \'\'\n to:\n kind: Service\n name: "${APPLICATION_NAME}"\n weight: 100\n port:\n targetPort: 8888-tcp\n tls:\n termination: edge\n insecureEdgeTerminationPolicy: Redirect\n')),(0,i.yg)("h2",{id:"add-a-configuration-file"},"Add a configuration file"),(0,i.yg)("p",null,"This practice is more advanced, and is not required for most deployments, but you can easily create a ",(0,i.yg)("strong",{parentName:"p"},"ConfigMap")," object to define any file to be provided at runtime to the application."),(0,i.yg)("p",null,"For example here we are going to define a python script that will be run when starting JupyterLab (",(0,i.yg)("inlineCode",{parentName:"p"},"jupyter_notebook_config.py"),"). It will clone the git repository URL, provided by the user when creating the template, at the start of JupyterLab in the workspace. If this repo contains files with list of packages in the root folder (",(0,i.yg)("inlineCode",{parentName:"p"},"requirements.txt")," and ",(0,i.yg)("inlineCode",{parentName:"p"},"packages.txt"),"), they will be installed at start"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"- kind: ConfigMap\n apiVersion: v1\n metadata:\n name: \"${APPLICATION_NAME}-cfg\"\n labels:\n app: \"${APPLICATION_NAME}\"\n data:\n # Clone git repo, then install requirements.txt and packages.txt\n jupyter_notebook_config.py: |\n import os\n git_url = os.environ.get('GIT_URL')\n home_dir = os.environ.get('HOME')\n os.chdir(home_dir)\n if git_url:\n repo_id = git_url.rsplit('/', 1)[-1]\n os.system('git clone --quiet --recursive ' + git_url)\n os.chdir(repo_id)\n if os.path.exists('packages.txt'):\n os.system('sudo apt-get update')\n os.system('cat packages.txt | xargs sudo apt-get install -y')\n if os.path.exists('requirements.txt'):\n os.system('pip install -r requirements.txt')\n os.chdir(home_dir)\n")),(0,i.yg)("p",null,"We will then need to mount this config file like a persistent volume in the path we want it to be (here ",(0,i.yg)("inlineCode",{parentName:"p"},"/etc/jupyter/openshift"),"), change the ",(0,i.yg)("strong",{parentName:"p"},"volumes")," and ",(0,i.yg)("strong",{parentName:"p"},"volumeMounts")," of your ",(0,i.yg)("strong",{parentName:"p"},"DeploymentConfig"),":"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' volumeMounts:\n - name: data\n mountPath: "/home/jovyan"\n - name: configs\n mountPath: "/etc/jupyter/openshift"\n automountServiceAccountToken: false\n volumes:\n - name: data\n persistentVolumeClaim:\n claimName: "${APPLICATION_NAME}"\n - name: configs\n configMap:\n name: "${APPLICATION_NAME}-cfg"\n')),(0,i.yg)("p",null,"Then change the ",(0,i.yg)("inlineCode",{parentName:"p"},"jupyter-notebook")," container start command to include this config file:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' command:\n - "start-notebook.sh"\n - "--no-browser"\n - "--ip=0.0.0.0"\n - 
"--config=/etc/jupyter/openshift/jupyter_notebook_config.py"\n')),(0,i.yg)("p",null,"Add the ",(0,i.yg)("strong",{parentName:"p"},"optional parameter")," to get the git URL to clone when the user create the template:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"parameters:\n- name: GIT_URL\n displayName: URL of the git repository to clone (optional)\n required: false\n description: Source code will be automatically cloned, then requirements.txt and packages.txt content will be automatically installed if presents\n")),(0,i.yg)("p",null,"Finally, add the git URL parameter provided by the user as ",(0,i.yg)("strong",{parentName:"p"},"environment variable")," of the container, so that it is picked up by the config script when running at the start of JupyterLab:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' env:\n - name: GIT_URL\n value: "${GIT_URL}"\n')),(0,i.yg)("h2",{id:"add-automated-health-checks"},"Add automated health checks"),(0,i.yg)("p",null,"You can add ",(0,i.yg)("strong",{parentName:"p"},"readiness and liveness probes")," to a container to automatically check if the web application is up and ready. This will allow to wait for the JupyterLab web UI to be accessible before showing the application as ready in the Topology. Useful if you are cloning a repository and installing packages, which will take more time to start JupyterLab."),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"}," containers:\n - name: jupyter-notebook\n readinessProbe: \n tcpSocket:\n port: 8888\n livenessProbe: \n initialDelaySeconds: 15 \n tcpSocket: \n port: 8888 \n failureThreshold: 40\n periodSeconds: 10\n timeoutSeconds: 2\n")),(0,i.yg)("p",null,"Checkout the ",(0,i.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/applications/application-health.html"},"OpenShift Application health documentation")," for more details."),(0,i.yg)("h2",{id:"define-resource-limits"},"Define resource limits"),(0,i.yg)("p",null,"You can also define resources request and limits for each ",(0,i.yg)("strong",{parentName:"p"},"DeploymentConfig"),", in ",(0,i.yg)("inlineCode",{parentName:"p"},"spec:")),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},' spec:\n resources:\n requests: \n cpu: "1"\n memory: "2Gi"\n limits:\n cpu: "128"\n memory: "300Gi"\n')),(0,i.yg)("h2",{id:"build-your-own-application-template"},"Build your own application template"),(0,i.yg)("p",null,"The easiest way to build a template for a new application is to start from this JupyterLab template:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"Replace ",(0,i.yg)("inlineCode",{parentName:"li"},"jupyterlab-root")," by your application name"),(0,i.yg)("li",{parentName:"ul"},"Replace ",(0,i.yg)("inlineCode",{parentName:"li"},"8888")," by your application"),(0,i.yg)("li",{parentName:"ul"},"Change the template and parameters descriptions to match your application"),(0,i.yg)("li",{parentName:"ul"},"Remove the ",(0,i.yg)("inlineCode",{parentName:"li"},"securityContext")," part, and other objects you do not need")),(0,i.yg)("p",null,"If you need to start multiple containers, copy/paste the objects you need to create and edit them"))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/0e4359fd.41168bc6.js b/assets/js/0e4359fd.ba9a5169.js similarity index 99% rename from assets/js/0e4359fd.41168bc6.js rename to assets/js/0e4359fd.ba9a5169.js index df03320be..b7131d4bb 100644 
--- a/assets/js/0e4359fd.41168bc6.js +++ b/assets/js/0e4359fd.ba9a5169.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2957],{5680:(e,a,t)=>{t.d(a,{xA:()=>m,yg:()=>g});var l=t(6540);function n(e,a,t){return a in e?Object.defineProperty(e,a,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[a]=t,e}function r(e,a){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);a&&(l=l.filter((function(a){return Object.getOwnPropertyDescriptor(e,a).enumerable}))),t.push.apply(t,l)}return t}function o(e){for(var a=1;a=0||(n[t]=e[t]);return n}(e,a);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(n[t]=e[t])}return n}var i=l.createContext({}),p=function(e){var a=l.useContext(i),t=a;return e&&(t="function"==typeof e?e(a):o(o({},a),e)),t},m=function(e){var a=p(e.components);return l.createElement(i.Provider,{value:a},e.children)},c={inlineCode:"code",wrapper:function(e){var a=e.children;return l.createElement(l.Fragment,{},a)}},h=l.forwardRef((function(e,a){var t=e.components,n=e.mdxType,r=e.originalType,i=e.parentName,m=s(e,["components","mdxType","originalType","parentName"]),h=p(t),g=n,u=h["".concat(i,".").concat(g)]||h[g]||c[g]||r;return t?l.createElement(u,o(o({ref:a},m),{},{components:t})):l.createElement(u,o({ref:a},m))}));function g(e,a){var t=arguments,n=a&&a.mdxType;if("string"==typeof e||n){var r=t.length,o=new Array(r);o[0]=h;var s={};for(var i in a)hasOwnProperty.call(a,i)&&(s[i]=a[i]);s.originalType=e,s.mdxType="string"==typeof e?e:n,o[1]=s;for(var p=2;p{t.r(a),t.d(a,{assets:()=>m,contentTitle:()=>i,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>c});var l=t(9668),n=t(1367),r=(t(6540),t(5680)),o=["components"],s={id:"helm",title:"Install from Helm charts"},i=void 0,p={unversionedId:"helm",id:"helm",title:"Install from Helm charts",description:"Helm is a popular package manager for Kubernetes. 
A Helm chart is a bundle of parameterizable YAML resources for Kubernetes/OpenShift.",source:"@site/docs/helm.md",sourceDirName:".",slug:"/helm",permalink:"/docs/helm",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/helm.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"helm",title:"Install from Helm charts"},sidebar:"docs",previous:{title:"Anatomy of a DSRI application",permalink:"/docs/anatomy-of-an-application"},next:{title:"Install from Operators",permalink:"/docs/operators"}},m={},c=[{value:"Install the Helm client",id:"install-the-helm-client",level:2},{value:"Install Golang",id:"install-golang",level:3},{value:"Install Helm",id:"install-helm",level:3},{value:"Install on Linux",id:"install-on-linux",level:4},{value:"Install on MacOS",id:"install-on-macos",level:4},{value:"Install on Windows",id:"install-on-windows",level:4},{value:"Check Helm installation",id:"check-helm-installation",level:3},{value:"Install a Helm chart",id:"install-a-helm-chart",level:2},{value:"Start a MySQL database with Helm",id:"start-a-mysql-database-with-helm",level:3},{value:"Uninstall the application",id:"uninstall-the-application",level:3},{value:"Set deployment parameters",id:"set-deployment-parameters",level:3}],h={toc:c};function g(e){var a=e.components,t=(0,n.A)(e,o);return(0,r.yg)("wrapper",(0,l.A)({},h,t,{components:a,mdxType:"MDXLayout"}),(0,r.yg)("p",null,(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/"},"Helm")," is a popular package manager for ",(0,r.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes"),". A Helm chart is a bundle of parameterizable YAML resources for Kubernetes/OpenShift."),(0,r.yg)("admonition",{title:"Difference with Operators",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Helm charts can be defined as ",(0,r.yg)("a",{parentName:"p",href:"/docs/operators"},"Operators")," (if they are packaged using the ",(0,r.yg)("inlineCode",{parentName:"p"},"operator-sdk"),"), but they are not all Operators.")),(0,r.yg)("p",null,"See the official ",(0,r.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/cli_reference/helm_cli/getting-started-with-helm-on-openshift-container-platform.html"},"documentation for Helm on OpenShift"),"."),(0,r.yg)("h2",{id:"install-the-helm-client"},"Install the Helm client"),(0,r.yg)("h3",{id:"install-golang"},"Install Golang"),(0,r.yg)("p",null,"Go lang is required to run Helm. 
Install ",(0,r.yg)("inlineCode",{parentName:"p"},"go 1.14.4")," on Linux, you can find instructions for MacOS, Windows and newer versions at ",(0,r.yg)("a",{parentName:"p",href:"https://golang.org/dl"},"https://golang.org/dl")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},'wget https://dl.google.com/go/go1.14.4.linux-amd64.tar.gz\n\n# Extract to /usr/local\ntar -C /usr/local -xzf go1.14.4.linux-amd64.tar.gz\n\n# Add Go to path in .profile\necho "export PATH=$PATH:/usr/local/go/bin" >> ~/.profile\n# Or in .zshrc if you use ZSH\necho "export PATH=$PATH:/usr/local/go/bin" >> ~/.zshrc\n')),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"Restart your laptop for the changes to take effects or execute ",(0,r.yg)("inlineCode",{parentName:"p"},"source ~/.profile"))),(0,r.yg)("h3",{id:"install-helm"},"Install Helm"),(0,r.yg)("p",null,"You can also use the ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/install/"},"official documentation to install Helm")," on your machine."),(0,r.yg)("h4",{id:"install-on-linux"},"Install on Linux"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"See ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/install/#from-the-binary-releases"},"Helm documentation for Linux"),".")),(0,r.yg)("h4",{id:"install-on-macos"},"Install on MacOS"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"brew install helm\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"See ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/install/#from-homebrew-macos"},"Helm documentation for MacOS"),".")),(0,r.yg)("h4",{id:"install-on-windows"},"Install on Windows"),(0,r.yg)("p",null,"Install using ",(0,r.yg)("a",{parentName:"p",href:"https://chocolatey.org/"},"Chocolatey"),"."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"choco install kubernetes-helm\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"See ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/install/#from-chocolatey-windows"},"Helm documentation for Windows"),".")),(0,r.yg)("h3",{id:"check-helm-installation"},"Check Helm installation"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"helm version\n")),(0,r.yg)("h2",{id:"install-a-helm-chart"},"Install a Helm chart"),(0,r.yg)("p",null,"Explore published Helm charts at ",(0,r.yg)("a",{parentName:"p",href:"https://hub.helm.sh"},"https://hub.helm.sh \u26f5")),(0,r.yg)("h3",{id:"start-a-mysql-database-with-helm"},"Start a MySQL database with Helm"),(0,r.yg)("p",null,"Example from the ",(0,r.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.3/cli_reference/helm_cli/getting-started-with-helm-on-openshift-container-platform.html"},"OpenShift 4.3 documentation"),". 
See also the ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/using_helm/"},"official Helm documentation"),"."),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Add the repository of official Helm charts to your local Helm client:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n")),(0,r.yg)("ol",{start:2},(0,r.yg)("li",{parentName:"ol"},"Update the repository:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo update\n")),(0,r.yg)("ol",{start:3},(0,r.yg)("li",{parentName:"ol"},"Install an example MySQL chart, and start the application named ",(0,r.yg)("inlineCode",{parentName:"li"},"example-mysql"),":")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm install example-mysql stable/mysql\n")),(0,r.yg)("admonition",{title:"Password",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"The instructions to retrieve the admin password and connect to the database will be displayed in the terminal. "),(0,r.yg)("p",{parentName:"admonition"},"Retrieve the database password with this command (N.B.: ",(0,r.yg)("inlineCode",{parentName:"p"},"kubectl")," can also be used in place of ",(0,r.yg)("inlineCode",{parentName:"p"},"oc"),"):"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'oc get secret example-mysql -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo\n'))),(0,r.yg)("ol",{start:4},(0,r.yg)("li",{parentName:"ol"},"Verify that the chart has installed successfully:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm list\n")),(0,r.yg)("ol",{start:5},(0,r.yg)("li",{parentName:"ol"},"Expose the MySQL service as a route:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc expose service example-mysql\noc get routes\n")),(0,r.yg)("p",null,"Or port-forward to http://localhost:3306"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc port-forward svc/example-mysql 3306\n")),(0,r.yg)("h3",{id:"uninstall-the-application"},"Uninstall the application"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm uninstall example-mysql\n")),(0,r.yg)("h3",{id:"set-deployment-parameters"},"Set deployment parameters"),(0,r.yg)("p",null,"You can also define deployment parameters when installing a Helm chart, such as the ",(0,r.yg)("strong",{parentName:"p"},"service account")," and ",(0,r.yg)("strong",{parentName:"p"},"node selector"),". 
"),(0,r.yg)("p",null,"For example, here we make sure the application will run on DSRI CPU nodes and use the ",(0,r.yg)("inlineCode",{parentName:"p"},"anyuid")," service account:"),(0,r.yg)("p",null,"Add Bitnami repository:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add bitnami https://charts.bitnami.com/bitnami\n")),(0,r.yg)("p",null,"Install and start Postgresql:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm install postgresql-db bitnami/postgresql --set nodeSelector.dsri.unimaas.nl/cpu=true --set serviceAccount.name=anyuid\n")))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2957],{5680:(e,a,t)=>{t.d(a,{xA:()=>m,yg:()=>g});var l=t(6540);function n(e,a,t){return a in e?Object.defineProperty(e,a,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[a]=t,e}function r(e,a){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);a&&(l=l.filter((function(a){return Object.getOwnPropertyDescriptor(e,a).enumerable}))),t.push.apply(t,l)}return t}function o(e){for(var a=1;a=0||(n[t]=e[t]);return n}(e,a);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(n[t]=e[t])}return n}var i=l.createContext({}),p=function(e){var a=l.useContext(i),t=a;return e&&(t="function"==typeof e?e(a):o(o({},a),e)),t},m=function(e){var a=p(e.components);return l.createElement(i.Provider,{value:a},e.children)},c={inlineCode:"code",wrapper:function(e){var a=e.children;return l.createElement(l.Fragment,{},a)}},h=l.forwardRef((function(e,a){var t=e.components,n=e.mdxType,r=e.originalType,i=e.parentName,m=s(e,["components","mdxType","originalType","parentName"]),h=p(t),g=n,u=h["".concat(i,".").concat(g)]||h[g]||c[g]||r;return t?l.createElement(u,o(o({ref:a},m),{},{components:t})):l.createElement(u,o({ref:a},m))}));function g(e,a){var t=arguments,n=a&&a.mdxType;if("string"==typeof e||n){var r=t.length,o=new Array(r);o[0]=h;var s={};for(var i in a)hasOwnProperty.call(a,i)&&(s[i]=a[i]);s.originalType=e,s.mdxType="string"==typeof e?e:n,o[1]=s;for(var p=2;p{t.r(a),t.d(a,{assets:()=>m,contentTitle:()=>i,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>c});var l=t(9668),n=t(1367),r=(t(6540),t(5680)),o=["components"],s={id:"helm",title:"Install from Helm charts"},i=void 0,p={unversionedId:"helm",id:"helm",title:"Install from Helm charts",description:"Helm is a popular package manager for Kubernetes. 
A Helm chart is a bundle of parameterizable YAML resources for Kubernetes/OpenShift.",source:"@site/docs/helm.md",sourceDirName:".",slug:"/helm",permalink:"/docs/helm",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/helm.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"helm",title:"Install from Helm charts"},sidebar:"docs",previous:{title:"Anatomy of a DSRI application",permalink:"/docs/anatomy-of-an-application"},next:{title:"Install from Operators",permalink:"/docs/operators"}},m={},c=[{value:"Install the Helm client",id:"install-the-helm-client",level:2},{value:"Install Golang",id:"install-golang",level:3},{value:"Install Helm",id:"install-helm",level:3},{value:"Install on Linux",id:"install-on-linux",level:4},{value:"Install on MacOS",id:"install-on-macos",level:4},{value:"Install on Windows",id:"install-on-windows",level:4},{value:"Check Helm installation",id:"check-helm-installation",level:3},{value:"Install a Helm chart",id:"install-a-helm-chart",level:2},{value:"Start a MySQL database with Helm",id:"start-a-mysql-database-with-helm",level:3},{value:"Uninstall the application",id:"uninstall-the-application",level:3},{value:"Set deployment parameters",id:"set-deployment-parameters",level:3}],h={toc:c};function g(e){var a=e.components,t=(0,n.A)(e,o);return(0,r.yg)("wrapper",(0,l.A)({},h,t,{components:a,mdxType:"MDXLayout"}),(0,r.yg)("p",null,(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/"},"Helm")," is a popular package manager for ",(0,r.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes"),". A Helm chart is a bundle of parameterizable YAML resources for Kubernetes/OpenShift."),(0,r.yg)("admonition",{title:"Difference with Operators",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Helm charts can be defined as ",(0,r.yg)("a",{parentName:"p",href:"/docs/operators"},"Operators")," (if they are packaged using the ",(0,r.yg)("inlineCode",{parentName:"p"},"operator-sdk"),"), but they are not all Operators.")),(0,r.yg)("p",null,"See the official ",(0,r.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/cli_reference/helm_cli/getting-started-with-helm-on-openshift-container-platform.html"},"documentation for Helm on OpenShift"),"."),(0,r.yg)("h2",{id:"install-the-helm-client"},"Install the Helm client"),(0,r.yg)("h3",{id:"install-golang"},"Install Golang"),(0,r.yg)("p",null,"Go lang is required to run Helm. 
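"),(0,r.yg)("p",null,"If you are not sure whether Go is already installed, a quick check can save you the download; any reasonably recent version should do:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"# Print the installed Go version, if any\ngo version\n")),(0,r.yg)("p",null,"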
Install ",(0,r.yg)("inlineCode",{parentName:"p"},"go 1.14.4")," on Linux, you can find instructions for MacOS, Windows and newer versions at ",(0,r.yg)("a",{parentName:"p",href:"https://golang.org/dl"},"https://golang.org/dl")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},'wget https://dl.google.com/go/go1.14.4.linux-amd64.tar.gz\n\n# Extract to /usr/local\ntar -C /usr/local -xzf go1.14.4.linux-amd64.tar.gz\n\n# Add Go to path in .profile\necho "export PATH=$PATH:/usr/local/go/bin" >> ~/.profile\n# Or in .zshrc if you use ZSH\necho "export PATH=$PATH:/usr/local/go/bin" >> ~/.zshrc\n')),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"Restart your laptop for the changes to take effects or execute ",(0,r.yg)("inlineCode",{parentName:"p"},"source ~/.profile"))),(0,r.yg)("h3",{id:"install-helm"},"Install Helm"),(0,r.yg)("p",null,"You can also use the ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/install/"},"official documentation to install Helm")," on your machine."),(0,r.yg)("h4",{id:"install-on-linux"},"Install on Linux"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"See ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/install/#from-the-binary-releases"},"Helm documentation for Linux"),".")),(0,r.yg)("h4",{id:"install-on-macos"},"Install on MacOS"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"brew install helm\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"See ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/install/#from-homebrew-macos"},"Helm documentation for MacOS"),".")),(0,r.yg)("h4",{id:"install-on-windows"},"Install on Windows"),(0,r.yg)("p",null,"Install using ",(0,r.yg)("a",{parentName:"p",href:"https://chocolatey.org/"},"Chocolatey"),"."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"choco install kubernetes-helm\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"See ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/install/#from-chocolatey-windows"},"Helm documentation for Windows"),".")),(0,r.yg)("h3",{id:"check-helm-installation"},"Check Helm installation"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"helm version\n")),(0,r.yg)("h2",{id:"install-a-helm-chart"},"Install a Helm chart"),(0,r.yg)("p",null,"Explore published Helm charts at ",(0,r.yg)("a",{parentName:"p",href:"https://hub.helm.sh"},"https://hub.helm.sh \u26f5")),(0,r.yg)("h3",{id:"start-a-mysql-database-with-helm"},"Start a MySQL database with Helm"),(0,r.yg)("p",null,"Example from the ",(0,r.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.3/cli_reference/helm_cli/getting-started-with-helm-on-openshift-container-platform.html"},"OpenShift 4.3 documentation"),". 
See also the ",(0,r.yg)("a",{parentName:"p",href:"https://helm.sh/docs/intro/using_helm/"},"official Helm documentation"),"."),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Add the repository of official Helm charts to your local Helm client:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n")),(0,r.yg)("ol",{start:2},(0,r.yg)("li",{parentName:"ol"},"Update the repository:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo update\n")),(0,r.yg)("ol",{start:3},(0,r.yg)("li",{parentName:"ol"},"Install an example MySQL chart, and start the application named ",(0,r.yg)("inlineCode",{parentName:"li"},"example-mysql"),":")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm install example-mysql stable/mysql\n")),(0,r.yg)("admonition",{title:"Password",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"The instructions to retrieve the admin password and connect to the database will be displayed in the terminal. "),(0,r.yg)("p",{parentName:"admonition"},"Retrieve the database password with this command (N.B.: ",(0,r.yg)("inlineCode",{parentName:"p"},"kubectl")," can also be used in place of ",(0,r.yg)("inlineCode",{parentName:"p"},"oc"),"):"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'oc get secret example-mysql -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo\n'))),(0,r.yg)("ol",{start:4},(0,r.yg)("li",{parentName:"ol"},"Verify that the chart has installed successfully:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm list\n")),(0,r.yg)("ol",{start:5},(0,r.yg)("li",{parentName:"ol"},"Expose the MySQL service as a route:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc expose service example-mysql\noc get routes\n")),(0,r.yg)("p",null,"Or port-forward to http://localhost:3306"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc port-forward svc/example-mysql 3306\n")),(0,r.yg)("h3",{id:"uninstall-the-application"},"Uninstall the application"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm uninstall example-mysql\n")),(0,r.yg)("h3",{id:"set-deployment-parameters"},"Set deployment parameters"),(0,r.yg)("p",null,"You can also define deployment parameters when installing a Helm chart, such as the ",(0,r.yg)("strong",{parentName:"p"},"service account")," and ",(0,r.yg)("strong",{parentName:"p"},"node selector"),". 
"),(0,r.yg)("p",null,"For example, here we make sure the application will run on DSRI CPU nodes and use the ",(0,r.yg)("inlineCode",{parentName:"p"},"anyuid")," service account:"),(0,r.yg)("p",null,"Add Bitnami repository:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add bitnami https://charts.bitnami.com/bitnami\n")),(0,r.yg)("p",null,"Install and start Postgresql:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"helm install postgresql-db bitnami/postgresql --set nodeSelector.dsri.unimaas.nl/cpu=true --set serviceAccount.name=anyuid\n")))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/11da9ee4.1303f746.js b/assets/js/11da9ee4.0594975c.js similarity index 99% rename from assets/js/11da9ee4.1303f746.js rename to assets/js/11da9ee4.0594975c.js index 995cb3f65..18798ccd6 100644 --- a/assets/js/11da9ee4.1303f746.js +++ b/assets/js/11da9ee4.0594975c.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4809],{5680:(t,e,a)=>{a.d(e,{xA:()=>u,yg:()=>h});var r=a(6540);function n(t,e,a){return e in t?Object.defineProperty(t,e,{value:a,enumerable:!0,configurable:!0,writable:!0}):t[e]=a,t}function o(t,e){var a=Object.keys(t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(t);e&&(r=r.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),a.push.apply(a,r)}return a}function i(t){for(var e=1;e=0||(n[a]=t[a]);return n}(t,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(t);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(t,a)&&(n[a]=t[a])}return n}var l=r.createContext({}),s=function(t){var e=r.useContext(l),a=e;return t&&(a="function"==typeof t?t(e):i(i({},e),t)),a},u=function(t){var e=s(t.components);return r.createElement(l.Provider,{value:e},t.children)},c={inlineCode:"code",wrapper:function(t){var e=t.children;return r.createElement(r.Fragment,{},e)}},m=r.forwardRef((function(t,e){var a=t.components,n=t.mdxType,o=t.originalType,l=t.parentName,u=p(t,["components","mdxType","originalType","parentName"]),m=s(a),h=n,d=m["".concat(l,".").concat(h)]||m[h]||c[h]||o;return a?r.createElement(d,i(i({ref:e},u),{},{components:a})):r.createElement(d,i({ref:e},u))}));function h(t,e){var a=arguments,n=e&&e.mdxType;if("string"==typeof t||n){var o=a.length,i=new Array(o);i[0]=m;var p={};for(var l in e)hasOwnProperty.call(e,l)&&(p[l]=e[l]);p.originalType=t,p.mdxType="string"==typeof t?t:n,i[1]=p;for(var s=2;s{a.r(e),a.d(e,{assets:()=>u,contentTitle:()=>l,default:()=>h,frontMatter:()=>p,metadata:()=>s,toc:()=>c});var r=a(9668),n=a(1367),o=(a(6540),a(5680)),i=["components"],p={id:"catalog-opendatahub",title:"OpenDataHub"},l=void 0,s={unversionedId:"catalog-opendatahub",id:"catalog-opendatahub",title:"OpenDataHub",description:"Deploying an OpenDataHub cluster is a work in progress on the DSRI, contact us if you are interested in trying it out.",source:"@site/docs/catalog-opendatahub.md",sourceDirName:".",slug:"/catalog-opendatahub",permalink:"/docs/catalog-opendatahub",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-opendatahub.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-opendatahub",title:"OpenDataHub"}},u={},c=[{value:"Components available on DSRI",id:"components-available-on-dsri",level:2},{value:"Start Spark with 
JupyterHub",id:"start-spark-with-jupyterhub",level:3},{value:"All components",id:"all-components",level:2}],m={toc:c};function h(t){var e=t.components,a=(0,n.A)(t,i);return(0,o.yg)("wrapper",(0,r.A)({},m,a,{components:e,mdxType:"MDXLayout"}),(0,o.yg)("admonition",{title:"Work in progress",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Deploying an ",(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io"},"OpenDataHub")," cluster is a ",(0,o.yg)("strong",{parentName:"p"},"work in progress")," on the DSRI, ",(0,o.yg)("strong",{parentName:"p"},(0,o.yg)("a",{parentName:"strong",href:"/help"},"contact us"))," if you are interested in trying it out.")),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io"},"OpenDataHub")," is a project to orchestrate the deployment of Data Science applications on OpenShift, based on KubeFlow."),(0,o.yg)("h2",{id:"components-available-on-dsri"},"Components available on DSRI"),(0,o.yg)("p",null,"Those components have been tested on the DSRI:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/jupyterhub/README.md"},"JupyterHub")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/radanalyticsio/README.md"},"Spark Operator")," from ",(0,o.yg)("a",{parentName:"li",href:"https://radanalytics.io/"},"radanalytics"))),(0,o.yg)("h3",{id:"start-spark-with-jupyterhub"},"Start Spark with JupyterHub"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Checkout the ",(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io/docs/getting-started/quick-installation.html"},"official documentation to start an instance of OpenDataHub")," (note that the Operator has already been installed)")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Then visit the ",(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io/docs/getting-started/basic-tutorial.html"},"documentation to reach the Spark cluster")," from a Jupyter notebook."))),(0,o.yg)("h2",{id:"all-components"},"All components"),(0,o.yg)("p",null,"Here are all the components that can be deployed as part of an ",(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io"},"OpenDataHub"),":"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/jupyterhub/README.md"},"JupyterHub")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/airflow/README.md"},"Airflow")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/odhargo/README.md"},"Argo")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/grafana/README.md"},"Grafana")," & ",(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/prometheus/README.md"},"Prometheus")," for data/logs visualization"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/radanalyticsio/README.md"},"Spark Operator")," from 
",(0,o.yg)("a",{parentName:"li",href:"https://radanalytics.io/"},"radanalytics")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/kafka/README.md"},"Kafka"),"/Strimzi for streaming applications"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/superset/README.md"},"Superset")," for data visualization"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/ai-library/README.md"},"AI Library")," (Seldon to publish AI models)")),(0,o.yg)("p",null,"Let us know if you need help to deploy one of those components on the DSRI."))}h.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4809],{5680:(t,e,a)=>{a.d(e,{xA:()=>u,yg:()=>h});var r=a(6540);function n(t,e,a){return e in t?Object.defineProperty(t,e,{value:a,enumerable:!0,configurable:!0,writable:!0}):t[e]=a,t}function o(t,e){var a=Object.keys(t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(t);e&&(r=r.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),a.push.apply(a,r)}return a}function i(t){for(var e=1;e=0||(n[a]=t[a]);return n}(t,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(t);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(t,a)&&(n[a]=t[a])}return n}var l=r.createContext({}),s=function(t){var e=r.useContext(l),a=e;return t&&(a="function"==typeof t?t(e):i(i({},e),t)),a},u=function(t){var e=s(t.components);return r.createElement(l.Provider,{value:e},t.children)},c={inlineCode:"code",wrapper:function(t){var e=t.children;return r.createElement(r.Fragment,{},e)}},m=r.forwardRef((function(t,e){var a=t.components,n=t.mdxType,o=t.originalType,l=t.parentName,u=p(t,["components","mdxType","originalType","parentName"]),m=s(a),h=n,d=m["".concat(l,".").concat(h)]||m[h]||c[h]||o;return a?r.createElement(d,i(i({ref:e},u),{},{components:a})):r.createElement(d,i({ref:e},u))}));function h(t,e){var a=arguments,n=e&&e.mdxType;if("string"==typeof t||n){var o=a.length,i=new Array(o);i[0]=m;var p={};for(var l in e)hasOwnProperty.call(e,l)&&(p[l]=e[l]);p.originalType=t,p.mdxType="string"==typeof t?t:n,i[1]=p;for(var s=2;s{a.r(e),a.d(e,{assets:()=>u,contentTitle:()=>l,default:()=>h,frontMatter:()=>p,metadata:()=>s,toc:()=>c});var r=a(9668),n=a(1367),o=(a(6540),a(5680)),i=["components"],p={id:"catalog-opendatahub",title:"OpenDataHub"},l=void 0,s={unversionedId:"catalog-opendatahub",id:"catalog-opendatahub",title:"OpenDataHub",description:"Deploying an OpenDataHub cluster is a work in progress on the DSRI, contact us if you are interested in trying it out.",source:"@site/docs/catalog-opendatahub.md",sourceDirName:".",slug:"/catalog-opendatahub",permalink:"/docs/catalog-opendatahub",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-opendatahub.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-opendatahub",title:"OpenDataHub"}},u={},c=[{value:"Components available on DSRI",id:"components-available-on-dsri",level:2},{value:"Start Spark with JupyterHub",id:"start-spark-with-jupyterhub",level:3},{value:"All components",id:"all-components",level:2}],m={toc:c};function h(t){var 
e=t.components,a=(0,n.A)(t,i);return(0,o.yg)("wrapper",(0,r.A)({},m,a,{components:e,mdxType:"MDXLayout"}),(0,o.yg)("admonition",{title:"Work in progress",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Deploying an ",(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io"},"OpenDataHub")," cluster is a ",(0,o.yg)("strong",{parentName:"p"},"work in progress")," on the DSRI, ",(0,o.yg)("strong",{parentName:"p"},(0,o.yg)("a",{parentName:"strong",href:"/help"},"contact us"))," if you are interested in trying it out.")),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io"},"OpenDataHub")," is a project to orchestrate the deployment of Data Science applications on OpenShift, based on KubeFlow."),(0,o.yg)("h2",{id:"components-available-on-dsri"},"Components available on DSRI"),(0,o.yg)("p",null,"These components have been tested on the DSRI:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/jupyterhub/README.md"},"JupyterHub")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/radanalyticsio/README.md"},"Spark Operator")," from ",(0,o.yg)("a",{parentName:"li",href:"https://radanalytics.io/"},"radanalytics"))),(0,o.yg)("h3",{id:"start-spark-with-jupyterhub"},"Start Spark with JupyterHub"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Check out the ",(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io/docs/getting-started/quick-installation.html"},"official documentation to start an instance of OpenDataHub")," (note that the Operator has already been installed)")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Then visit the ",(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io/docs/getting-started/basic-tutorial.html"},"documentation to reach the Spark cluster")," from a Jupyter notebook."))),(0,o.yg)("h2",{id:"all-components"},"All components"),(0,o.yg)("p",null,"Here are all the components that can be deployed as part of an ",(0,o.yg)("a",{parentName:"p",href:"https://opendatahub.io"},"OpenDataHub"),":"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/jupyterhub/README.md"},"JupyterHub")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/airflow/README.md"},"Airflow")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/odhargo/README.md"},"Argo")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/grafana/README.md"},"Grafana")," & ",(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/prometheus/README.md"},"Prometheus")," for data/logs visualization"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/radanalyticsio/README.md"},"Spark Operator")," from ",(0,o.yg)("a",{parentName:"li",href:"https://radanalytics.io/"},"radanalytics")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/kafka/README.md"},"Kafka"),"/Strimzi for streaming 
applications"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/superset/README.md"},"Superset")," for data visualization"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/odh-manifests/blob/master/ai-library/README.md"},"AI Library")," (Seldon to publish AI models)")),(0,o.yg)("p",null,"Let us know if you need help to deploy one of those components on the DSRI."))}h.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/1dc85e61.b9965823.js b/assets/js/1dc85e61.9b1a218e.js similarity index 98% rename from assets/js/1dc85e61.b9965823.js rename to assets/js/1dc85e61.9b1a218e.js index 51380ad34..79d754c16 100644 --- a/assets/js/1dc85e61.b9965823.js +++ b/assets/js/1dc85e61.9b1a218e.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9328],{5680:(e,n,t)=>{t.d(n,{xA:()=>p,yg:()=>m});var r=t(6540);function a(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function o(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function i(e){for(var n=1;n=0||(a[t]=e[t]);return a}(e,n);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var u=r.createContext({}),c=function(e){var n=r.useContext(u),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},p=function(e){var n=c(e.components);return r.createElement(u.Provider,{value:n},e.children)},s={inlineCode:"code",wrapper:function(e){var n=e.children;return r.createElement(r.Fragment,{},n)}},g=r.forwardRef((function(e,n){var t=e.components,a=e.mdxType,o=e.originalType,u=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),g=c(t),m=a,y=g["".concat(u,".").concat(m)]||g[m]||s[m]||o;return t?r.createElement(y,i(i({ref:n},p),{},{components:t})):r.createElement(y,i({ref:n},p))}));function m(e,n){var t=arguments,a=n&&n.mdxType;if("string"==typeof e||a){var o=t.length,i=new Array(o);i[0]=g;var l={};for(var u in n)hasOwnProperty.call(n,u)&&(l[u]=n[u]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var c=2;c{t.r(n),t.d(n,{assets:()=>p,contentTitle:()=>u,default:()=>m,frontMatter:()=>l,metadata:()=>c,toc:()=>s});var r=t(9668),a=t(1367),o=(t(6540),t(5680)),i=["components"],l={},u=void 0,c={unversionedId:"deploy-gitlab-runner",id:"deploy-gitlab-runner",title:"deploy-gitlab-runner",description:"First, obtain gitlab runner registration token via the gitlab webinterface",source:"@site/docs/deploy-gitlab-runner.md",sourceDirName:".",slug:"/deploy-gitlab-runner",permalink:"/docs/deploy-gitlab-runner",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-gitlab-runner.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{}},p={},s=[],g={toc:s};function m(e){var n=e.components,t=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,r.A)({},g,t,{components:n,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"First, obtain gitlab runner registration token via the gitlab webinterface"),(0,o.yg)("p",null,"TODO: add screenshot"),(0,o.yg)("p",null,'Add "GitLab Runner" operator to your project from the Operators --\x3e OperatorHub page.\nMake 
sure you choose the "certified" GitLab Runner (v1.4.0) The community runner (v1.10.0) is a bit more up to date, but currently does not work.'),(0,o.yg)("p",null,"Install in a specific namespace on the cluster. Choose your namespace in the dropdown."),(0,o.yg)("p",null,"Create registration token secret:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-yaml"},"---\napiVersion: v1\nkind: Secret\nmetadata:\n name: gitlab-runner-secret\ntype: Opaque\nstringData:\n runner-registration-token: \n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"oc create -f gitlab-runner-secret.yaml\n")),(0,o.yg)("p",null,"Although, this should also work:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"oc create secret generic gitlab-runner-secret --from-literal=runner-registration-token=\n")),(0,o.yg)("p",null,"Add the following to the ConfigMap of the GitLab Runner operator:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-yaml"},'[[runners]]\n executor = "kubernetes"\n [runners.kubernetes]\n [runners.kubernetes.volumes]\n [[runners.kubernetes.volumes.empty_dir]]\n name = "empty-dir"\n mount_path = "/"\n medium = "Memory"\n')),(0,o.yg)("p",null,"Create the configmap:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"oc create configmap custom-config-toml --from-file config.toml=/tmp/customconfig \n")),(0,o.yg)("p",null,"Create the gitlab runner Custom Resource Definition:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-yaml"},"apiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n name: gitlab-runner\nspec:\n gitlabUrl: https://gitlab.maastrichtuniversity.nl\n token: gitlab-runner-secret\n config: custom-config-toml\n tags: openshift\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-yaml"},"--- other stuff dont use!\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n name: gitlab-runner\nspec:\n gitlabUrl: https://gitlab.maastrichtuniversity.nl\n buildImage: alpine\n token: gitlab-runner-secret\n tags: openshift\n")))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9328],{5680:(e,n,t)=>{t.d(n,{xA:()=>p,yg:()=>m});var r=t(6540);function a(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function o(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function i(e){for(var n=1;n=0||(a[t]=e[t]);return a}(e,n);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var u=r.createContext({}),c=function(e){var n=r.useContext(u),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},p=function(e){var n=c(e.components);return r.createElement(u.Provider,{value:n},e.children)},s={inlineCode:"code",wrapper:function(e){var n=e.children;return r.createElement(r.Fragment,{},n)}},g=r.forwardRef((function(e,n){var t=e.components,a=e.mdxType,o=e.originalType,u=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),g=c(t),m=a,y=g["".concat(u,".").concat(m)]||g[m]||s[m]||o;return t?r.createElement(y,i(i({ref:n},p),{},{components:t})):r.createElement(y,i({ref:n},p))}));function m(e,n){var t=arguments,a=n&&n.mdxType;if("string"==typeof e||a){var o=t.length,i=new 
Array(o);i[0]=g;var l={};for(var u in n)hasOwnProperty.call(n,u)&&(l[u]=n[u]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var c=2;c{t.r(n),t.d(n,{assets:()=>p,contentTitle:()=>u,default:()=>m,frontMatter:()=>l,metadata:()=>c,toc:()=>s});var r=t(9668),a=t(1367),o=(t(6540),t(5680)),i=["components"],l={},u=void 0,c={unversionedId:"deploy-gitlab-runner",id:"deploy-gitlab-runner",title:"deploy-gitlab-runner",description:"First, obtain a GitLab Runner registration token via the GitLab web interface",source:"@site/docs/deploy-gitlab-runner.md",sourceDirName:".",slug:"/deploy-gitlab-runner",permalink:"/docs/deploy-gitlab-runner",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-gitlab-runner.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{}},p={},s=[],g={toc:s};function m(e){var n=e.components,t=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,r.A)({},g,t,{components:n,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"First, obtain a GitLab Runner registration token via the GitLab web interface"),(0,o.yg)("p",null,"TODO: add screenshot"),(0,o.yg)("p",null,'Add "GitLab Runner" operator to your project from the Operators --\x3e OperatorHub page.\nMake sure you choose the "certified" GitLab Runner (v1.4.0). The community runner (v1.10.0) is a bit more up to date, but currently does not work.'),(0,o.yg)("p",null,"Install in a specific namespace on the cluster. Choose your namespace in the dropdown."),(0,o.yg)("p",null,"Create the registration token secret:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-yaml"},"---\napiVersion: v1\nkind: Secret\nmetadata:\n name: gitlab-runner-secret\ntype: Opaque\nstringData:\n runner-registration-token: \n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"oc create -f gitlab-runner-secret.yaml\n")),(0,o.yg)("p",null,"Alternatively, this should also work:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"oc create secret generic gitlab-runner-secret --from-literal=runner-registration-token=\n")),(0,o.yg)("p",null,"Add the following to the ConfigMap of the GitLab Runner operator:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-yaml"},'[[runners]]\n executor = "kubernetes"\n [runners.kubernetes]\n [runners.kubernetes.volumes]\n [[runners.kubernetes.volumes.empty_dir]]\n name = "empty-dir"\n mount_path = "/"\n medium = "Memory"\n')),(0,o.yg)("p",null,"Create the ConfigMap:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"oc create configmap custom-config-toml --from-file config.toml=/tmp/customconfig \n")),(0,o.yg)("p",null,"Create the GitLab Runner custom resource:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-yaml"},"apiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n name: gitlab-runner\nspec:\n gitlabUrl: https://gitlab.maastrichtuniversity.nl\n token: gitlab-runner-secret\n config: custom-config-toml\n tags: openshift\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-yaml"},"--- other stuff, don't use!\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n name: gitlab-runner\nspec:\n gitlabUrl: https://gitlab.maastrichtuniversity.nl\n buildImage: alpine\n token: gitlab-runner-secret\n tags: openshift\n")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/21dd1498.f137dea3.js b/assets/js/21dd1498.353c9298.js similarity index 99% rename from 
assets/js/21dd1498.f137dea3.js rename to assets/js/21dd1498.353c9298.js index e48317dfb..74440a78a 100644 --- a/assets/js/21dd1498.f137dea3.js +++ b/assets/js/21dd1498.353c9298.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4896],{5680:(e,t,a)=>{a.d(t,{xA:()=>g,yg:()=>d});var n=a(6540);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var p=n.createContext({}),l=function(e){var t=n.useContext(p),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},g=function(e){var t=l(e.components);return n.createElement(p.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},y=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,o=e.originalType,p=e.parentName,g=s(e,["components","mdxType","originalType","parentName"]),y=l(a),d=r,c=y["".concat(p,".").concat(d)]||y[d]||m[d]||o;return a?n.createElement(c,i(i({ref:t},g),{},{components:a})):n.createElement(c,i({ref:t},g))}));function d(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,i=new Array(o);i[0]=y;var s={};for(var p in t)hasOwnProperty.call(t,p)&&(s[p]=t[p]);s.originalType=e,s.mdxType="string"==typeof e?e:r,i[1]=s;for(var l=2;l{a.r(t),a.d(t,{assets:()=>g,contentTitle:()=>p,default:()=>d,frontMatter:()=>s,metadata:()=>l,toc:()=>m});var n=a(9668),r=a(1367),o=(a(6540),a(5680)),i=["components"],s={id:"openshift-storage",title:"Data storage"},p=void 0,l={unversionedId:"openshift-storage",id:"openshift-storage",title:"Data storage",description:"Different storages can be used when running services on the DSRI:",source:"@site/docs/openshift-storage.md",sourceDirName:".",slug:"/openshift-storage",permalink:"/docs/openshift-storage",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-storage.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-storage",title:"Data storage"},sidebar:"docs",previous:{title:"Command Line Interface",permalink:"/docs/openshift-commands"},next:{title:"Delete objects (advanced)",permalink:"/docs/openshift-delete-objects"}},g={},m=[{value:"Create the Persistent Storage",id:"create-the-persistent-storage",level:3},{value:"Connect the Existing Persistent Storage",id:"connect-the-existing-persistent-storage",level:3},{value:"Expand existing Persistent Storage",id:"expand-existing-persistent-storage",level:3},{value:"Use the dynamic storage",id:"use-the-dynamic-storage",level:3},{value:"Use the ephemeral storage",id:"use-the-ephemeral-storage",level:3}],y={toc:m};function d(e){var t=e.components,a=(0,r.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},y,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"Different storages can be used when running services on the DSRI:"),(0,o.yg)("p",null,"\ud83e\udd8b ",(0,o.yg)("strong",{parentName:"p"},"Ephemeral storage"),": storage is bound to the pod, data will be lost when the pod is deleted (but this 
deployment does not require to request the creation of a persistent storage, and is faster to test code)."),(0,o.yg)("p",null,"\u26a1 ",(0,o.yg)("strong",{parentName:"p"},"Dynamic storage"),": automatically create a persistent storage when starting an application. Can also be created in the OpenShift web UI, using the ",(0,o.yg)("inlineCode",{parentName:"p"},"dynamic-maprfs")," Storage Class."),(0,o.yg)("p",null,"\ud83d\uddc4\ufe0f ",(0,o.yg)("strong",{parentName:"p"},"Persistent storage"),": You can use a persistent storage volume to store data. Please see the Create the Persistent Storage section. You can do this yourself. Please keep in mind that there are no backups made of data on DSRI. "),(0,o.yg)("admonition",{title:"Storage per project",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"A storage (aka. Persistent Volume Claim) is only accessible in the project where it has been created.")),(0,o.yg)("h3",{id:"create-the-persistent-storage"},"Create the Persistent Storage"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Switch to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Go to the ",(0,o.yg)("strong",{parentName:"p"},"Project")," panel ")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select your project")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Expand the ",(0,o.yg)("strong",{parentName:"p"},"Storage")," panel then go to the ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claim")," panel")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click the button call ",(0,o.yg)("strong",{parentName:"p"},"Create Persistent Volume Claim")),(0,o.yg)("p",{parentName:"li"},"then you will redirect the wizard of Create Persistent Volume Claim ")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Provide the unique ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claim Name")," start with ",(0,o.yg)("inlineCode",{parentName:"p"}," pvc-")," "),(0,o.yg)("p",{parentName:"li"},"example: ",(0,o.yg)("inlineCode",{parentName:"p"}," pvc-filebrowser"))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select the ",(0,o.yg)("strong",{parentName:"p"},"Access Mode")," ",(0,o.yg)("strong",{parentName:"p"},(0,o.yg)("inlineCode",{parentName:"strong"}," RWX")),"and ",(0,o.yg)("strong",{parentName:"p"},"Storage Size")),(0,o.yg)("table",{parentName:"li"},(0,o.yg)("thead",{parentName:"table"},(0,o.yg)("tr",{parentName:"thead"},(0,o.yg)("th",{parentName:"tr",align:"left"},"Access Mode"),(0,o.yg)("th",{parentName:"tr",align:"left"},"CLI abbreviation"),(0,o.yg)("th",{parentName:"tr",align:"left"},"Description"))),(0,o.yg)("tbody",{parentName:"table"},(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:"left"},"ReadWriteOnce"),(0,o.yg)("td",{parentName:"tr",align:"left"},(0,o.yg)("inlineCode",{parentName:"td"},"RWO")),(0,o.yg)("td",{parentName:"tr",align:"left"},"The volume can be mounted as read-write by a single node.")),(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:"left"},"ReadOnlyMany"),(0,o.yg)("td",{parentName:"tr",align:"left"},(0,o.yg)("inlineCode",{parentName:"td"},"ROX")),(0,o.yg)("td",{parentName:"tr",align:"left"},"The volume can be mounted as read-only by many 
nodes.")),(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:"left"},"ReadWriteMany"),(0,o.yg)("td",{parentName:"tr",align:"left"},(0,o.yg)("inlineCode",{parentName:"td"},"RWX")),(0,o.yg)("td",{parentName:"tr",align:"left"},"The volume can be mounted as read-write by many nodes."))))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Create")))),(0,o.yg)("img",{src:"/img/screenshot_pvc_storage.png",alt:"Create Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("img",{src:"/img/screenshot_pvc_storage_create.png",alt:"Create Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"The DSRI using the ",(0,o.yg)("a",{parentName:"p",href:"https://www.openshift.com/products/container-storage/"},(0,o.yg)("strong",{parentName:"a"},"Openshift Container Stroage"))," (",(0,o.yg)("inlineCode",{parentName:"p"}," OCS"),") which is based on ",(0,o.yg)("a",{parentName:"p",href:"https://ceph.io/ceph-storage/"},(0,o.yg)("strong",{parentName:"a"},"CEPH"))," offers ",(0,o.yg)("inlineCode",{parentName:"p"},"ReadWriteOnce")," access mode. "),(0,o.yg)("ul",{parentName:"admonition"},(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("inlineCode",{parentName:"li"},"ReadWriteOnce")," (",(0,o.yg)("a",{parentName:"li",href:"https://docs.openshift.com/container-platform/4.6/storage/understanding-persistent-storage.html"},(0,o.yg)("strong",{parentName:"a"},"RWO")),") volumes cannot be mounted on multiple nodes. Use the ",(0,o.yg)("inlineCode",{parentName:"li"},"ReadWriteMany")," (",(0,o.yg)("a",{parentName:"li",href:"https://docs.openshift.com/container-platform/4.6/storage/understanding-persistent-storage.html"},(0,o.yg)("strong",{parentName:"a"},"RWX")),") access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on a shut down or crashed node. 
"))),(0,o.yg)("p",null,"Static persistent volumes provides a sustainable persistent storage over time for applications that need to run regular Docker images (which usually use the ",(0,o.yg)("inlineCode",{parentName:"p"},"root")," user)."),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Some Applications such as ",(0,o.yg)("strong",{parentName:"p"},"Jupyter")," template automatically creates a persistent storage")),(0,o.yg)("h3",{id:"connect-the-existing-persistent-storage"},"Connect the Existing Persistent Storage"),(0,o.yg)("p",null,"On the ",(0,o.yg)("strong",{parentName:"p"},"Topology")," page select your application,"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Action")," on your application")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select the ",(0,o.yg)("strong",{parentName:"p"},"Add Storage")," option from the dropdown list.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select the ",(0,o.yg)("strong",{parentName:"p"},"Use Existing Claim")," option from the Add Storage wizard and Select the Claim")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Add the ",(0,o.yg)("strong",{parentName:"p"},"Mount Path"))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Save"))),(0,o.yg)("img",{src:"/img/screenshot_existing_storage.png",alt:"Add Existing Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("img",{src:"/img/screenshot_add_storage.png",alt:"Add Existing Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"You can try above method if you want to connect ",(0,o.yg)("strong",{parentName:"p"},"more applications to the same storage"))),(0,o.yg)("h3",{id:"expand-existing-persistent-storage"},"Expand existing Persistent Storage"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Switch to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Go to the ",(0,o.yg)("strong",{parentName:"p"},"Project")," panel ")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select your project")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Expand the ",(0,o.yg)("strong",{parentName:"p"},"Storage")," panel then go to the ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claim")," panel")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click on the three dots (\u22ee) next to the ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claim")," you want to expand.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click on ",(0,o.yg)("strong",{parentName:"p"},"Expand PVC")," in the menu.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Enter the size you want to expand your PVC with.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Hit ",(0,o.yg)("strong",{parentName:"p"},"Expand"),". 
It can take upto 2 minutes before your PVC is expanded."))),(0,o.yg)("h3",{id:"use-the-dynamic-storage"},"Use the dynamic storage"),(0,o.yg)("p",null,"Dynamic ",(0,o.yg)("strong",{parentName:"p"},"persistent")," volumes can be created automatically by an application template."),(0,o.yg)("p",null,"Dynamic storage can also be created manually, go to ",(0,o.yg)("strong",{parentName:"p"},"Storage")," on the left sidebar in a project:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},"Click ",(0,o.yg)("strong",{parentName:"li"},"Create Storage")," top right of the Storage page."),(0,o.yg)("li",{parentName:"ol"},"Storage class: ",(0,o.yg)("strong",{parentName:"li"},"ceph-fs")),(0,o.yg)("li",{parentName:"ol"},"Access Mode:",(0,o.yg)("ul",{parentName:"li"},(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Single User (RWO)"),": only the user who created this volume can read/write to this volume."),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Shared Access (RWX)"),": all users with access to the projects can read/write this volume."),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Read Only (ROX)"),": all users with access to the projects can read this volume.")))),(0,o.yg)("h3",{id:"use-the-ephemeral-storage"},"Use the ephemeral storage"),(0,o.yg)("admonition",{title:"Disabled",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"We currently disabled this solution by default, as it was confusing for users and would lead to data loss.")),(0,o.yg)("p",null,"When creating a pod, OpenShift will by default use ephemeral storage. It creates a volumes bind to the pod. So the volume will be deleted."),(0,o.yg)("p",null,"It is recommended to use dynamic provisioning for a more sustainable storage solution. 
But ephemeral storage can be sufficient for testing."))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4896],{5680:(e,t,a)=>{a.d(t,{xA:()=>g,yg:()=>d});var n=a(6540);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var p=n.createContext({}),l=function(e){var t=n.useContext(p),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},g=function(e){var t=l(e.components);return n.createElement(p.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},y=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,o=e.originalType,p=e.parentName,g=s(e,["components","mdxType","originalType","parentName"]),y=l(a),d=r,c=y["".concat(p,".").concat(d)]||y[d]||m[d]||o;return a?n.createElement(c,i(i({ref:t},g),{},{components:a})):n.createElement(c,i({ref:t},g))}));function d(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,i=new Array(o);i[0]=y;var s={};for(var p in t)hasOwnProperty.call(t,p)&&(s[p]=t[p]);s.originalType=e,s.mdxType="string"==typeof e?e:r,i[1]=s;for(var l=2;l{a.r(t),a.d(t,{assets:()=>g,contentTitle:()=>p,default:()=>d,frontMatter:()=>s,metadata:()=>l,toc:()=>m});var n=a(9668),r=a(1367),o=(a(6540),a(5680)),i=["components"],s={id:"openshift-storage",title:"Data storage"},p=void 0,l={unversionedId:"openshift-storage",id:"openshift-storage",title:"Data storage",description:"Different storages can be used when running services on the DSRI:",source:"@site/docs/openshift-storage.md",sourceDirName:".",slug:"/openshift-storage",permalink:"/docs/openshift-storage",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-storage.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-storage",title:"Data storage"},sidebar:"docs",previous:{title:"Command Line Interface",permalink:"/docs/openshift-commands"},next:{title:"Delete objects (advanced)",permalink:"/docs/openshift-delete-objects"}},g={},m=[{value:"Create the Persistent Storage",id:"create-the-persistent-storage",level:3},{value:"Connect the Existing Persistent Storage",id:"connect-the-existing-persistent-storage",level:3},{value:"Expand existing Persistent Storage",id:"expand-existing-persistent-storage",level:3},{value:"Use the dynamic storage",id:"use-the-dynamic-storage",level:3},{value:"Use the ephemeral storage",id:"use-the-ephemeral-storage",level:3}],y={toc:m};function d(e){var t=e.components,a=(0,r.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},y,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"Different storages can be used when running services on the DSRI:"),(0,o.yg)("p",null,"\ud83e\udd8b ",(0,o.yg)("strong",{parentName:"p"},"Ephemeral storage"),": storage is bound to the pod, data will be lost when the pod is deleted (but this deployment does not require you to request the creation of a persistent storage, and is 
faster for testing code)."),(0,o.yg)("p",null,"\u26a1 ",(0,o.yg)("strong",{parentName:"p"},"Dynamic storage"),": automatically creates a persistent storage when starting an application. Can also be created in the OpenShift web UI, using the ",(0,o.yg)("inlineCode",{parentName:"p"},"dynamic-maprfs")," Storage Class."),(0,o.yg)("p",null,"\ud83d\uddc4\ufe0f ",(0,o.yg)("strong",{parentName:"p"},"Persistent storage"),": You can use a persistent storage volume to store data. Please see the Create the Persistent Storage section. You can do this yourself. Please keep in mind that there are no backups made of data on DSRI. "),(0,o.yg)("admonition",{title:"Storage per project",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"A storage (aka. Persistent Volume Claim) is only accessible in the project where it has been created.")),(0,o.yg)("h3",{id:"create-the-persistent-storage"},"Create the Persistent Storage"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Switch to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Go to the ",(0,o.yg)("strong",{parentName:"p"},"Project")," panel ")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select your project")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Expand the ",(0,o.yg)("strong",{parentName:"p"},"Storage")," panel then go to the ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claim")," panel")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click the button called ",(0,o.yg)("strong",{parentName:"p"},"Create Persistent Volume Claim")),(0,o.yg)("p",{parentName:"li"},"then you will be redirected to the Create Persistent Volume Claim wizard ")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Provide the unique ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claim Name")," starting with ",(0,o.yg)("inlineCode",{parentName:"p"}," pvc-")," "),(0,o.yg)("p",{parentName:"li"},"example: ",(0,o.yg)("inlineCode",{parentName:"p"}," pvc-filebrowser"))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select the ",(0,o.yg)("strong",{parentName:"p"},"Access Mode")," ",(0,o.yg)("strong",{parentName:"p"},(0,o.yg)("inlineCode",{parentName:"strong"}," RWX"))," and ",(0,o.yg)("strong",{parentName:"p"},"Storage Size")),(0,o.yg)("table",{parentName:"li"},(0,o.yg)("thead",{parentName:"table"},(0,o.yg)("tr",{parentName:"thead"},(0,o.yg)("th",{parentName:"tr",align:"left"},"Access Mode"),(0,o.yg)("th",{parentName:"tr",align:"left"},"CLI abbreviation"),(0,o.yg)("th",{parentName:"tr",align:"left"},"Description"))),(0,o.yg)("tbody",{parentName:"table"},(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:"left"},"ReadWriteOnce"),(0,o.yg)("td",{parentName:"tr",align:"left"},(0,o.yg)("inlineCode",{parentName:"td"},"RWO")),(0,o.yg)("td",{parentName:"tr",align:"left"},"The volume can be mounted as read-write by a single node.")),(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:"left"},"ReadOnlyMany"),(0,o.yg)("td",{parentName:"tr",align:"left"},(0,o.yg)("inlineCode",{parentName:"td"},"ROX")),(0,o.yg)("td",{parentName:"tr",align:"left"},"The volume can be mounted as read-only by many nodes.")),(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:"left"},"ReadWriteMany"),(0,o.yg)("td",{parentName:"tr",align:"left"},(0,o.yg)("inlineCode",{parentName:"td"},"RWX")),(0,o.yg)("td",{parentName:"tr",align:"left"},"The volume 
can be mounted as read-write by many nodes."))))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Create")))),(0,o.yg)("img",{src:"/img/screenshot_pvc_storage.png",alt:"Create Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("img",{src:"/img/screenshot_pvc_storage_create.png",alt:"Create Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"The DSRI uses the ",(0,o.yg)("a",{parentName:"p",href:"https://www.openshift.com/products/container-storage/"},(0,o.yg)("strong",{parentName:"a"},"OpenShift Container Storage"))," (",(0,o.yg)("inlineCode",{parentName:"p"}," OCS"),"), which is based on ",(0,o.yg)("a",{parentName:"p",href:"https://ceph.io/ceph-storage/"},(0,o.yg)("strong",{parentName:"a"},"CEPH")),", and offers the ",(0,o.yg)("inlineCode",{parentName:"p"},"ReadWriteOnce")," access mode. "),(0,o.yg)("ul",{parentName:"admonition"},(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("inlineCode",{parentName:"li"},"ReadWriteOnce")," (",(0,o.yg)("a",{parentName:"li",href:"https://docs.openshift.com/container-platform/4.6/storage/understanding-persistent-storage.html"},(0,o.yg)("strong",{parentName:"a"},"RWO")),") volumes cannot be mounted on multiple nodes. Use the ",(0,o.yg)("inlineCode",{parentName:"li"},"ReadWriteMany")," (",(0,o.yg)("a",{parentName:"li",href:"https://docs.openshift.com/container-platform/4.6/storage/understanding-persistent-storage.html"},(0,o.yg)("strong",{parentName:"a"},"RWX")),") access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on a shut down or crashed node. 
"))),(0,o.yg)("p",null,"Static persistent volumes provide sustainable persistent storage over time for applications that need to run regular Docker images (which usually use the ",(0,o.yg)("inlineCode",{parentName:"p"},"root")," user)."),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Some application templates such as ",(0,o.yg)("strong",{parentName:"p"},"Jupyter")," automatically create a persistent storage")),(0,o.yg)("h3",{id:"connect-the-existing-persistent-storage"},"Connect the Existing Persistent Storage"),(0,o.yg)("p",null,"On the ",(0,o.yg)("strong",{parentName:"p"},"Topology")," page, select your application:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Action")," on your application")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select the ",(0,o.yg)("strong",{parentName:"p"},"Add Storage")," option from the dropdown list.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select the ",(0,o.yg)("strong",{parentName:"p"},"Use Existing Claim")," option from the Add Storage wizard and select the Claim")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Add the ",(0,o.yg)("strong",{parentName:"p"},"Mount Path"))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Save"))),(0,o.yg)("img",{src:"/img/screenshot_existing_storage.png",alt:"Add Existing Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("img",{src:"/img/screenshot_add_storage.png",alt:"Add Existing Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"You can use the above method if you want to connect ",(0,o.yg)("strong",{parentName:"p"},"more applications to the same storage"))),(0,o.yg)("h3",{id:"expand-existing-persistent-storage"},"Expand existing Persistent Storage"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Switch to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Go to the ",(0,o.yg)("strong",{parentName:"p"},"Project")," panel ")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Select your project")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Expand the ",(0,o.yg)("strong",{parentName:"p"},"Storage")," panel then go to the ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claim")," panel")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click on the three dots (\u22ee) next to the ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claim")," you want to expand.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click on ",(0,o.yg)("strong",{parentName:"p"},"Expand PVC")," in the menu.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Enter the size you want to expand your PVC with.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Hit ",(0,o.yg)("strong",{parentName:"p"},"Expand"),". 
It can take up to 2 minutes before your PVC is expanded."))),(0,o.yg)("h3",{id:"use-the-dynamic-storage"},"Use the dynamic storage"),(0,o.yg)("p",null,"Dynamic ",(0,o.yg)("strong",{parentName:"p"},"persistent")," volumes can be created automatically by an application template."),(0,o.yg)("p",null,"Dynamic storage can also be created manually: go to ",(0,o.yg)("strong",{parentName:"p"},"Storage")," on the left sidebar in a project:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},"Click ",(0,o.yg)("strong",{parentName:"li"},"Create Storage")," at the top right of the Storage page."),(0,o.yg)("li",{parentName:"ol"},"Storage class: ",(0,o.yg)("strong",{parentName:"li"},"ceph-fs")),(0,o.yg)("li",{parentName:"ol"},"Access Mode:",(0,o.yg)("ul",{parentName:"li"},(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Single User (RWO)"),": only the user who created this volume can read/write to this volume."),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Shared Access (RWX)"),": all users with access to the projects can read/write this volume."),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Read Only (ROX)"),": all users with access to the projects can read this volume.")))),(0,o.yg)("h3",{id:"use-the-ephemeral-storage"},"Use the ephemeral storage"),(0,o.yg)("admonition",{title:"Disabled",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"We have disabled this solution by default, as it was confusing for users and would lead to data loss.")),(0,o.yg)("p",null,"When creating a pod, OpenShift will by default use ephemeral storage. It creates a volume bound to the pod, so the volume will be deleted when the pod is deleted."),(0,o.yg)("p",null,"It is recommended to use dynamic provisioning for a more sustainable storage solution. 
But ephemeral storage can be sufficient for testing."))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/2406662c.ab664a38.js b/assets/js/2406662c.eb0d4ab5.js similarity index 99% rename from assets/js/2406662c.ab664a38.js rename to assets/js/2406662c.eb0d4ab5.js index 746e43e65..b66873327 100644 --- a/assets/js/2406662c.ab664a38.js +++ b/assets/js/2406662c.eb0d4ab5.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9030],{5680:(e,t,r)=>{r.d(t,{xA:()=>c,yg:()=>y});var o=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function n(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var p=o.createContext({}),l=function(e){var t=o.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=l(e.components);return o.createElement(p.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},u=o.forwardRef((function(e,t){var r=e.components,a=e.mdxType,n=e.originalType,p=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),u=l(r),y=a,m=u["".concat(p,".").concat(y)]||u[y]||d[y]||n;return r?o.createElement(m,i(i({ref:t},c),{},{components:r})):o.createElement(m,i({ref:t},c))}));function y(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var n=r.length,i=new Array(n);i[0]=u;var s={};for(var p in t)hasOwnProperty.call(t,p)&&(s[p]=t[p]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var l=2;l{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>p,default:()=>y,frontMatter:()=>s,metadata:()=>l,toc:()=>d});var o=r(9668),a=r(1367),n=(r(6540),r(5680)),i=["components"],s={id:"prepare-project-for-dsri",title:"Prepare your project"},p=void 0,l={unversionedId:"prepare-project-for-dsri",id:"prepare-project-for-dsri",title:"Prepare your project",description:"Code in a git repository",source:"@site/docs/prepare-project-for-dsri.md",sourceDirName:".",slug:"/prepare-project-for-dsri",permalink:"/docs/prepare-project-for-dsri",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/prepare-project-for-dsri.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"prepare-project-for-dsri",title:"Prepare your project"},sidebar:"docs",previous:{title:"Install the client",permalink:"/docs/openshift-install"},next:{title:"Upload data",permalink:"/docs/openshift-load-data"}},c={},d=[{value:"Code in a git repository",id:"code-in-a-git-repository",level:2},{value:"Get your data ready",id:"get-your-data-ready",level:2},{value:"Data is on your local machine",id:"data-is-on-your-local-machine",level:3},{value:"Data is on a server",id:"data-is-on-a-server",level:3},{value:"Request access to internal UM servers",id:"request-access-to-internal-um-servers",level:3}],u={toc:d};function y(e){var t=e.components,r=(0,a.A)(e,i);return(0,n.yg)("wrapper",(0,o.A)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("h2",{id:"code-in-a-git-repository"},"Code in a git 
repository"),(0,n.yg)("p",null,"Using ",(0,n.yg)("inlineCode",{parentName:"p"},"git")," is mandatory to deploy your code on the DSRI. Store your code in a git repository to keep track of changes, and make it easier to share and re-use your code outside of your computer."),(0,n.yg)("admonition",{title:"Platform recommendations",type:"info"},(0,n.yg)("p",{parentName:"admonition"},"We recommend those platforms depending on your use-case:"),(0,n.yg)("ul",{parentName:"admonition"},(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://github.com"},"GitHub")," for public repositories"),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://gitlab.maastrichtuniversity.nl"},"GitLab hosted at Maastricht University")," for private repositories"))),(0,n.yg)("blockquote",null,(0,n.yg)("p",{parentName:"blockquote"},"Any other git platform, such as BitBucket or gitlab.com, is fine too.")),(0,n.yg)("h2",{id:"get-your-data-ready"},"Get your data ready"),(0,n.yg)("p",null,"If your project is using a large amount of data that cannot be pushed to a git repository, you will need to use a persistent storage to store your data on the DSRI. See the ",(0,n.yg)("a",{parentName:"p",href:"/docs/openshift-storage"},"Storage on the DSRI")," documentation for more details about creating a persistent storage."),(0,n.yg)("p",null,"Here are the options to upload your data to the DSRI storage:"),(0,n.yg)("h3",{id:"data-is-on-your-local-machine"},"Data is on your local machine"),(0,n.yg)("p",null,"If the data is stored on a local machine, such as your computer:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"Drag and drop files from your computer to the VisualStudio Code or JupyterLab web UI, if applicable."),(0,n.yg)("li",{parentName:"ul"},"Otherwise, use the ",(0,n.yg)("inlineCode",{parentName:"li"},"oc cp")," command to copy data to your application pod. See the ",(0,n.yg)("a",{parentName:"li",href:"/docs/openshift-load-data"},"Load data")," documentation page for more information.")),(0,n.yg)("admonition",{title:"Upload to persistent storage",type:"caution"},(0,n.yg)("p",{parentName:"admonition"},"Make sure you ",(0,n.yg)("strong",{parentName:"p"},"upload the data to a folder mounted on a persistent storage")," in the pod to avoid losing your data if the pod restarts.")),(0,n.yg)("h3",{id:"data-is-on-a-server"},"Data is on a server"),(0,n.yg)("p",null,"Same as for your laptop, you will need to install and use the ",(0,n.yg)("inlineCode",{parentName:"p"},"oc cp")," command to copy data to your application pod. See the ",(0,n.yg)("a",{parentName:"p",href:"/docs/openshift-load-data"},"Load data")," documentation page for more information."),(0,n.yg)("h3",{id:"request-access-to-internal-um-servers"},"Request access to internal UM servers"),(0,n.yg)("p",null,"In certain cases, UM servers are not accessible by default from the DSRI. This is even the case for servers that are normally publicly accessible. To be able to access these UM servers from the DSRI, we need to put in the request to open the connection. "),(0,n.yg)("p",null,"Please let us know either the servername and port you like to access, or the URL (e.g. um-vm0057.unimaas.nl on port 443 or ",(0,n.yg)("a",{parentName:"p",href:"https://gitlab.maastrichtuniversity.nl"},"https://gitlab.maastrichtuniversity.nl"),"). 
You can reach out to us either by mail or by Slack."),(0,n.yg)("p",null,"The procedure is described in the diagram below:"),(0,n.yg)("img",{src:"/img/request-access-um-servers.svg",alt:"Access procedure UM servers",style:{maxWidth:"100%",maxHeight:"100%"}}))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9030],{5680:(e,t,r)=>{r.d(t,{xA:()=>c,yg:()=>y});var o=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function n(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var p=o.createContext({}),l=function(e){var t=o.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=l(e.components);return o.createElement(p.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},u=o.forwardRef((function(e,t){var r=e.components,a=e.mdxType,n=e.originalType,p=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),u=l(r),y=a,m=u["".concat(p,".").concat(y)]||u[y]||d[y]||n;return r?o.createElement(m,i(i({ref:t},c),{},{components:r})):o.createElement(m,i({ref:t},c))}));function y(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var n=r.length,i=new Array(n);i[0]=u;var s={};for(var p in t)hasOwnProperty.call(t,p)&&(s[p]=t[p]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var l=2;l{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>p,default:()=>y,frontMatter:()=>s,metadata:()=>l,toc:()=>d});var o=r(9668),a=r(1367),n=(r(6540),r(5680)),i=["components"],s={id:"prepare-project-for-dsri",title:"Prepare your project"},p=void 0,l={unversionedId:"prepare-project-for-dsri",id:"prepare-project-for-dsri",title:"Prepare your project",description:"Code in a git repository",source:"@site/docs/prepare-project-for-dsri.md",sourceDirName:".",slug:"/prepare-project-for-dsri",permalink:"/docs/prepare-project-for-dsri",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/prepare-project-for-dsri.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"prepare-project-for-dsri",title:"Prepare your project"},sidebar:"docs",previous:{title:"Install the client",permalink:"/docs/openshift-install"},next:{title:"Upload data",permalink:"/docs/openshift-load-data"}},c={},d=[{value:"Code in a git repository",id:"code-in-a-git-repository",level:2},{value:"Get your data ready",id:"get-your-data-ready",level:2},{value:"Data is on your local machine",id:"data-is-on-your-local-machine",level:3},{value:"Data is on a server",id:"data-is-on-a-server",level:3},{value:"Request access to internal UM servers",id:"request-access-to-internal-um-servers",level:3}],u={toc:d};function y(e){var t=e.components,r=(0,a.A)(e,i);return(0,n.yg)("wrapper",(0,o.A)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("h2",{id:"code-in-a-git-repository"},"Code in a git repository"),(0,n.yg)("p",null,"Using ",(0,n.yg)("inlineCode",{parentName:"p"},"git")," is mandatory to deploy 
your code on the DSRI. Store your code in a git repository to keep track of changes, and make it easier to share and re-use your code outside of your computer."),(0,n.yg)("admonition",{title:"Platform recommendations",type:"info"},(0,n.yg)("p",{parentName:"admonition"},"We recommend these platforms depending on your use-case:"),(0,n.yg)("ul",{parentName:"admonition"},(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://github.com"},"GitHub")," for public repositories"),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://gitlab.maastrichtuniversity.nl"},"GitLab hosted at Maastricht University")," for private repositories"))),(0,n.yg)("blockquote",null,(0,n.yg)("p",{parentName:"blockquote"},"Any other git platform, such as BitBucket or gitlab.com, is fine too.")),(0,n.yg)("h2",{id:"get-your-data-ready"},"Get your data ready"),(0,n.yg)("p",null,"If your project is using a large amount of data that cannot be pushed to a git repository, you will need to use a persistent storage to store your data on the DSRI. See the ",(0,n.yg)("a",{parentName:"p",href:"/docs/openshift-storage"},"Storage on the DSRI")," documentation for more details about creating a persistent storage."),(0,n.yg)("p",null,"Here are the options to upload your data to the DSRI storage:"),(0,n.yg)("h3",{id:"data-is-on-your-local-machine"},"Data is on your local machine"),(0,n.yg)("p",null,"If the data is stored on a local machine, such as your computer:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"Drag and drop files from your computer to the VisualStudio Code or JupyterLab web UI, if applicable."),(0,n.yg)("li",{parentName:"ul"},"Otherwise, use the ",(0,n.yg)("inlineCode",{parentName:"li"},"oc cp")," command to copy data to your application pod. See the ",(0,n.yg)("a",{parentName:"li",href:"/docs/openshift-load-data"},"Load data")," documentation page for more information.")),(0,n.yg)("admonition",{title:"Upload to persistent storage",type:"caution"},(0,n.yg)("p",{parentName:"admonition"},"Make sure you ",(0,n.yg)("strong",{parentName:"p"},"upload the data to a folder mounted on a persistent storage")," in the pod to avoid losing your data if the pod restarts.")),(0,n.yg)("h3",{id:"data-is-on-a-server"},"Data is on a server"),(0,n.yg)("p",null,"Same as for your laptop, you will need to install and use the ",(0,n.yg)("inlineCode",{parentName:"p"},"oc cp")," command to copy data to your application pod. See the ",(0,n.yg)("a",{parentName:"p",href:"/docs/openshift-load-data"},"Load data")," documentation page for more information."),(0,n.yg)("h3",{id:"request-access-to-internal-um-servers"},"Request access to internal UM servers"),(0,n.yg)("p",null,"In certain cases, UM servers are not accessible by default from the DSRI. This is even the case for servers that are normally publicly accessible. To be able to access these UM servers from the DSRI, we need to put in a request to open the connection. "),(0,n.yg)("p",null,"Please let us know either the server name and port you would like to access, or the URL (e.g. um-vm0057.unimaas.nl on port 443 or ",(0,n.yg)("a",{parentName:"p",href:"https://gitlab.maastrichtuniversity.nl"},"https://gitlab.maastrichtuniversity.nl"),"). 
You can reach out to us either by mail or by Slack."),(0,n.yg)("p",null,"The procedure is described in the diagram below:"),(0,n.yg)("img",{src:"/img/request-access-um-servers.svg",alt:"Access procedure UM servers",style:{maxWidth:"100%",maxHeight:"100%"}}))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/2b1adfae.ba3c5060.js b/assets/js/2b1adfae.65f6cb6c.js similarity index 99% rename from assets/js/2b1adfae.ba3c5060.js rename to assets/js/2b1adfae.65f6cb6c.js index 0f71ba35b..d4a989b2f 100644 --- a/assets/js/2b1adfae.ba3c5060.js +++ b/assets/js/2b1adfae.65f6cb6c.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8793],{5680:(e,t,o)=>{o.d(t,{xA:()=>u,yg:()=>h});var r=o(6540);function a(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function n(e,t){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),o.push.apply(o,r)}return o}function s(e){for(var t=1;t=0||(a[o]=e[o]);return a}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(a[o]=e[o])}return a}var l=r.createContext({}),p=function(e){var t=r.useContext(l),o=t;return e&&(o="function"==typeof e?e(t):s(s({},t),e)),o},u=function(e){var t=p(e.components);return r.createElement(l.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},c=r.forwardRef((function(e,t){var o=e.components,a=e.mdxType,n=e.originalType,l=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),c=p(o),h=a,y=c["".concat(l,".").concat(h)]||c[h]||d[h]||n;return o?r.createElement(y,s(s({ref:t},u),{},{components:o})):r.createElement(y,s({ref:t},u))}));function h(e,t){var o=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var n=o.length,s=new Array(n);s[0]=c;var i={};for(var l in t)hasOwnProperty.call(t,l)&&(i[l]=t[l]);i.originalType=e,i.mdxType="string"==typeof e?e:a,s[1]=i;for(var p=2;p{o.r(t),o.d(t,{assets:()=>u,contentTitle:()=>l,default:()=>h,frontMatter:()=>i,metadata:()=>p,toc:()=>d});var r=o(9668),a=o(1367),n=(o(6540),o(5680)),s=["components"],i={id:"increase-process-speed",title:"Increase your processes speed"},l=void 0,p={unversionedId:"increase-process-speed",id:"increase-process-speed",title:"Increase your processes speed",description:"DSRI provides a lot of computing resources, but there are a few things to know if you want to increase the speed of your processes.",source:"@site/docs/increase-process-speed.md",sourceDirName:".",slug:"/increase-process-speed",permalink:"/docs/increase-process-speed",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/increase-process-speed.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"increase-process-speed",title:"Increase your processes speed"},sidebar:"docs",previous:{title:"Enabling VPN access in WSL2",permalink:"/docs/enabling-vpn-wsl"},next:{title:"PyTorch Profiling",permalink:"/docs/profile-pytorch-code"}},u={},d=[{value:"The good",id:"the-good",level:2},{value:"The bad",id:"the-bad",level:2},{value:"The solution",id:"the-solution",level:2}],c={toc:d};function h(e){var 
t=e.components,o=(0,a.A)(e,s);return(0,n.yg)("wrapper",(0,r.A)({},c,o,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("p",null,"DSRI provides a lot of computing resources, but there are a few things to know if you want to increase the speed of your processes."),(0,n.yg)("h2",{id:"the-good"},"The good"),(0,n.yg)("p",null,"With the DSRI you get access to a workspace with more memory and cores than your laptop (around 200G memory, and 64 cores on the DSRI, against around 16G memory and 8 cores on your laptop)"),(0,n.yg)("p",null,"Those additional resources ",(0,n.yg)("strong",{parentName:"p"},"might")," help to make your workload run faster, but not automatically! It will run faster "),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"If your code can make use of the really large amount of RAM to load more of the data to process in memory. But if your workload does not require dozens of GB of memory, and your laptop does not face out of memory issues, or crash, when you run your workload, then you probably already have enough memory on your laptop, and will not gain a significant boost from the increased memory."),(0,n.yg)("li",{parentName:"ul"},"If you can run your workload in parallel, or enable the libraries you use to use the available cores. This will highly depend on the libraries you use. Do they support running their processes in parallel? Do you need to explicitly enable parallelism on a specific number of cores?")),(0,n.yg)("p",null,"Proper parallelism is not achieved easily, it needs to be manually implemented within the library processes. "),(0,n.yg)("p",null,'For example, Python has a "Global Interpreter Lock" (aka. GIL) that limit threads parallelism by design, so when you are doing some work on a spreadsheet with ',(0,n.yg)("inlineCode",{parentName:"p"},"pandas"),", you are only going to use 1 thread (which is nice, because it makes the conceptualization and understanding of algorithms easier, but it also makes it harder to write truly efficient libraries)"),(0,n.yg)("p",null,"You will need to use complementary libraries if you want to use more threads while processing data with ",(0,n.yg)("inlineCode",{parentName:"p"},"pandas"),". There are multiple ways and libraries to achieve this, but the easiest, if you want to check it yourself with pandas, is to use ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/nalepae/pandarallel"},"pandarallel"),". You could also implement the parallelism yourself with ",(0,n.yg)("inlineCode",{parentName:"p"},"concurrent.futures")),(0,n.yg)("h2",{id:"the-bad"},"The bad"),(0,n.yg)("p",null,"Until now everything seems good, more memory, more cores... So, what's the catch? It can only get better, no?"),(0,n.yg)("p",null,"Application and workspace running on the DSRI uses a persistent volume to avoid losing data when the application is restarted. On most workspaces this persistent volume is mounted on the workspace working directory."),(0,n.yg)("p",null,"This persistent volume is not hosted directly on the same node as your application, it's hosted on the cluster in a distributed fashion (remember you can attach this persistent volume to different applications, which might be hosted on different nodes themselves)"),(0,n.yg)("p",null,"And distributed storage means: slower read and write times! 
"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"In your laptop the data is in a hard drive disc sitting at 2 cm of the CPU and memory.")),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"In the DSRI your workspace might be on node 4, when the persistent volume is on node 8. In this case the data will need to go through the network"))),(0,n.yg)("p",null,"So if you write a script to just load data, do no computing, and write back the data to the persistent volume, it will probably be much faster on your laptop than on the DSRI!"),(0,n.yg)("h2",{id:"the-solution"},"The solution"),(0,n.yg)("p",null,'Only 1 folder (and its subfolders) usually mounted on the persistent volume. The rest is "ephemeral storage", which is the data bound to the application you started, this means the data will be stored on the same node as your workspace. '),(0,n.yg)("p",null,"Which might result in faster read/write speed! But also means the data will be lost if the workspace is restarted (which does not happen everyday, but can happen without notice)"),(0,n.yg)("p",null,"A solution could be to:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"Keep your code and important data as backup in the persistent volume (the workspace working dir usually)"),(0,n.yg)("li",{parentName:"ul"},"Copy the data your process needs to load in a folder outside of the persistent volume (on the ephemeral storage)"),(0,n.yg)("li",{parentName:"ul"},"Read/write data mostly from this folder on the ephemeral storage, avoid using the data in the persistent volume folder as much as possible"),(0,n.yg)("li",{parentName:"ul"},"Copy the important result files or temporary files you don't want to lose from the folder on the ephemeral storage to the folder on the persistent storage")),(0,n.yg)("p",null,"Let us know how it works for you on the Slack ",(0,n.yg)("strong",{parentName:"p"},"#general")," channel, and if you have suggestions to improve the workspaces."))}h.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8793],{5680:(e,t,o)=>{o.d(t,{xA:()=>u,yg:()=>h});var r=o(6540);function a(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function n(e,t){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),o.push.apply(o,r)}return o}function s(e){for(var t=1;t=0||(a[o]=e[o]);return a}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(a[o]=e[o])}return a}var l=r.createContext({}),p=function(e){var t=r.useContext(l),o=t;return e&&(o="function"==typeof e?e(t):s(s({},t),e)),o},u=function(e){var t=p(e.components);return r.createElement(l.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},c=r.forwardRef((function(e,t){var o=e.components,a=e.mdxType,n=e.originalType,l=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),c=p(o),h=a,y=c["".concat(l,".").concat(h)]||c[h]||d[h]||n;return o?r.createElement(y,s(s({ref:t},u),{},{components:o})):r.createElement(y,s({ref:t},u))}));function h(e,t){var o=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var n=o.length,s=new Array(n);s[0]=c;var i={};for(var l in 
t)hasOwnProperty.call(t,l)&&(i[l]=t[l]);i.originalType=e,i.mdxType="string"==typeof e?e:a,s[1]=i;for(var p=2;p{o.r(t),o.d(t,{assets:()=>u,contentTitle:()=>l,default:()=>h,frontMatter:()=>i,metadata:()=>p,toc:()=>d});var r=o(9668),a=o(1367),n=(o(6540),o(5680)),s=["components"],i={id:"increase-process-speed",title:"Increase your processes speed"},l=void 0,p={unversionedId:"increase-process-speed",id:"increase-process-speed",title:"Increase your processes speed",description:"DSRI provides a lot of computing resources, but there are a few things to know if you want to increase the speed of your processes.",source:"@site/docs/increase-process-speed.md",sourceDirName:".",slug:"/increase-process-speed",permalink:"/docs/increase-process-speed",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/increase-process-speed.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"increase-process-speed",title:"Increase your processes speed"},sidebar:"docs",previous:{title:"Enabling VPN access in WSL2",permalink:"/docs/enabling-vpn-wsl"},next:{title:"PyTorch Profiling",permalink:"/docs/profile-pytorch-code"}},u={},d=[{value:"The good",id:"the-good",level:2},{value:"The bad",id:"the-bad",level:2},{value:"The solution",id:"the-solution",level:2}],c={toc:d};function h(e){var t=e.components,o=(0,a.A)(e,s);return(0,n.yg)("wrapper",(0,r.A)({},c,o,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("p",null,"DSRI provides a lot of computing resources, but there are a few things to know if you want to increase the speed of your processes."),(0,n.yg)("h2",{id:"the-good"},"The good"),(0,n.yg)("p",null,"With the DSRI you get access to a workspace with more memory and cores than your laptop (around 200G of memory and 64 cores on the DSRI, versus around 16G of memory and 8 cores on a typical laptop)."),(0,n.yg)("p",null,"Those additional resources ",(0,n.yg)("strong",{parentName:"p"},"might")," help to make your workload run faster, but not automatically! It will run faster: "),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"If your code can make use of the really large amount of RAM to load more of the data to process in memory. But if your workload does not require dozens of GB of memory, and your laptop does not run out of memory or crash when you run your workload, then you probably already have enough memory on your laptop, and will not gain a significant boost from the increased memory."),(0,n.yg)("li",{parentName:"ul"},"If you can run your workload in parallel, or enable the libraries you use to use the available cores. This will highly depend on the libraries you use. Do they support running their processes in parallel? Do you need to explicitly enable parallelism on a specific number of cores?")),(0,n.yg)("p",null,"Proper parallelism is not achieved easily; it needs to be implemented manually within the library's processes. "),(0,n.yg)("p",null,'For example, Python has a "Global Interpreter Lock" (aka GIL) that limits thread parallelism by design, so when you are doing some work on a spreadsheet with ',(0,n.yg)("inlineCode",{parentName:"p"},"pandas"),", you are only going to use 1 thread (which is nice, because it makes the conceptualization and understanding of algorithms easier, but it also makes it harder to write truly efficient libraries)."),(0,n.yg)("p",null,"You will need to use complementary libraries if you want to use more threads while processing data with ",(0,n.yg)("inlineCode",{parentName:"p"},"pandas"),". There are multiple ways and libraries to achieve this, but the easiest, if you want to try it yourself with pandas, is to use ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/nalepae/pandarallel"},"pandarallel"),". You could also implement the parallelism yourself with ",(0,n.yg)("inlineCode",{parentName:"p"},"concurrent.futures"),", as sketched below."),
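(0,n.yg)("p",null,"Here is a minimal sketch of ",(0,n.yg)("inlineCode",{parentName:"p"},"pandarallel")," (assuming a CPU-bound function applied to a DataFrame): initialize it once, then swap ",(0,n.yg)("inlineCode",{parentName:"p"},"apply")," for ",(0,n.yg)("inlineCode",{parentName:"p"},"parallel_apply"),":"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-python"},'import pandas as pd\nfrom pandarallel import pandarallel\n\n# Initialize the workers once, by default one per available core\npandarallel.initialize(progress_bar=False)\n\ndf = pd.DataFrame({"x": range(1_000_000)})\n# parallel_apply is the parallel drop-in replacement for apply\ndf["y"] = df["x"].parallel_apply(lambda v: v ** 2)\n')),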
(0,n.yg)("h2",{id:"the-bad"},"The bad"),(0,n.yg)("p",null,"Until now everything seems good: more memory, more cores... So, what's the catch? It can only get better, no?"),(0,n.yg)("p",null,"Applications and workspaces running on the DSRI use a persistent volume to avoid losing data when the application is restarted. On most workspaces this persistent volume is mounted on the workspace working directory."),(0,n.yg)("p",null,"This persistent volume is not hosted directly on the same node as your application; it's hosted on the cluster in a distributed fashion (remember you can attach this persistent volume to different applications, which might be hosted on different nodes themselves)."),(0,n.yg)("p",null,"And distributed storage means slower read and write times! "),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"On your laptop the data is on a hard drive sitting 2 cm from the CPU and memory.")),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"On the DSRI your workspace might be on node 4, while the persistent volume is on node 8. In this case the data will need to go through the network."))),(0,n.yg)("p",null,"So if you write a script to just load data, do no computing, and write back the data to the persistent volume, it will probably be much faster on your laptop than on the DSRI!"),(0,n.yg)("h2",{id:"the-solution"},"The solution"),(0,n.yg)("p",null,'Usually only one folder (and its subfolders) is mounted on the persistent volume. The rest is "ephemeral storage", which is the data bound to the application you started; this means the data will be stored on the same node as your workspace. '),(0,n.yg)("p",null,"This might result in faster read/write speeds! But it also means the data will be lost if the workspace is restarted (which does not happen every day, but can happen without notice)."),(0,n.yg)("p",null,"A solution could be to:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"Keep your code and important data as backup in the persistent volume (usually the workspace working directory)"),(0,n.yg)("li",{parentName:"ul"},"Copy the data your process needs to load into a folder outside of the persistent volume (on the ephemeral storage)"),(0,n.yg)("li",{parentName:"ul"},"Read/write data mostly from this folder on the ephemeral storage; avoid using the data in the persistent volume folder as much as possible"),(0,n.yg)("li",{parentName:"ul"},"Copy the important result files or temporary files you don't want to lose from the folder on the ephemeral storage to the folder on the persistent storage, as shown in the sketch below")),
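(0,n.yg)("p",null,"For example, here is a sketch of this pattern in a workspace terminal (assuming the persistent volume is mounted on ",(0,n.yg)("inlineCode",{parentName:"p"},"/workspace"),", using ",(0,n.yg)("inlineCode",{parentName:"p"},"/tmp")," as ephemeral storage, and a hypothetical ",(0,n.yg)("inlineCode",{parentName:"p"},"train.py")," script):"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"# Copy the input data from the persistent volume to the ephemeral storage\nmkdir -p /tmp/fast\ncp -r /workspace/dataset /tmp/fast/dataset\n# Read and write on the ephemeral storage while processing\npython train.py --input /tmp/fast/dataset --output /tmp/fast/results\n# Back up the results you want to keep to the persistent volume\ncp -r /tmp/fast/results /workspace/results\n")),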
(0,n.yg)("p",null,"Let us know how it works for you on the Slack ",(0,n.yg)("strong",{parentName:"p"},"#general")," channel, and if you have suggestions to improve the workspaces."))}h.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/2b7d82ba.e100cab7.js b/assets/js/2b7d82ba.7e2c4a40.js similarity index 99% rename from assets/js/2b7d82ba.e100cab7.js rename to assets/js/2b7d82ba.7e2c4a40.js index efddc2f4f..826602e85 100644 --- a/assets/js/2b7d82ba.e100cab7.js +++ b/assets/js/2b7d82ba.7e2c4a40.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5447],{5680:(e,n,t)=>{t.d(n,{xA:()=>p,yg:()=>g});var o=t(6540);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function a(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var l=o.createContext({}),u=function(e){var n=o.useContext(l),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},p=function(e){var n=u(e.components);return o.createElement(l.Provider,{value:n},e.children)},c={inlineCode:"code",wrapper:function(e){var n=e.children;return o.createElement(o.Fragment,{},n)}},h=o.forwardRef((function(e,n){var t=e.components,r=e.mdxType,a=e.originalType,l=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),h=u(t),g=r,y=h["".concat(l,".").concat(g)]||h[g]||c[g]||a;return t?o.createElement(y,i(i({ref:n},p),{},{components:t})):o.createElement(y,i({ref:n},p))}));function g(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var a=t.length,i=new Array(a);i[0]=h;var s={};for(var l in n)hasOwnProperty.call(n,l)&&(s[l]=n[l]);s.originalType=e,s.mdxType="string"==typeof e?e:r,i[1]=s;for(var u=2;u{t.r(n),t.d(n,{assets:()=>p,contentTitle:()=>l,default:()=>g,frontMatter:()=>s,metadata:()=>u,toc:()=>c});var o=t(9668),r=t(1367),a=(t(6540),t(5680)),i=["components"],s={id:"workflows-github-actions",title:"Deploy GitHub Runners"},l=void 0,u={unversionedId:"workflows-github-actions",id:"workflows-github-actions",title:"Deploy GitHub Runners",description:"Deploy a GitHub Actions runner to run workflows simple to define using YAML, and hosted in your GitHub repository on the DSRI. 
This allows you to run larger workloads than on GitHub-hosted runners, which are limited to 7G RAM, 1 CPU and 6h per job.",source:"@site/docs/workflows-github-actions.md",sourceDirName:".",slug:"/workflows-github-actions",permalink:"/docs/workflows-github-actions",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-github-actions.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-github-actions",title:"Deploy GitHub Runners"},sidebar:"docs",previous:{title:"Introduction to workflows",permalink:"/docs/workflows-introduction"},next:{title:"Deploy Airflow",permalink:"/docs/workflows-airflow"}},p={},c=[{value:"Install the chart",id:"install-the-chart",level:2},{value:"Deploy a Runner",id:"deploy-a-runner",level:2},{value:"For an organization",id:"for-an-organization",level:3},{value:"For a repository",id:"for-a-repository",level:3},{value:"Define Actions to run on DSRI",id:"define-actions-to-run-on-dsri",level:2},{value:"Uninstall the runner",id:"uninstall-the-runner",level:2},{value:"Deploy using GitHub Actions workflows",id:"deploy-using-github-actions-workflows",level:2},{value:"See also",id:"see-also",level:2}],h={toc:c};function g(e){var n=e.components,t=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,o.A)({},h,t,{components:n,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"Deploy a GitHub Actions runner to run workflows simple to define using YAML, and hosted in your GitHub repository on the DSRI. This allows you to run larger workloads than on GitHub-hosted runners, which are limited to 7G RAM, 1 CPU and 6h per job."),(0,a.yg)("p",null,"Here are some of the advantage of GitHub Actions:"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"A step can be any Bash command, or a reusable Action from the ",(0,a.yg)("a",{parentName:"li",href:"https://github.com/marketplace/"},"GitHub Marketplace"),", which can be easily define from a Docker container, and share with your collaborators"),(0,a.yg)("li",{parentName:"ul"},"Parallelization can easily be added manually or dynamically to up to 255 jobs"),(0,a.yg)("li",{parentName:"ul"},"It provides a good logging system directly available in your repository on GitHub"),(0,a.yg)("li",{parentName:"ul"},"Define triggers (on code push, cron job, manual request), and secrets (such as passwords) easily")),(0,a.yg)("p",null,"For more information about GitHub Actions workflows, go to ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/features/actions"},"https://github.com/features/actions")),(0,a.yg)("h2",{id:"install-the-chart"},"Install the chart"),(0,a.yg)("p",null,"You will need to have Helm installed on your computer to deploy a GitHub Actions Runner, see the ",(0,a.yg)("a",{parentName:"p",href:"/docs/helm"},"Helm docs")," for more details."),(0,a.yg)("p",null,"Install the Helm chart to be able to deploy the GitHub Actions Runner on the DSRI:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add openshift-actions-runner https://redhat-actions.github.io/openshift-actions-runner-chart\nhelm repo update\n")),(0,a.yg)("p",null,"Then create a GitHub Personal Access Token as per the instructions in the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/openshift-actions-runner#pat-guidelines"},"runner image README"),"."),(0,a.yg)("p",null,(0,a.yg)("strong",{parentName:"p"},"tl;dr:")," go to your Settings on GitHub: 
",(0,a.yg)("a",{parentName:"p",href:"https://github.com/settings/tokens"},"https://github.com/settings/tokens"),", click the button to create a new token, give it a meaningful name (e.g. ",(0,a.yg)("inlineCode",{parentName:"p"},"DSRI Runner my-project"),"), and check the following permissions:"),(0,a.yg)("p",null,"\u2705\ufe0f ",(0,a.yg)("inlineCode",{parentName:"p"},"repo")," (maybe also ",(0,a.yg)("inlineCode",{parentName:"p"},"workflow"),"?)"),(0,a.yg)("p",null,"\u2705\ufe0f ",(0,a.yg)("inlineCode",{parentName:"p"},"admin:org")," if the Runner is for an organization"),(0,a.yg)("h2",{id:"deploy-a-runner"},"Deploy a Runner"),(0,a.yg)("p",null,"Before deploying the runner, make sure you are in the project where you want to deploy it:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc project my-project\n")),(0,a.yg)("h3",{id:"for-an-organization"},"For an organization"),(0,a.yg)("p",null,"Deploy a runner available for all repositories of an organization (you can fine tune the access via GitHub Settings)"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Provide the token previously created, and the organization name")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'export GITHUB_PAT="TOKEN"\nexport GITHUB_OWNER=My-Org\n')),(0,a.yg)("ol",{start:2},(0,a.yg)("li",{parentName:"ol"},"Deploy the runner for the organization:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'helm install actions-runner openshift-actions-runner/actions-runner \\\n --set-string githubPat=$GITHUB_PAT \\\n --set-string githubOwner=$GITHUB_OWNER \\\n --set runnerLabels="{ dsri, $GITHUB_OWNER }" \\\n --set replicas=3 \\\n --set serviceAccountName=anyuid \\\n --set memoryRequest="512Mi" \\\n --set memoryLimit="100Gi" \\\n --set cpuRequest="100m" \\\n --set cpuLimit="64"\n')),(0,a.yg)("p",null,"You can also change the default runner image:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"}," --set runnerImage=ghcr.io/vemonet/github-actions-conda-runner \\\n --set runnerTag=latest\n")),(0,a.yg)("blockquote",null,(0,a.yg)("p",{parentName:"blockquote"},"Checkout ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/openshift-actions-runner-chart/blob/main/values.yaml"},"all available parameters here"))),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"Check the deployment:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"helm get manifest actions-runner | kubectl get -f -\n")),(0,a.yg)("p",null,"Go to your organization Settings page on GitHub, then go to the ",(0,a.yg)("strong",{parentName:"p"},"Actions")," tab, and scroll to the bottom. In the list of active runners you should see the runners you just deployed. "),(0,a.yg)("h3",{id:"for-a-repository"},"For a repository"),(0,a.yg)("p",null,"You can also deploy a runner for a specific repository:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'export GITHUB_PAT="TOKEN"\n# For an org runner, this is the org.\n# For a repo runner, this is the repo owner (org or user).\nexport GITHUB_OWNER=vemonet\n# For an org runner, omit this argument. 
\n# For a repo runner, the repo name.\nexport GITHUB_REPO=shapes-of-you\n')),(0,a.yg)("p",null,"Deploy the runner:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'helm install actions-runner openshift-actions-runner/actions-runner \\\n --set-string githubPat=$GITHUB_PAT \\\n --set-string githubOwner=$GITHUB_OWNER \\\n --set-string githubRepository=$GITHUB_REPO \\\n --set runnerLabels="{ dsri, anything-helpful }"\n')),(0,a.yg)("h2",{id:"define-actions-to-run-on-dsri"},"Define Actions to run on DSRI"),(0,a.yg)("p",null,"You can now set GitHub Action workflows, in the ",(0,a.yg)("inlineCode",{parentName:"p"},".github/workflows")," folder, to be run on this runner (the repository needs to be under the organization, or user you added the workflow to). The job will be sent to run on the DSRI:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},'jobs:\n your-job:\n runs-on: ["self-hosted", "dsri", "my-org" ]\n steps: ...\n')),(0,a.yg)("h2",{id:"uninstall-the-runner"},"Uninstall the runner"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"helm uninstall actions-runner\n")),(0,a.yg)("h2",{id:"deploy-using-github-actions-workflows"},"Deploy using GitHub Actions workflows"),(0,a.yg)("admonition",{title:"Experimental",type:"warning"},(0,a.yg)("p",{parentName:"admonition"},"Experimental: this deployment workflow is still experimental, let us know on Slack if you are interested in using it.")),(0,a.yg)("p",null,"Alternatively you can also build and deploy your application using a GitHub Actions workflow."),(0,a.yg)("p",null,"You will need to connect to the UM VPN in your workflow by defining 2 secrets for ",(0,a.yg)("inlineCode",{parentName:"p"},"VPN_USER")," and ",(0,a.yg)("inlineCode",{parentName:"p"},"VPN_PASSWORD"),", this is done by this step:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},"- name: Connect to the VPN\n run: |\n sudo apt-get install -y openconnect network-manager-openconnect\n echo '${{ secrets.VPN_PASSWORD }}' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl\n sleep 10\n")),(0,a.yg)("admonition",{title:"RedHat documentation",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"RedHat provides the following instructions and template to deploy an application on OpenShift")),(0,a.yg)("p",null,"The OpenShift Starter workflow will:"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Checkout your repository"),(0,a.yg)("li",{parentName:"ul"},"Perform a Docker build"),(0,a.yg)("li",{parentName:"ul"},"Push the built image to an image registry"),(0,a.yg)("li",{parentName:"ul"},"Log in to your OpenShift cluster"),(0,a.yg)("li",{parentName:"ul"},"Create an OpenShift app from the image and expose it to the internet.")),(0,a.yg)("p",null,"Before you begin:"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Have write access to a container image registry such as quay.io or Dockerhub."),(0,a.yg)("li",{parentName:"ul"},"Have access to an OpenShift cluster.",(0,a.yg)("ul",{parentName:"li"},(0,a.yg)("li",{parentName:"ul"},"For instructions to get started with OpenShift see ",(0,a.yg)("a",{parentName:"li",href:"https://www.openshift.com/try"},"https://www.openshift.com/try")))),(0,a.yg)("li",{parentName:"ul"},"The project you wish to add this workflow to should have a Dockerfile.",(0,a.yg)("ul",{parentName:"li"},(0,a.yg)("li",{parentName:"ul"},"If you 
don't have a Dockerfile at the repository root, see the buildah-build step."),(0,a.yg)("li",{parentName:"ul"},"Builds from scratch are also available, but require more configuration.")))),(0,a.yg)("p",null,"To get the workflow running:"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Add this workflow to your repository."),(0,a.yg)("li",{parentName:"ol"},"Edit the top-level 'env' section, which contains a list of environment variables that must be configured."),(0,a.yg)("li",{parentName:"ol"},"Create the secrets referenced in the 'env' section under your repository Settings."),(0,a.yg)("li",{parentName:"ol"},"Edit the 'branches' in the 'on' section to trigger the workflow on a push to your branch."),(0,a.yg)("li",{parentName:"ol"},"Commit and push your changes.")),(0,a.yg)("p",null,"For a more sophisticated example, see ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/spring-petclinic/blob/main/.github/workflows/petclinic-sample.yaml"},"https://github.com/redhat-actions/spring-petclinic/blob/main/.github/workflows/petclinic-sample.yaml"),"\nAlso see our GitHub organization, ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/"},"https://github.com/redhat-actions/")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},'name: Deploy to OpenShift\n\n# \u2b07\ufe0f Modify the fields marked with \u2b07\ufe0f to fit your project, and create any secrets that are referenced.\n# https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets\nenv:\n # \u2b07\ufe0f EDIT with your registry and registry path.\n REGISTRY: ghcr.io/maastrichtu-ids\n # \u2b07\ufe0f EDIT with your registry username.\n REGISTRY_USER: \n REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}\n\n # \u2b07\ufe0f EDIT to log into your OpenShift cluster and set up the context.\n # See https://github.com/redhat-actions/oc-login#readme for how to retrieve these values.\n OPENSHIFT_SERVER: ${{ secrets.OPENSHIFT_SERVER }}\n OPENSHIFT_TOKEN: ${{ secrets.OPENSHIFT_TOKEN }}\n\n # \u2b07\ufe0f EDIT with the port your application should be accessible on.\n APP_PORT: 8080\n\n # \u2b07\ufe0f EDIT if you wish to set the kube context\'s namespace after login. 
Leave blank to use the default namespace.\n OPENSHIFT_NAMESPACE: ""\n\n # If you wish to manually provide the APP_NAME and TAG, set them here, otherwise they will be auto-detected.\n APP_NAME: "my-app"\n TAG: ""\n\non:\n # https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows\n push:\n # Edit to the branch(es) you want to build and deploy on each push.\n branches: [ main ]\n\njobs:\n openshift-ci-cd:\n name: Build and deploy to OpenShift\n runs-on: ubuntu-20.04\n\n steps:\n - uses: actions/checkout@v2\n\n - name: Determine app name\n if: env.APP_NAME == \'\'\n run: |\n echo "APP_NAME=$(basename $PWD)" | tee -a $GITHUB_ENV\n\n - name: Determine tag\n if: env.TAG == \'\'\n run: |\n echo "TAG=${GITHUB_SHA::7}" | tee -a $GITHUB_ENV\n\n # https://github.com/redhat-actions/buildah-build#readme\n - name: Build from Dockerfile\n uses: redhat-actions/buildah-build@v1\n with:\n image: ${{ env.APP_NAME }}\n tag: ${{ env.TAG }}\n # If you don\'t have a dockerfile, see:\n # https://github.com/redhat-actions/buildah-build#building-from-scratch\n # Otherwise, point this to your Dockerfile relative to the repository root.\n dockerfiles: |\n ./Dockerfile\n\n # https://github.com/redhat-actions/push-to-registry#readme\n - name: Push to registry\n id: push-to-registry\n uses: redhat-actions/push-to-registry@v1\n with:\n image: ${{ env.APP_NAME }}\n tag: ${{ env.TAG }}\n registry: ${{ env.REGISTRY }}\n username: ${{ env.REGISTRY_USER }}\n password: ${{ env.REGISTRY_PASSWORD }}\n\n # The path the image was pushed to is now stored in ${{ steps.push-to-registry.outputs.registry-path }}\n\n - name: Connect to the VPN\n run: |\n sudo apt-get install -y openconnect network-manager-openconnect\n echo \'${{ secrets.VPN_PASSWORD }}\' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl\n sleep 10\n\n # oc-login works on all platforms, but oc must be installed first.\n # The GitHub Ubuntu runner already includes oc.\n # https://github.com/redhat-actions/oc-login#readme\n - name: Log in to OpenShift\n uses: redhat-actions/oc-login@v1\n with:\n openshift_server_url: ${{ env.OPENSHIFT_SERVER }}\n openshift_token: ${{ env.OPENSHIFT_TOKEN }}\n insecure_skip_tls_verify: true\n namespace: ${{ env.OPENSHIFT_NAMESPACE }}\n\n # This step should create a deployment, service, and route to run your app and expose it to the internet.\n # Feel free to replace this with \'oc apply\', \'helm install\', or however you like to deploy your app.\n - name: Create and expose app\n run: |\n export IMAGE="${{ steps.push-to-registry.outputs.registry-path }}"\n export PORT=${{ env.APP_PORT }}\n\n export SELECTOR="app=${{ env.APP_NAME }}"\n echo "SELECTOR=$SELECTOR" >> $GITHUB_ENV\n\n set -x\n # Take down any old deployment\n oc delete all --selector="$SELECTOR"\n oc new-app --name $APP_NAME --docker-image="$IMAGE"\n\n # Make sure the app port is exposed\n oc patch svc $APP_NAME -p "{ \\"spec\\": { \\"ports\\": [{ \\"name\\": \\"$PORT-tcp\\", \\"port\\": $PORT }] } }"\n oc expose service $APP_NAME --port=$PORT\n\n oc get all --selector="$SELECTOR"\n set +x\n\n export ROUTE="$(oc get route $APP_NAME -o jsonpath=\'{.spec.host}\')"\n echo "$APP_NAME is exposed at $ROUTE"\n echo "ROUTE=$ROUTE" >> $GITHUB_ENV\n\n - name: View application route\n run: |\n [[ -n ${{ env.ROUTE }} ]] || (echo "Determining application route failed in previous step"; exit 1)\n echo "======================== Your application is 
available at: ========================"\n echo ${{ env.ROUTE }}\n echo "==================================================================================="\n echo\n echo "Your app can be taken down with: \\"oc delete all --selector=\'${{ env.SELECTOR }}\'\\""\n')),(0,a.yg)("h2",{id:"see-also"},"See also"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"GitHub runner chart repository: ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/openshift-actions-runner-chart"},"https://github.com/redhat-actions/openshift-actions-runner-chart"))),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"Image for the runner: ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/openshift-actions-runner"},"https://github.com/redhat-actions/openshift-actions-runner"))),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"An action to automatically deploy a runner on a cluster (require to run openconnect to VPN first): ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/openshift-actions-runner"},"https://github.com/redhat-actions/openshift-actions-runner")))))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5447],{5680:(e,n,t)=>{t.d(n,{xA:()=>p,yg:()=>g});var o=t(6540);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function a(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var l=o.createContext({}),u=function(e){var n=o.useContext(l),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},p=function(e){var n=u(e.components);return o.createElement(l.Provider,{value:n},e.children)},c={inlineCode:"code",wrapper:function(e){var n=e.children;return o.createElement(o.Fragment,{},n)}},h=o.forwardRef((function(e,n){var t=e.components,r=e.mdxType,a=e.originalType,l=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),h=u(t),g=r,y=h["".concat(l,".").concat(g)]||h[g]||c[g]||a;return t?o.createElement(y,i(i({ref:n},p),{},{components:t})):o.createElement(y,i({ref:n},p))}));function g(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var a=t.length,i=new Array(a);i[0]=h;var s={};for(var l in n)hasOwnProperty.call(n,l)&&(s[l]=n[l]);s.originalType=e,s.mdxType="string"==typeof e?e:r,i[1]=s;for(var u=2;u{t.r(n),t.d(n,{assets:()=>p,contentTitle:()=>l,default:()=>g,frontMatter:()=>s,metadata:()=>u,toc:()=>c});var o=t(9668),r=t(1367),a=(t(6540),t(5680)),i=["components"],s={id:"workflows-github-actions",title:"Deploy GitHub Runners"},l=void 0,u={unversionedId:"workflows-github-actions",id:"workflows-github-actions",title:"Deploy GitHub Runners",description:"Deploy a GitHub Actions runner to run workflows simple to define using YAML, and hosted in your GitHub repository on the DSRI. 
This allows you to run larger workloads than on GitHub-hosted runners, which are limited to 7G RAM, 1 CPU and 6h per job.",source:"@site/docs/workflows-github-actions.md",sourceDirName:".",slug:"/workflows-github-actions",permalink:"/docs/workflows-github-actions",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-github-actions.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-github-actions",title:"Deploy GitHub Runners"},sidebar:"docs",previous:{title:"Introduction to workflows",permalink:"/docs/workflows-introduction"},next:{title:"Deploy Airflow",permalink:"/docs/workflows-airflow"}},p={},c=[{value:"Install the chart",id:"install-the-chart",level:2},{value:"Deploy a Runner",id:"deploy-a-runner",level:2},{value:"For an organization",id:"for-an-organization",level:3},{value:"For a repository",id:"for-a-repository",level:3},{value:"Define Actions to run on DSRI",id:"define-actions-to-run-on-dsri",level:2},{value:"Uninstall the runner",id:"uninstall-the-runner",level:2},{value:"Deploy using GitHub Actions workflows",id:"deploy-using-github-actions-workflows",level:2},{value:"See also",id:"see-also",level:2}],h={toc:c};function g(e){var n=e.components,t=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,o.A)({},h,t,{components:n,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"Deploy a GitHub Actions runner on the DSRI to run workflows that are simple to define using YAML and hosted in your GitHub repository. This allows you to run larger workloads than on GitHub-hosted runners, which are limited to 7G of RAM, 1 CPU and 6 hours per job."),(0,a.yg)("p",null,"Here are some of the advantages of GitHub Actions:"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"A step can be any Bash command, or a reusable Action from the ",(0,a.yg)("a",{parentName:"li",href:"https://github.com/marketplace/"},"GitHub Marketplace"),", which can easily be defined from a Docker container and shared with your collaborators"),(0,a.yg)("li",{parentName:"ul"},"Parallelization can easily be added, manually or dynamically, for up to 255 jobs"),(0,a.yg)("li",{parentName:"ul"},"It provides a good logging system directly available in your repository on GitHub"),(0,a.yg)("li",{parentName:"ul"},"You can easily define triggers (on code push, as a cron job, or on manual request) and secrets (such as passwords)")),(0,a.yg)("p",null,"For more information about GitHub Actions workflows, go to ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/features/actions"},"https://github.com/features/actions"),". A minimal example workflow is sketched below."),
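(0,a.yg)("p",null,"As a minimal sketch (the workflow file name, test script and secret name are hypothetical), a workflow in ",(0,a.yg)("inlineCode",{parentName:"p"},".github/workflows/test.yml")," combining these triggers and a secret could look like:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},'name: Run tests\non:\n  push:\n    branches: [ main ]  # trigger on code push\n  schedule:\n    - cron: "0 4 * * *"  # or on a cron schedule\njobs:\n  test:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v2\n      # Secrets are defined in the repository settings and masked in the logs\n      - run: ./run_tests.sh "${{ secrets.MY_SECRET }}"\n')),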
",(0,a.yg)("a",{parentName:"p",href:"https://github.com/settings/tokens"},"https://github.com/settings/tokens"),", click the button to create a new token, give it a meaningful name (e.g. ",(0,a.yg)("inlineCode",{parentName:"p"},"DSRI Runner my-project"),"), and check the following permissions:"),(0,a.yg)("p",null,"\u2705\ufe0f ",(0,a.yg)("inlineCode",{parentName:"p"},"repo")," (maybe also ",(0,a.yg)("inlineCode",{parentName:"p"},"workflow"),"?)"),(0,a.yg)("p",null,"\u2705\ufe0f ",(0,a.yg)("inlineCode",{parentName:"p"},"admin:org")," if the Runner is for an organization"),(0,a.yg)("h2",{id:"deploy-a-runner"},"Deploy a Runner"),(0,a.yg)("p",null,"Before deploying the runner, make sure you are in the project where you want to deploy it:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc project my-project\n")),(0,a.yg)("h3",{id:"for-an-organization"},"For an organization"),(0,a.yg)("p",null,"Deploy a runner available for all repositories of an organization (you can fine tune the access via GitHub Settings)"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Provide the token previously created, and the organization name")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'export GITHUB_PAT="TOKEN"\nexport GITHUB_OWNER=My-Org\n')),(0,a.yg)("ol",{start:2},(0,a.yg)("li",{parentName:"ol"},"Deploy the runner for the organization:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'helm install actions-runner openshift-actions-runner/actions-runner \\\n --set-string githubPat=$GITHUB_PAT \\\n --set-string githubOwner=$GITHUB_OWNER \\\n --set runnerLabels="{ dsri, $GITHUB_OWNER }" \\\n --set replicas=3 \\\n --set serviceAccountName=anyuid \\\n --set memoryRequest="512Mi" \\\n --set memoryLimit="100Gi" \\\n --set cpuRequest="100m" \\\n --set cpuLimit="64"\n')),(0,a.yg)("p",null,"You can also change the default runner image:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"}," --set runnerImage=ghcr.io/vemonet/github-actions-conda-runner \\\n --set runnerTag=latest\n")),(0,a.yg)("blockquote",null,(0,a.yg)("p",{parentName:"blockquote"},"Checkout ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/openshift-actions-runner-chart/blob/main/values.yaml"},"all available parameters here"))),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"Check the deployment:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"helm get manifest actions-runner | kubectl get -f -\n")),(0,a.yg)("p",null,"Go to your organization Settings page on GitHub, then go to the ",(0,a.yg)("strong",{parentName:"p"},"Actions")," tab, and scroll to the bottom. In the list of active runners you should see the runners you just deployed. "),(0,a.yg)("h3",{id:"for-a-repository"},"For a repository"),(0,a.yg)("p",null,"You can also deploy a runner for a specific repository:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'export GITHUB_PAT="TOKEN"\n# For an org runner, this is the org.\n# For a repo runner, this is the repo owner (org or user).\nexport GITHUB_OWNER=vemonet\n# For an org runner, omit this argument. 
\n# For a repo runner, the repo name.\nexport GITHUB_REPO=shapes-of-you\n')),(0,a.yg)("p",null,"Deploy the runner:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'helm install actions-runner openshift-actions-runner/actions-runner \\\n --set-string githubPat=$GITHUB_PAT \\\n --set-string githubOwner=$GITHUB_OWNER \\\n --set-string githubRepository=$GITHUB_REPO \\\n --set runnerLabels="{ dsri, anything-helpful }"\n')),(0,a.yg)("h2",{id:"define-actions-to-run-on-dsri"},"Define Actions to run on DSRI"),(0,a.yg)("p",null,"You can now set GitHub Action workflows, in the ",(0,a.yg)("inlineCode",{parentName:"p"},".github/workflows")," folder, to be run on this runner (the repository needs to be under the organization, or user you added the workflow to). The job will be sent to run on the DSRI:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},'jobs:\n your-job:\n runs-on: ["self-hosted", "dsri", "my-org" ]\n steps: ...\n')),(0,a.yg)("h2",{id:"uninstall-the-runner"},"Uninstall the runner"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"helm uninstall actions-runner\n")),(0,a.yg)("h2",{id:"deploy-using-github-actions-workflows"},"Deploy using GitHub Actions workflows"),(0,a.yg)("admonition",{title:"Experimental",type:"warning"},(0,a.yg)("p",{parentName:"admonition"},"Experimental: this deployment workflow is still experimental, let us know on Slack if you are interested in using it.")),(0,a.yg)("p",null,"Alternatively you can also build and deploy your application using a GitHub Actions workflow."),(0,a.yg)("p",null,"You will need to connect to the UM VPN in your workflow by defining 2 secrets for ",(0,a.yg)("inlineCode",{parentName:"p"},"VPN_USER")," and ",(0,a.yg)("inlineCode",{parentName:"p"},"VPN_PASSWORD"),", this is done by this step:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},"- name: Connect to the VPN\n run: |\n sudo apt-get install -y openconnect network-manager-openconnect\n echo '${{ secrets.VPN_PASSWORD }}' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl\n sleep 10\n")),(0,a.yg)("admonition",{title:"RedHat documentation",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"RedHat provides the following instructions and template to deploy an application on OpenShift")),(0,a.yg)("p",null,"The OpenShift Starter workflow will:"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Checkout your repository"),(0,a.yg)("li",{parentName:"ul"},"Perform a Docker build"),(0,a.yg)("li",{parentName:"ul"},"Push the built image to an image registry"),(0,a.yg)("li",{parentName:"ul"},"Log in to your OpenShift cluster"),(0,a.yg)("li",{parentName:"ul"},"Create an OpenShift app from the image and expose it to the internet.")),(0,a.yg)("p",null,"Before you begin:"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Have write access to a container image registry such as quay.io or Dockerhub."),(0,a.yg)("li",{parentName:"ul"},"Have access to an OpenShift cluster.",(0,a.yg)("ul",{parentName:"li"},(0,a.yg)("li",{parentName:"ul"},"For instructions to get started with OpenShift see ",(0,a.yg)("a",{parentName:"li",href:"https://www.openshift.com/try"},"https://www.openshift.com/try")))),(0,a.yg)("li",{parentName:"ul"},"The project you wish to add this workflow to should have a Dockerfile.",(0,a.yg)("ul",{parentName:"li"},(0,a.yg)("li",{parentName:"ul"},"If you 
don't have a Dockerfile at the repository root, see the buildah-build step."),(0,a.yg)("li",{parentName:"ul"},"Builds from scratch are also available, but require more configuration.")))),(0,a.yg)("p",null,"To get the workflow running:"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Add this workflow to your repository."),(0,a.yg)("li",{parentName:"ol"},"Edit the top-level 'env' section, which contains a list of environment variables that must be configured."),(0,a.yg)("li",{parentName:"ol"},"Create the secrets referenced in the 'env' section under your repository Settings."),(0,a.yg)("li",{parentName:"ol"},"Edit the 'branches' in the 'on' section to trigger the workflow on a push to your branch."),(0,a.yg)("li",{parentName:"ol"},"Commit and push your changes.")),(0,a.yg)("p",null,"For a more sophisticated example, see ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/spring-petclinic/blob/main/.github/workflows/petclinic-sample.yaml"},"https://github.com/redhat-actions/spring-petclinic/blob/main/.github/workflows/petclinic-sample.yaml"),"\nAlso see our GitHub organization, ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/"},"https://github.com/redhat-actions/")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},'name: Deploy to OpenShift\n\n# \u2b07\ufe0f Modify the fields marked with \u2b07\ufe0f to fit your project, and create any secrets that are referenced.\n# https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets\nenv:\n # \u2b07\ufe0f EDIT with your registry and registry path.\n REGISTRY: ghcr.io/maastrichtu-ids\n # \u2b07\ufe0f EDIT with your registry username.\n REGISTRY_USER: \n REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}\n\n # \u2b07\ufe0f EDIT to log into your OpenShift cluster and set up the context.\n # See https://github.com/redhat-actions/oc-login#readme for how to retrieve these values.\n OPENSHIFT_SERVER: ${{ secrets.OPENSHIFT_SERVER }}\n OPENSHIFT_TOKEN: ${{ secrets.OPENSHIFT_TOKEN }}\n\n # \u2b07\ufe0f EDIT with the port your application should be accessible on.\n APP_PORT: 8080\n\n # \u2b07\ufe0f EDIT if you wish to set the kube context\'s namespace after login. 
Leave blank to use the default namespace.\n OPENSHIFT_NAMESPACE: ""\n\n # If you wish to manually provide the APP_NAME and TAG, set them here, otherwise they will be auto-detected.\n APP_NAME: "my-app"\n TAG: ""\n\non:\n # https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows\n push:\n # Edit to the branch(es) you want to build and deploy on each push.\n branches: [ main ]\n\njobs:\n openshift-ci-cd:\n name: Build and deploy to OpenShift\n runs-on: ubuntu-20.04\n\n steps:\n - uses: actions/checkout@v2\n\n - name: Determine app name\n if: env.APP_NAME == \'\'\n run: |\n echo "APP_NAME=$(basename $PWD)" | tee -a $GITHUB_ENV\n\n - name: Determine tag\n if: env.TAG == \'\'\n run: |\n echo "TAG=${GITHUB_SHA::7}" | tee -a $GITHUB_ENV\n\n # https://github.com/redhat-actions/buildah-build#readme\n - name: Build from Dockerfile\n uses: redhat-actions/buildah-build@v1\n with:\n image: ${{ env.APP_NAME }}\n tag: ${{ env.TAG }}\n # If you don\'t have a dockerfile, see:\n # https://github.com/redhat-actions/buildah-build#building-from-scratch\n # Otherwise, point this to your Dockerfile relative to the repository root.\n dockerfiles: |\n ./Dockerfile\n\n # https://github.com/redhat-actions/push-to-registry#readme\n - name: Push to registry\n id: push-to-registry\n uses: redhat-actions/push-to-registry@v1\n with:\n image: ${{ env.APP_NAME }}\n tag: ${{ env.TAG }}\n registry: ${{ env.REGISTRY }}\n username: ${{ env.REGISTRY_USER }}\n password: ${{ env.REGISTRY_PASSWORD }}\n\n # The path the image was pushed to is now stored in ${{ steps.push-to-registry.outputs.registry-path }}\n\n - name: Connect to the VPN\n run: |\n sudo apt-get install -y openconnect network-manager-openconnect\n echo \'${{ secrets.VPN_PASSWORD }}\' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl\n sleep 10\n\n # oc-login works on all platforms, but oc must be installed first.\n # The GitHub Ubuntu runner already includes oc.\n # https://github.com/redhat-actions/oc-login#readme\n - name: Log in to OpenShift\n uses: redhat-actions/oc-login@v1\n with:\n openshift_server_url: ${{ env.OPENSHIFT_SERVER }}\n openshift_token: ${{ env.OPENSHIFT_TOKEN }}\n insecure_skip_tls_verify: true\n namespace: ${{ env.OPENSHIFT_NAMESPACE }}\n\n # This step should create a deployment, service, and route to run your app and expose it to the internet.\n # Feel free to replace this with \'oc apply\', \'helm install\', or however you like to deploy your app.\n - name: Create and expose app\n run: |\n export IMAGE="${{ steps.push-to-registry.outputs.registry-path }}"\n export PORT=${{ env.APP_PORT }}\n\n export SELECTOR="app=${{ env.APP_NAME }}"\n echo "SELECTOR=$SELECTOR" >> $GITHUB_ENV\n\n set -x\n # Take down any old deployment\n oc delete all --selector="$SELECTOR"\n oc new-app --name $APP_NAME --docker-image="$IMAGE"\n\n # Make sure the app port is exposed\n oc patch svc $APP_NAME -p "{ \\"spec\\": { \\"ports\\": [{ \\"name\\": \\"$PORT-tcp\\", \\"port\\": $PORT }] } }"\n oc expose service $APP_NAME --port=$PORT\n\n oc get all --selector="$SELECTOR"\n set +x\n\n export ROUTE="$(oc get route $APP_NAME -o jsonpath=\'{.spec.host}\')"\n echo "$APP_NAME is exposed at $ROUTE"\n echo "ROUTE=$ROUTE" >> $GITHUB_ENV\n\n - name: View application route\n run: |\n [[ -n ${{ env.ROUTE }} ]] || (echo "Determining application route failed in previous step"; exit 1)\n echo "======================== Your application is 
available at: ========================"\n echo ${{ env.ROUTE }}\n echo "==================================================================================="\n echo\n echo "Your app can be taken down with: \\"oc delete all --selector=\'${{ env.SELECTOR }}\'\\""\n')),(0,a.yg)("h2",{id:"see-also"},"See also"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"GitHub runner chart repository: ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/openshift-actions-runner-chart"},"https://github.com/redhat-actions/openshift-actions-runner-chart"))),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"Image for the runner: ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/openshift-actions-runner"},"https://github.com/redhat-actions/openshift-actions-runner"))),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"An action to automatically deploy a runner on a cluster (require to run openconnect to VPN first): ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/redhat-actions/openshift-actions-runner"},"https://github.com/redhat-actions/openshift-actions-runner")))))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/2e2e5152.33980a74.js b/assets/js/2e2e5152.e9751cc7.js similarity index 99% rename from assets/js/2e2e5152.33980a74.js rename to assets/js/2e2e5152.e9751cc7.js index e44a69a35..4ae70b34e 100644 --- a/assets/js/2e2e5152.33980a74.js +++ b/assets/js/2e2e5152.e9751cc7.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2412],{5680:(e,t,n)=>{n.d(t,{xA:()=>g,yg:()=>m});var a=n(6540);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var l=a.createContext({}),p=function(e){var t=a.useContext(l),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},g=function(e){var t=p(e.components);return a.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},c=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,o=e.originalType,l=e.parentName,g=s(e,["components","mdxType","originalType","parentName"]),c=p(n),m=i,d=c["".concat(l,".").concat(m)]||c[m]||u[m]||o;return n?a.createElement(d,r(r({ref:t},g),{},{components:n})):a.createElement(d,r({ref:t},g))}));function m(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var o=n.length,r=new Array(o);r[0]=c;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:i,r[1]=s;for(var p=2;p{n.r(t),n.d(t,{assets:()=>g,contentTitle:()=>l,default:()=>m,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var a=n(9668),i=n(1367),o=(n(6540),n(5680)),r=["components"],s={id:"guide-known-issues",title:"Known Issues"},l=void 0,p={unversionedId:"guide-known-issues",id:"guide-known-issues",title:"Known Issues",description:"Cannot access your data in the persistent 
folder",source:"@site/docs/guide-known-issues.md",sourceDirName:".",slug:"/guide-known-issues",permalink:"/docs/guide-known-issues",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-known-issues.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-known-issues",title:"Known Issues"},sidebar:"docs",previous:{title:"Parallelization using Dask",permalink:"/docs/dask-tutorial"},next:{title:"Publish a Docker image",permalink:"/docs/guide-publish-image"}},g={},u=[{value:"Cannot access your data in the persistent folder",id:"cannot-access-your-data-in-the-persistent-folder",level:2},{value:"Large volumes",id:"large-volumes",level:2},{value:"DockerHub pull limitations",id:"dockerhub-pull-limitations",level:2},{value:"How to run function within a container ''in the background'",id:"how-to-run-function-within-a-container-in-the-background",level:2},{value:"Git authentication issue",id:"git-authentication-issue",level:2},{value:"Windows:",id:"windows",level:5},{value:"Mac:",id:"mac",level:5},{value:"Filebrowser 403 forbidden",id:"filebrowser-403-forbidden",level:2}],c={toc:u};function m(e){var t=e.components,n=(0,i.A)(e,r);return(0,o.yg)("wrapper",(0,a.A)({},c,n,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"cannot-access-your-data-in-the-persistent-folder"},"Cannot access your data in the persistent folder"),(0,o.yg)("p",null,"Sometimes you cannot access anymore the data you put in the persistent folder of your container. It can be due to a node going down, if the persistent volume your pod is connected to is on this node, then it cannot access it anymore."),(0,o.yg)("p",null,"You can easily fix this issue by restarting the pod of your application, it will make it properly connect to resources on nodes that are up."),(0,o.yg)("p",null,"To restart the pod, go in topology, click on your application, go to the details tab, and decrease the pod count to 0, then put it back up to 1."),(0,o.yg)("h2",{id:"large-volumes"},"Large volumes"),(0,o.yg)("admonition",{title:"Pod or Deployment will not start",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"You could run into a following message in the ",(0,o.yg)("strong",{parentName:"p"},"Events")," tab that looks similar to this"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"Error: kubelet may be retrying requests that are timing out in CRI-O due to system load. Currently at stage container volume configuration: context deadline exceeded: error reserving ctr name\n"))),(0,o.yg)("p",null,"The issue above will occur if you are using a ",(0,o.yg)("strong",{parentName:"p"},"large persistent volume"),". 
It can be resolved by adding the following to your Deployment(Config):"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"spec:\n template:\n metadata:\n annotations:\n io.kubernetes.cri-o.TrySkipVolumeSELinuxLabel: 'true'\n spec:\n runtimeClassName: selinux\n")),(0,o.yg)("p",null,"Take note of the ",(0,o.yg)("strong",{parentName:"p"},"indentation")," and the place in the file!"),(0,o.yg)("p",null,"An example of this can be found here:"),(0,o.yg)("img",{class:"screenshot",src:"/img/screenshot_large_volume_issue.png",alt:"Storage",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("h2",{id:"dockerhub-pull-limitations"},"DockerHub pull limitations"),(0,o.yg)("admonition",{title:"Spot the issue",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"If the ",(0,o.yg)("strong",{parentName:"p"},"Events")," tab show this error:"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"--\x3e Scaling filebrowser-case-1 to 1\nerror: update acceptor rejected my-app-1: pods for rc 'my-project/my-app-1' took longer than 600 seconds to become available\n")),(0,o.yg)("p",{parentName:"admonition"},"Then check for the application ImageStream in ",(0,o.yg)("strong",{parentName:"p"},"Build")," > ",(0,o.yg)("strong",{parentName:"p"},"Images"),", and you might see this for your application image:"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"Internal error occurred: toomanyrequests: You have reached your pull rate limit.\nYou may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit.\n"))),(0,o.yg)("p",null,"You can solve this by creating a secret to login to DockerHub in your project:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc create secret docker-registry dockerhub-login --docker-server=docker.io --docker-username=dockerhub_username --docker-password=dockerhub_password --docker-email=example@mail.com\n")),(0,o.yg)("p",null,"Linking the login secret to the default service account:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc secrets link default dockerhub-login --for=pull\n")),(0,o.yg)("admonition",{type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"Login to DockerHub should raise the limitations")),(0,o.yg)("p",null,"To definitely solve this issue you can publish the DockerHub image to the ",(0,o.yg)("a",{parentName:"p",href:"https://docs.github.com/en/packages/guides/about-github-container-registry"},"GitHub Container Registry"),"."),(0,o.yg)("p",null,"Follow those instructions on your laptop:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/guide-publish-image#login-to-github-container-registry"},"Login to the GitHub Container Registry")," with ",(0,o.yg)("inlineCode",{parentName:"p"},"docker login"),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Pull the docker image from"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker pull 
myorg/myimage:latest\n")),(0,o.yg)("p",{parentName:"li"},(0,o.yg)("a",{parentName:"p",href:"mailto:git@github.com"},"git@github.com"),":MaastrichtU-IDS/",(0,o.yg)("a",{parentName:"p",href:"mailto:dsri-documentation.gitgit@github.com"},"dsri-documentation.gitgit@github.com"),":MaastrichtU-IDS/",(0,o.yg)("a",{parentName:"p",href:"mailto:dsri-documentation.gitgit@github.com"},"dsri-documentation.gitgit@github.com"),":MaastrichtU-IDS/dsri-documentation.git")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Change its tag"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker tag myorg/myimage:latest ghcr.io/maastrichtu-ids/myimage:latest\n"))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Push it back to the GitHub Container Registry:"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker push ghcr.io/maastrichtu-ids/myimage:latest\n")))),(0,o.yg)("admonition",{title:"Image created automatically",type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"If the image does not exist, GitHub will create automatically when you push it for the first time! You can then head to your ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/orgs/MaastrichtU-IDS/packages"},"organization ",(0,o.yg)("strong",{parentName:"a"},"Packages")," tab")," to see the package.")),(0,o.yg)("admonition",{title:"Make it public",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"By default new images are set as ",(0,o.yg)("inlineCode",{parentName:"p"},"Private"),", go to your ",(0,o.yg)("strong",{parentName:"p"},"Package Settings"),", and click ",(0,o.yg)("strong",{parentName:"p"},"Change Visibility")," to set it as ",(0,o.yg)("inlineCode",{parentName:"p"},"Public"),", this avoids the need to login to pull the image.")),(0,o.yg)("p",null,"You can update the image if you want access to the latest version, you can set a GitHub Actions workflow to do so."),(0,o.yg)("p",null,"Finally you will need to update your DSRI deployment, or template, to use the newly created image on ",(0,o.yg)("inlineCode",{parentName:"p"},"ghcr.io"),", and redeploy the application with the new template."),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"how-to-run-function-within-a-container-in-the-background"},"How to run function within a container ''in the background'"),(0,o.yg)("admonition",{title:"Spot the issue",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"If the ",(0,o.yg)("strong",{parentName:"p"},"Events")," tab show this error:"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"--\x3e cd /usr/local/src/work2/aerius-sample-sequencing/CD4K4ANXX\n\nTrinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221\nerror: The function starts but at some points just exits without warnings or errors to Windows folder\n")),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"DSRI in the container's terminal keep running fine but never finishes. 
At some point a red label ''disconnected'' appears and the terminal stops and the analysis never continues.\n"))),(0,o.yg)("p",null,"Those two issues are due to the process running attach to the terminal"),(0,o.yg)("p",null,'Should be able to easily run it using the "Bash way": add ',(0,o.yg)("inlineCode",{parentName:"p"},"nohup")," at the beginning and ",(0,o.yg)("inlineCode",{parentName:"p"},"&")," at the end\nIt will run in the back and all output that should have gone to the terminal will go to a file ",(0,o.yg)("inlineCode",{parentName:"p"},"nohup.out")," in the repo"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"nohup Trinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221 &\n")),(0,o.yg)("p",null,"To check if it is still running:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"ps aux | grep Trinity\n")),(0,o.yg)("p",null,'Be careful make sure the terminal uses bash and not shell ("sh")'),(0,o.yg)("p",null,"To use bash just type bash in the terminal:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"bash\n")),(0,o.yg)("h2",{id:"git-authentication-issue"},"Git authentication issue"),(0,o.yg)("admonition",{type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"\u26a0\ufe0f remote: HTTP Basic: Access denied fatal: Authentication failed for")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"It happen every time when we forced to change the Windows password."),(0,o.yg)("img",{class:"screenshot",src:"/img/authentication-issue.png",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("img",{class:"screenshot",src:"/img/git-authentication-issue.png",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}})),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Apply command from powershell ",(0,o.yg)("strong",{parentName:"p"},"(run as administrator)")),(0,o.yg)("p",{parentName:"li"},(0,o.yg)("strong",{parentName:"p"},(0,o.yg)("inlineCode",{parentName:"strong"},"git config --system --unset credential.helper")))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"And then remove ",(0,o.yg)("strong",{parentName:"p"},"gitconfig")," file from ",(0,o.yg)("strong",{parentName:"p"},"C:\\Program Files\\Git\\mingw64/etc/"),' location (Note: this path will be different in MAC like "/Users/username")')),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"After that use git command like ",(0,o.yg)("strong",{parentName:"p"},(0,o.yg)("inlineCode",{parentName:"strong"},"git pull")," or ",(0,o.yg)("inlineCode",{parentName:"strong"},"git push")),", it asked me for username and password. applying valid username and password and git command working."))),(0,o.yg)("h5",{id:"windows"},"Windows:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},"Go to Windows ",(0,o.yg)("strong",{parentName:"li"},"Credential Manager"),". This is done in a EN-US Windows by pressing the Windows Key and typing 'credential'. 
In other localized Windows variants you need to use the localized term.")),(0,o.yg)("img",{class:"screenshot",src:"/img/windows-credentials.png",alt:"Windows Credentials",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("p",null," ",(0,o.yg)("em",{parentName:"p"},"alternatively")," you can use the shortcut ",(0,o.yg)("inlineCode",{parentName:"p"},"control /name Microsoft.CredentialManager")," in the run dialog (WIN+R)"),(0,o.yg)("ol",{start:2},(0,o.yg)("li",{parentName:"ol"},"Edit the git entry under Windows Credentials, replacing old password with the new one.")),(0,o.yg)("h5",{id:"mac"},"Mac:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},'cmd+space and type "KeyChain Access",')),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},'You should find a key with the name like "gitlab.*.com Access Key for user". You can order by date modified to find it more easily.'))),(0,o.yg)("img",{class:"screenshot",src:"/img/Mac-git-autentication.png",alt:"Mac GIT Autentication",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("ol",{start:3},(0,o.yg)("li",{parentName:"ol"},"Right click and delete.")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"filebrowser-403-forbidden"},"Filebrowser 403 forbidden"),(0,o.yg)("admonition",{title:"Spot the issue",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"If you get 403 forbidden issue while try to upload folders / files or creating new folder / file"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"403 forbidden\n"))),(0,o.yg)("img",{class:"screenshot",src:"/img/forbidden-issue.png",alt:"Forbidden Issue",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("p",null,"Above issue will occur if you are not using the ",(0,o.yg)("strong",{parentName:"p"},"persistent storage"),"."),(0,o.yg)("p",null,"A persistent storage can be created by the DSRI team for a persistent storage of the data. 
",(0,o.yg)("a",{parentName:"p",href:"http://localhost:3000/dsri-documentation/help"},"Contact the DSRI team")," to request a persistent storage."),(0,o.yg)("p",null,"You can find the persistent storage name as below"),(0,o.yg)("img",{class:"screenshot",src:"/img/persistent_storage.png",alt:"Storage",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2412],{5680:(e,t,n)=>{n.d(t,{xA:()=>g,yg:()=>m});var a=n(6540);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var l=a.createContext({}),p=function(e){var t=a.useContext(l),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},g=function(e){var t=p(e.components);return a.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},c=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,o=e.originalType,l=e.parentName,g=s(e,["components","mdxType","originalType","parentName"]),c=p(n),m=i,d=c["".concat(l,".").concat(m)]||c[m]||u[m]||o;return n?a.createElement(d,r(r({ref:t},g),{},{components:n})):a.createElement(d,r({ref:t},g))}));function m(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var o=n.length,r=new Array(o);r[0]=c;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:i,r[1]=s;for(var p=2;p{n.r(t),n.d(t,{assets:()=>g,contentTitle:()=>l,default:()=>m,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var a=n(9668),i=n(1367),o=(n(6540),n(5680)),r=["components"],s={id:"guide-known-issues",title:"Known Issues"},l=void 0,p={unversionedId:"guide-known-issues",id:"guide-known-issues",title:"Known Issues",description:"Cannot access your data in the persistent folder",source:"@site/docs/guide-known-issues.md",sourceDirName:".",slug:"/guide-known-issues",permalink:"/docs/guide-known-issues",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-known-issues.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-known-issues",title:"Known Issues"},sidebar:"docs",previous:{title:"Parallelization using Dask",permalink:"/docs/dask-tutorial"},next:{title:"Publish a Docker image",permalink:"/docs/guide-publish-image"}},g={},u=[{value:"Cannot access your data in the persistent folder",id:"cannot-access-your-data-in-the-persistent-folder",level:2},{value:"Large volumes",id:"large-volumes",level:2},{value:"DockerHub pull limitations",id:"dockerhub-pull-limitations",level:2},{value:"How to run function within a container ''in the background'",id:"how-to-run-function-within-a-container-in-the-background",level:2},{value:"Git authentication issue",id:"git-authentication-issue",level:2},{value:"Windows:",id:"windows",level:5},{value:"Mac:",id:"mac",level:5},{value:"Filebrowser 403 
forbidden",id:"filebrowser-403-forbidden",level:2}],c={toc:u};function m(e){var t=e.components,n=(0,i.A)(e,r);return(0,o.yg)("wrapper",(0,a.A)({},c,n,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"cannot-access-your-data-in-the-persistent-folder"},"Cannot access your data in the persistent folder"),(0,o.yg)("p",null,"Sometimes you cannot access anymore the data you put in the persistent folder of your container. It can be due to a node going down, if the persistent volume your pod is connected to is on this node, then it cannot access it anymore."),(0,o.yg)("p",null,"You can easily fix this issue by restarting the pod of your application, it will make it properly connect to resources on nodes that are up."),(0,o.yg)("p",null,"To restart the pod, go in topology, click on your application, go to the details tab, and decrease the pod count to 0, then put it back up to 1."),(0,o.yg)("h2",{id:"large-volumes"},"Large volumes"),(0,o.yg)("admonition",{title:"Pod or Deployment will not start",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"You could run into a following message in the ",(0,o.yg)("strong",{parentName:"p"},"Events")," tab that looks similar to this"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"Error: kubelet may be retrying requests that are timing out in CRI-O due to system load. Currently at stage container volume configuration: context deadline exceeded: error reserving ctr name\n"))),(0,o.yg)("p",null,"The issue above will occur if you are using a ",(0,o.yg)("strong",{parentName:"p"},"large persistent volume"),". It can be resolved by adding the following to your Deployment(Config):"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"spec:\n template:\n metadata:\n annotations:\n io.kubernetes.cri-o.TrySkipVolumeSELinuxLabel: 'true'\n spec:\n runtimeClassName: selinux\n")),(0,o.yg)("p",null,"Take note of the ",(0,o.yg)("strong",{parentName:"p"},"indentation")," and the place in the file!"),(0,o.yg)("p",null,"An example of this can be found here:"),(0,o.yg)("img",{class:"screenshot",src:"/img/screenshot_large_volume_issue.png",alt:"Storage",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("h2",{id:"dockerhub-pull-limitations"},"DockerHub pull limitations"),(0,o.yg)("admonition",{title:"Spot the issue",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"If the ",(0,o.yg)("strong",{parentName:"p"},"Events")," tab show this error:"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"--\x3e Scaling filebrowser-case-1 to 1\nerror: update acceptor rejected my-app-1: pods for rc 'my-project/my-app-1' took longer than 600 seconds to become available\n")),(0,o.yg)("p",{parentName:"admonition"},"Then check for the application ImageStream in ",(0,o.yg)("strong",{parentName:"p"},"Build")," > ",(0,o.yg)("strong",{parentName:"p"},"Images"),", and you might see this for your application image:"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"Internal error occurred: toomanyrequests: You have reached your pull rate limit.\nYou may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit.\n"))),(0,o.yg)("p",null,"You can solve this by creating a secret to login to DockerHub in your project:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc create secret docker-registry dockerhub-login --docker-server=docker.io --docker-username=dockerhub_username --docker-password=dockerhub_password 
--docker-email=example@mail.com\n")),(0,o.yg)("p",null,"Linking the login secret to the default service account:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc secrets link default dockerhub-login --for=pull\n")),(0,o.yg)("admonition",{type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"Logging in to DockerHub should raise the pull limits")),(0,o.yg)("p",null,"To solve this issue permanently, you can publish the DockerHub image to the ",(0,o.yg)("a",{parentName:"p",href:"https://docs.github.com/en/packages/guides/about-github-container-registry"},"GitHub Container Registry"),"."),(0,o.yg)("p",null,"Follow these instructions on your laptop:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/guide-publish-image#login-to-github-container-registry"},"Login to the GitHub Container Registry")," with ",(0,o.yg)("inlineCode",{parentName:"p"},"docker login"),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Pull the docker image from DockerHub:"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker pull myorg/myimage:latest\n"))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Change its tag:"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker tag myorg/myimage:latest ghcr.io/maastrichtu-ids/myimage:latest\n"))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Push it back to the GitHub Container Registry:"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker push ghcr.io/maastrichtu-ids/myimage:latest\n")))),(0,o.yg)("admonition",{title:"Image created automatically",type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"If the image does not exist, GitHub will create it automatically when you push it for the first time!
You can then head to your ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/orgs/MaastrichtU-IDS/packages"},"organization ",(0,o.yg)("strong",{parentName:"a"},"Packages")," tab")," to see the package.")),(0,o.yg)("admonition",{title:"Make it public",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"By default new images are set as ",(0,o.yg)("inlineCode",{parentName:"p"},"Private"),". Go to your ",(0,o.yg)("strong",{parentName:"p"},"Package Settings")," and click ",(0,o.yg)("strong",{parentName:"p"},"Change Visibility")," to set it as ",(0,o.yg)("inlineCode",{parentName:"p"},"Public"),"; this avoids the need to log in to pull the image.")),(0,o.yg)("p",null,"If you want access to the latest version, you can update the image regularly; a GitHub Actions workflow can be set up to do so."),(0,o.yg)("p",null,"Finally, you will need to update your DSRI deployment, or template, to use the newly created image on ",(0,o.yg)("inlineCode",{parentName:"p"},"ghcr.io"),", and redeploy the application with the new template."),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"how-to-run-function-within-a-container-in-the-background"},"How to run a function within a container in the background"),(0,o.yg)("admonition",{title:"Spot the issue",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"If the ",(0,o.yg)("strong",{parentName:"p"},"Events")," tab shows this error:"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"--\x3e cd /usr/local/src/work2/aerius-sample-sequencing/CD4K4ANXX\n\nTrinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221\nerror: The function starts but at some points just exits without warnings or errors to Windows folder\n")),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"DSRI in the container's terminal keep running fine but never finishes.
At some point a red label ''disconnected'' appears and the terminal stops and the analysis never continues.\n"))),(0,o.yg)("p",null,"Those two issues are due to the process running attached to the terminal."),(0,o.yg)("p",null,'You should be able to run it easily the "Bash way": add ',(0,o.yg)("inlineCode",{parentName:"p"},"nohup")," at the beginning and ",(0,o.yg)("inlineCode",{parentName:"p"},"&")," at the end.\nIt will run in the background, and all output that would have gone to the terminal will go to a file ",(0,o.yg)("inlineCode",{parentName:"p"},"nohup.out")," in the current directory:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"nohup Trinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221 &\n")),(0,o.yg)("p",null,"To check if it is still running:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"ps aux | grep Trinity\n")),(0,o.yg)("p",null,'Be careful: make sure the terminal uses bash and not plain shell ("sh").'),(0,o.yg)("p",null,"To use bash, just type bash in the terminal:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"bash\n")),(0,o.yg)("h2",{id:"git-authentication-issue"},"Git authentication issue"),(0,o.yg)("admonition",{type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"\u26a0\ufe0f remote: HTTP Basic: Access denied fatal: Authentication failed for")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"It happens every time we are forced to change the Windows password."),(0,o.yg)("img",{class:"screenshot",src:"/img/authentication-issue.png",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("img",{class:"screenshot",src:"/img/git-authentication-issue.png",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}})),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Run this command from PowerShell ",(0,o.yg)("strong",{parentName:"p"},"(run as administrator)"),":"),(0,o.yg)("p",{parentName:"li"},(0,o.yg)("strong",{parentName:"p"},(0,o.yg)("inlineCode",{parentName:"strong"},"git config --system --unset credential.helper")))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Then remove the ",(0,o.yg)("strong",{parentName:"p"},"gitconfig")," file from the ",(0,o.yg)("strong",{parentName:"p"},"C:\\Program Files\\Git\\mingw64/etc/"),' location (note: this path will be different on Mac, e.g. "/Users/username")')),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"After that, use a git command like ",(0,o.yg)("strong",{parentName:"p"},(0,o.yg)("inlineCode",{parentName:"strong"},"git pull")," or ",(0,o.yg)("inlineCode",{parentName:"strong"},"git push")),"; it will ask for your username and password. Once you provide valid credentials, git commands will work again."))),(0,o.yg)("h5",{id:"windows"},"Windows:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},"Go to Windows ",(0,o.yg)("strong",{parentName:"li"},"Credential Manager"),". This is done in an EN-US Windows by pressing the Windows Key and typing 'credential'.
In other localized Windows variants you need to use the localized term.")),(0,o.yg)("img",{class:"screenshot",src:"/img/windows-credentials.png",alt:"Windows Credentials",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("p",null," ",(0,o.yg)("em",{parentName:"p"},"Alternatively"),", you can use the shortcut ",(0,o.yg)("inlineCode",{parentName:"p"},"control /name Microsoft.CredentialManager")," in the Run dialog (WIN+R)."),(0,o.yg)("ol",{start:2},(0,o.yg)("li",{parentName:"ol"},"Edit the git entry under Windows Credentials, replacing the old password with the new one.")),(0,o.yg)("h5",{id:"mac"},"Mac:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},'Press cmd+space and type "Keychain Access".')),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},'You should find a key with a name like "gitlab.*.com Access Key for user". You can order by date modified to find it more easily.'))),(0,o.yg)("img",{class:"screenshot",src:"/img/Mac-git-autentication.png",alt:"Mac Git Authentication",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("ol",{start:3},(0,o.yg)("li",{parentName:"ol"},"Right-click and delete it.")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"filebrowser-403-forbidden"},"Filebrowser 403 forbidden"),(0,o.yg)("admonition",{title:"Spot the issue",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"If you get a 403 forbidden issue while trying to upload folders/files or create a new folder/file:"),(0,o.yg)("pre",{parentName:"admonition"},(0,o.yg)("code",{parentName:"pre"},"403 forbidden\n"))),(0,o.yg)("img",{class:"screenshot",src:"/img/forbidden-issue.png",alt:"Forbidden Issue",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,o.yg)("p",null,"The issue above will occur if you are not using the ",(0,o.yg)("strong",{parentName:"p"},"persistent storage"),"."),(0,o.yg)("p",null,"A persistent storage can be created by the DSRI team to store your data persistently. 
",(0,o.yg)("a",{parentName:"p",href:"http://localhost:3000/dsri-documentation/help"},"Contact the DSRI team")," to request a persistent storage."),(0,o.yg)("p",null,"You can find the persistent storage name as below"),(0,o.yg)("img",{class:"screenshot",src:"/img/persistent_storage.png",alt:"Storage",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/36f9137d.4437e9f9.js b/assets/js/36f9137d.af2cc1b8.js similarity index 99% rename from assets/js/36f9137d.4437e9f9.js rename to assets/js/36f9137d.af2cc1b8.js index 7cc700ec7..29923ff3a 100644 --- a/assets/js/36f9137d.4437e9f9.js +++ b/assets/js/36f9137d.af2cc1b8.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5210],{5680:(e,t,a)=>{a.d(t,{xA:()=>u,yg:()=>m});var n=a(6540);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function o(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var p=n.createContext({}),s=function(e){var t=n.useContext(p),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},u=function(e){var t=s(e.components);return n.createElement(p.Provider,{value:t},e.children)},y={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},g=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,i=e.originalType,p=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),g=s(a),m=r,c=g["".concat(p,".").concat(m)]||g[m]||y[m]||i;return a?n.createElement(c,o(o({ref:t},u),{},{components:a})):n.createElement(c,o({ref:t},u))}));function m(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=a.length,o=new Array(i);o[0]=g;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:r,o[1]=l;for(var s=2;s{a.r(t),a.d(t,{assets:()=>u,contentTitle:()=>p,default:()=>m,frontMatter:()=>l,metadata:()=>s,toc:()=>y});var n=a(9668),r=a(1367),i=(a(6540),a(5680)),o=["components"],l={id:"deploy-jupyter",title:"Jupyter Notebooks"},p=void 0,s={unversionedId:"deploy-jupyter",id:"deploy-jupyter",title:"Jupyter Notebooks",description:"\ud83e\ude90 Start JupyterLab",source:"@site/docs/deploy-jupyter.md",sourceDirName:".",slug:"/deploy-jupyter",permalink:"/docs/deploy-jupyter",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-jupyter.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-jupyter",title:"Jupyter Notebooks"},sidebar:"docs",previous:{title:"Install from Operators",permalink:"/docs/operators"},next:{title:"RStudio",permalink:"/docs/deploy-rstudio"}},u={},y=[{value:"\ud83e\ude90 Start JupyterLab",id:"-start-jupyterlab",level:2},{value:"\ud83d\udce6\ufe0f Manage dependencies with Conda",id:"\ufe0f-manage-dependencies-with-conda",level:2},{value:"\ud83d\udc19 Use git in JupyterLab",id:"-use-git-in-jupyterlab",level:2},{value:"\ud83d\udc36 Example",id:"-example",level:3}],g={toc:y};function m(e){var 
t=e.components,a=(0,r.A)(e,o);return(0,i.yg)("wrapper",(0,n.A)({},g,a,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("h2",{id:"-start-jupyterlab"},"\ud83e\ude90 Start JupyterLab"),(0,i.yg)("p",null,"Start a JupyterLab container based on the ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/jupyter/docker-stacks"},"official Jupyter docker stacks")," (debian), with ",(0,i.yg)("inlineCode",{parentName:"p"},"sudo")," privileges to install anything you need (e.g. pip or apt packages)"),(0,i.yg)("p",null,"You can start a container using the ",(0,i.yg)("strong",{parentName:"p"},"JupyterLab")," template in the ",(0,i.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,i.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,i.yg)("p",null,"When instantiating the template you can provide a few parameters, such as:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("strong",{parentName:"li"},"Password")," to access the notebook"),(0,i.yg)("li",{parentName:"ul"},"Optionally you can provide a ",(0,i.yg)("strong",{parentName:"li"},"git repository")," to be automatically cloned in the JupyterLab (if there is a ",(0,i.yg)("inlineCode",{parentName:"li"},"requirements.txt")," packages will be automatically installed with ",(0,i.yg)("inlineCode",{parentName:"li"},"pip"),")"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("strong",{parentName:"li"},"Docker image")," to use for the notebook (see below for more details on customizing the docker image) "),(0,i.yg)("li",{parentName:"ul"},"Your ",(0,i.yg)("strong",{parentName:"li"},"git username and email")," to automatically configure git")),(0,i.yg)("p",null,"The DSRI will automatically create a persistent volume to store data you will put in the ",(0,i.yg)("inlineCode",{parentName:"p"},"/home/jovyan/work")," folder (the folder used by the notebook interface). 
You can find the persistent volumes in the DSRI web UI, go to the ",(0,i.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,i.yg)("strong",{parentName:"p"},"Storage")," > ",(0,i.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,i.yg)("img",{src:"/img/screenshot-deploy-jupyter.png",alt:"Deploy Jupyter",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("p",null,"With this template you can use any image based on the official Jupyter docker stack: ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/jupyter/docker-stacks"},"https://github.com/jupyter/docker-stacks")),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"ghcr.io/maastrichtu-ids/jupyterlab:latest"),": custom image for data science on the DSRI, with additional kernels (Java), conda integration, VisualStudio Code, and autocomplete for Python"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"ghcr.io/maastrichtu-ids/jupyterlab:knowledge-graph"),": custom image for working with knowledge graph on the DSRI, with SPARQL kernel and OpenRefine"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/scipy-notebook"),": some packages for science are preinstalled "),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/datascience-notebook"),": with Julia kernel"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/tensorflow-notebook"),": with tensorflow package pre-installed"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/r-notebook"),": to work with R"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/pyspark-notebook"),": if you want to connect to a Spark cluster"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/all-spark-notebook"),": if you want to run Spark locally in the notebook")),(0,i.yg)("p",null,"You can also build your own image, we recommend to use this repository as example to extend a JupyterLab image: ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/jupyterlab"},"https://github.com/MaastrichtU-IDS/jupyterlab")),(0,i.yg)("h2",{id:"\ufe0f-manage-dependencies-with-conda"},"\ud83d\udce6\ufe0f Manage dependencies with Conda"),(0,i.yg)("p",null,"With the ",(0,i.yg)("inlineCode",{parentName:"p"},"ghcr.io/maastrichtu-ids/jupyterlab:latest")," image, you can easily start notebooks from the JupyterLab Launcher page using installed conda environments, at the condition ",(0,i.yg)("inlineCode",{parentName:"p"},"nb_conda_kernels")," and ",(0,i.yg)("inlineCode",{parentName:"p"},"ipykernel")," are installed in those environments."),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("p",{parentName:"li"},"You can pass a Git repository URL which contains an ",(0,i.yg)("inlineCode",{parentName:"p"},"environment.yml")," file in the root folder when starting JupyterLab, the conda environment will automatically be installed at the start of your container, and available in the JupyterLab Launcher page. 
You can use this repository as example: ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-demo"},"https://github.com/MaastrichtU-IDS/dsri-demo"))),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("p",{parentName:"li"},"Or you can install it directly in a running JupyterLab (we use ",(0,i.yg)("inlineCode",{parentName:"p"},"mamba")," which is like ",(0,i.yg)("inlineCode",{parentName:"p"},"conda")," but faster):"),(0,i.yg)("pre",{parentName:"li"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"mamba env create -f environment.yml\n")),(0,i.yg)("p",{parentName:"li"},"You'll need to wait for 1 or 2 minutes before the new conda environment becomes available on the JupyterLab Launcher page."))),(0,i.yg)("p",null,"You can easily install an environment with a different version of Python if you need it. Here is an example of an ",(0,i.yg)("inlineCode",{parentName:"p"},"environment.yml")," file to create an environment with Python 3.9, install the minimal dependencies required to easily starts notebooks in this environment with ",(0,i.yg)("inlineCode",{parentName:"p"},"conda"),", and install a ",(0,i.yg)("inlineCode",{parentName:"p"},"pip")," package:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"name: custom-env\nchannels:\n - defaults\n - conda-forge\n - anaconda\ndependencies:\n - python=3.9\n - ipykernel \n - nb_conda_kernels\n - pip\n - pip:\n - matplotlib\n")),(0,i.yg)("p",null,"\u26a0\ufe0f You cannot use ",(0,i.yg)("inlineCode",{parentName:"p"},"conda activate")," in a Docker container, so you will need to either open a notebook using the kernel for your conda env, or use ",(0,i.yg)("inlineCode",{parentName:"p"},"conda run")," to run scripts in the new environment:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"conda run -n custom-env python --version\n")),(0,i.yg)("h2",{id:"-use-git-in-jupyterlab"},"\ud83d\udc19 Use git in JupyterLab"),(0,i.yg)("p",null,"You can always use ",(0,i.yg)("inlineCode",{parentName:"p"},"git")," from the terminal."),(0,i.yg)("admonition",{title:"Configure username",type:"caution"},(0,i.yg)("p",{parentName:"admonition"},"Before pushing back to GitHub or GitLab, you will need to ",(0,i.yg)("strong",{parentName:"p"},"configure you username and email")," in VSCode terminal:"),(0,i.yg)("pre",{parentName:"admonition"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},'git config --global user.name "Jean Dupont"\ngit config --global user.email jeandupont@gmail.com\n'))),(0,i.yg)("admonition",{title:"Save your password",type:"info"},(0,i.yg)("p",{parentName:"admonition"},"You can run this command to ask git to save your password for 15min:"),(0,i.yg)("pre",{parentName:"admonition"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"git config credential.helper cache\n")),(0,i.yg)("p",{parentName:"admonition"},"Or store the password in a plain text file:"),(0,i.yg)("pre",{parentName:"admonition"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"git config --global credential.helper 'store --file ~/.git-credentials'\n"))),(0,i.yg)("admonition",{title:"Git tip",type:"tip"},(0,i.yg)("p",{parentName:"admonition"},"We recommend to use SSH instead of HTTPS connection when possible, checkout ",(0,i.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent"},"here")," how to generate SSH keys and use them with your GitHub 
account.")),(0,i.yg)("p",null,"You can also enable and use the ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/jupyterlab/jupyterlab-git"},"JupyterLab Git extension")," to clone and manage your ",(0,i.yg)("inlineCode",{parentName:"p"},"git")," repositories."),(0,i.yg)("p",null,"It will prompt you for a username and password if the repository is private."),(0,i.yg)("img",{src:"https://raw.githubusercontent.com/jupyterlab/jupyterlab-git/master/docs/figs/preview.gif",alt:"JupyterLab Git extension",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("h3",{id:"-example"},"\ud83d\udc36 Example"),(0,i.yg)("p",null,"Initialize repository"),(0,i.yg)("img",{src:"/img/sample_git_page.png",alt:"Initialize repo",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("p",null,"Include git details in DSRI project setup"),(0,i.yg)("img",{src:"/img/sample_git_details_jupyter.png",alt:"git details",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("p",null,"Verify automatic deployment"),(0,i.yg)("img",{src:"/img/sample_workspace.png",alt:"workspace",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/sample_requirements.png",alt:"requirements",style:{maxWidth:"100%",maxHeight:"100%"}}))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5210],{5680:(e,t,a)=>{a.d(t,{xA:()=>u,yg:()=>m});var n=a(6540);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function o(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var p=n.createContext({}),s=function(e){var t=n.useContext(p),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},u=function(e){var t=s(e.components);return n.createElement(p.Provider,{value:t},e.children)},y={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},g=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,i=e.originalType,p=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),g=s(a),m=r,c=g["".concat(p,".").concat(m)]||g[m]||y[m]||i;return a?n.createElement(c,o(o({ref:t},u),{},{components:a})):n.createElement(c,o({ref:t},u))}));function m(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=a.length,o=new Array(i);o[0]=g;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:r,o[1]=l;for(var s=2;s{a.r(t),a.d(t,{assets:()=>u,contentTitle:()=>p,default:()=>m,frontMatter:()=>l,metadata:()=>s,toc:()=>y});var n=a(9668),r=a(1367),i=(a(6540),a(5680)),o=["components"],l={id:"deploy-jupyter",title:"Jupyter Notebooks"},p=void 0,s={unversionedId:"deploy-jupyter",id:"deploy-jupyter",title:"Jupyter Notebooks",description:"\ud83e\ude90 Start JupyterLab",source:"@site/docs/deploy-jupyter.md",sourceDirName:".",slug:"/deploy-jupyter",permalink:"/docs/deploy-jupyter",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-jupyter.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 
2024",frontMatter:{id:"deploy-jupyter",title:"Jupyter Notebooks"},sidebar:"docs",previous:{title:"Install from Operators",permalink:"/docs/operators"},next:{title:"RStudio",permalink:"/docs/deploy-rstudio"}},u={},y=[{value:"\ud83e\ude90 Start JupyterLab",id:"-start-jupyterlab",level:2},{value:"\ud83d\udce6\ufe0f Manage dependencies with Conda",id:"\ufe0f-manage-dependencies-with-conda",level:2},{value:"\ud83d\udc19 Use git in JupyterLab",id:"-use-git-in-jupyterlab",level:2},{value:"\ud83d\udc36 Example",id:"-example",level:3}],g={toc:y};function m(e){var t=e.components,a=(0,r.A)(e,o);return(0,i.yg)("wrapper",(0,n.A)({},g,a,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("h2",{id:"-start-jupyterlab"},"\ud83e\ude90 Start JupyterLab"),(0,i.yg)("p",null,"Start a JupyterLab container based on the ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/jupyter/docker-stacks"},"official Jupyter docker stacks")," (debian), with ",(0,i.yg)("inlineCode",{parentName:"p"},"sudo")," privileges to install anything you need (e.g. pip or apt packages)"),(0,i.yg)("p",null,"You can start a container using the ",(0,i.yg)("strong",{parentName:"p"},"JupyterLab")," template in the ",(0,i.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,i.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,i.yg)("p",null,"When instantiating the template you can provide a few parameters, such as:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("strong",{parentName:"li"},"Password")," to access the notebook"),(0,i.yg)("li",{parentName:"ul"},"Optionally you can provide a ",(0,i.yg)("strong",{parentName:"li"},"git repository")," to be automatically cloned in the JupyterLab (if there is a ",(0,i.yg)("inlineCode",{parentName:"li"},"requirements.txt")," packages will be automatically installed with ",(0,i.yg)("inlineCode",{parentName:"li"},"pip"),")"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("strong",{parentName:"li"},"Docker image")," to use for the notebook (see below for more details on customizing the docker image) "),(0,i.yg)("li",{parentName:"ul"},"Your ",(0,i.yg)("strong",{parentName:"li"},"git username and email")," to automatically configure git")),(0,i.yg)("p",null,"The DSRI will automatically create a persistent volume to store data you will put in the ",(0,i.yg)("inlineCode",{parentName:"p"},"/home/jovyan/work")," folder (the folder used by the notebook interface). 
You can find the persistent volumes in the DSRI web UI: go to the ",(0,i.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,i.yg)("strong",{parentName:"p"},"Storage")," > ",(0,i.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,i.yg)("img",{src:"/img/screenshot-deploy-jupyter.png",alt:"Deploy Jupyter",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("p",null,"With this template you can use any image based on the official Jupyter docker stack: ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/jupyter/docker-stacks"},"https://github.com/jupyter/docker-stacks")),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"ghcr.io/maastrichtu-ids/jupyterlab:latest"),": custom image for data science on the DSRI, with additional kernels (Java), conda integration, VisualStudio Code, and autocomplete for Python"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"ghcr.io/maastrichtu-ids/jupyterlab:knowledge-graph"),": custom image for working with knowledge graphs on the DSRI, with SPARQL kernel and OpenRefine"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/scipy-notebook"),": some packages for science are preinstalled "),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/datascience-notebook"),": with Julia kernel"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/tensorflow-notebook"),": with tensorflow package pre-installed"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/r-notebook"),": to work with R"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/pyspark-notebook"),": if you want to connect to a Spark cluster"),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"jupyter/all-spark-notebook"),": if you want to run Spark locally in the notebook")),(0,i.yg)("p",null,"You can also build your own image; we recommend using this repository as an example to extend a JupyterLab image: ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/jupyterlab"},"https://github.com/MaastrichtU-IDS/jupyterlab")),(0,i.yg)("h2",{id:"\ufe0f-manage-dependencies-with-conda"},"\ud83d\udce6\ufe0f Manage dependencies with Conda"),(0,i.yg)("p",null,"With the ",(0,i.yg)("inlineCode",{parentName:"p"},"ghcr.io/maastrichtu-ids/jupyterlab:latest")," image, you can easily start notebooks from the JupyterLab Launcher page using installed conda environments, provided that ",(0,i.yg)("inlineCode",{parentName:"p"},"nb_conda_kernels")," and ",(0,i.yg)("inlineCode",{parentName:"p"},"ipykernel")," are installed in those environments."),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("p",{parentName:"li"},"You can pass a Git repository URL which contains an ",(0,i.yg)("inlineCode",{parentName:"p"},"environment.yml")," file in the root folder when starting JupyterLab; the conda environment will automatically be installed when your container starts, and will be available in the JupyterLab Launcher page. 
You can use this repository as an example: ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-demo"},"https://github.com/MaastrichtU-IDS/dsri-demo"))),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("p",{parentName:"li"},"Or you can install it directly in a running JupyterLab (we use ",(0,i.yg)("inlineCode",{parentName:"p"},"mamba"),", which is like ",(0,i.yg)("inlineCode",{parentName:"p"},"conda")," but faster):"),(0,i.yg)("pre",{parentName:"li"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"mamba env create -f environment.yml\n")),(0,i.yg)("p",{parentName:"li"},"You'll need to wait for 1 or 2 minutes before the new conda environment becomes available on the JupyterLab Launcher page."))),(0,i.yg)("p",null,"You can easily install an environment with a different version of Python if you need it. Here is an example of an ",(0,i.yg)("inlineCode",{parentName:"p"},"environment.yml")," file to create an environment with Python 3.9, install the minimal dependencies required to easily start notebooks in this environment with ",(0,i.yg)("inlineCode",{parentName:"p"},"conda"),", and install a ",(0,i.yg)("inlineCode",{parentName:"p"},"pip")," package:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-yaml"},"name: custom-env\nchannels:\n  - defaults\n  - conda-forge\n  - anaconda\ndependencies:\n  - python=3.9\n  - ipykernel\n  - nb_conda_kernels\n  - pip\n  - pip:\n      - matplotlib\n")),(0,i.yg)("p",null,"\u26a0\ufe0f You cannot use ",(0,i.yg)("inlineCode",{parentName:"p"},"conda activate")," in a Docker container, so you will need to either open a notebook using the kernel for your conda env, or use ",(0,i.yg)("inlineCode",{parentName:"p"},"conda run")," to run scripts in the new environment:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"conda run -n custom-env python --version\n")),(0,i.yg)("h2",{id:"-use-git-in-jupyterlab"},"\ud83d\udc19 Use git in JupyterLab"),(0,i.yg)("p",null,"You can always use ",(0,i.yg)("inlineCode",{parentName:"p"},"git")," from the terminal."),(0,i.yg)("admonition",{title:"Configure username",type:"caution"},(0,i.yg)("p",{parentName:"admonition"},"Before pushing back to GitHub or GitLab, you will need to ",(0,i.yg)("strong",{parentName:"p"},"configure your username and email")," in the terminal:"),(0,i.yg)("pre",{parentName:"admonition"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},'git config --global user.name "Jean Dupont"\ngit config --global user.email jeandupont@gmail.com\n'))),(0,i.yg)("admonition",{title:"Save your password",type:"info"},(0,i.yg)("p",{parentName:"admonition"},"You can run this command to ask git to save your password for 15 minutes:"),(0,i.yg)("pre",{parentName:"admonition"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"git config credential.helper cache\n")),(0,i.yg)("p",{parentName:"admonition"},"Or store the password in a plain text file:"),(0,i.yg)("pre",{parentName:"admonition"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"git config --global credential.helper 'store --file ~/.git-credentials'\n"))),(0,i.yg)("admonition",{title:"Git tip",type:"tip"},(0,i.yg)("p",{parentName:"admonition"},"We recommend using SSH instead of HTTPS when possible; check out ",(0,i.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent"},"here")," how to generate SSH keys and use them with your GitHub
account.")),(0,i.yg)("p",null,"You can also enable and use the ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/jupyterlab/jupyterlab-git"},"JupyterLab Git extension")," to clone and manage your ",(0,i.yg)("inlineCode",{parentName:"p"},"git")," repositories."),(0,i.yg)("p",null,"It will prompt you for a username and password if the repository is private."),(0,i.yg)("img",{src:"https://raw.githubusercontent.com/jupyterlab/jupyterlab-git/master/docs/figs/preview.gif",alt:"JupyterLab Git extension",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("h3",{id:"-example"},"\ud83d\udc36 Example"),(0,i.yg)("p",null,"Initialize repository"),(0,i.yg)("img",{src:"/img/sample_git_page.png",alt:"Initialize repo",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("p",null,"Include git details in DSRI project setup"),(0,i.yg)("img",{src:"/img/sample_git_details_jupyter.png",alt:"git details",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("p",null,"Verify automatic deployment"),(0,i.yg)("img",{src:"/img/sample_workspace.png",alt:"workspace",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/sample_requirements.png",alt:"requirements",style:{maxWidth:"100%",maxHeight:"100%"}}))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/389e2b0f.7b1734df.js b/assets/js/389e2b0f.d07c3e77.js similarity index 99% rename from assets/js/389e2b0f.7b1734df.js rename to assets/js/389e2b0f.d07c3e77.js index 25801d067..ec87f91bf 100644 --- a/assets/js/389e2b0f.7b1734df.js +++ b/assets/js/389e2b0f.d07c3e77.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[610],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>g});var n=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function l(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var i=n.createContext({}),d=function(e){var t=n.useContext(i),a=t;return e&&(a="function"==typeof e?e(t):l(l({},t),e)),a},c=function(e){var t=d(e.components);return n.createElement(i.Provider,{value:t},e.children)},s={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,r=e.originalType,i=e.parentName,c=p(e,["components","mdxType","originalType","parentName"]),m=d(a),g=o,y=m["".concat(i,".").concat(g)]||m[g]||s[g]||r;return a?n.createElement(y,l(l({ref:t},c),{},{components:a})):n.createElement(y,l({ref:t},c))}));function g(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=a.length,l=new Array(r);l[0]=m;var p={};for(var i in t)hasOwnProperty.call(t,i)&&(p[i]=t[i]);p.originalType=e,p.mdxType="string"==typeof e?e:o,l[1]=p;for(var d=2;d{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>g,frontMatter:()=>p,metadata:()=>d,toc:()=>s});var n=a(9668),o=a(1367),r=(a(6540),a(5680)),l=["components"],p={id:"openshift-commands",title:"Command Line Interface"},i=void 0,d={unversionedId:"openshift-commands",id:"openshift-commands",title:"Command Line 
Interface",description:"Overview",source:"@site/docs/openshift-commands.md",sourceDirName:".",slug:"/openshift-commands",permalink:"/docs/openshift-commands",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-commands.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-commands",title:"Command Line Interface"},sidebar:"docs",previous:{title:"Login to Docker registries",permalink:"/docs/login-docker-registry"},next:{title:"Data storage",permalink:"/docs/openshift-storage"}},c={},s=[{value:"Overview",id:"overview",level:2},{value:"Projects",id:"projects",level:2},{value:"List projects",id:"list-projects",level:3},{value:"Connect to project",id:"connect-to-project",level:3},{value:"ImageStreams",id:"imagestreams",level:2},{value:"Pods",id:"pods",level:2},{value:"Create pod from YAML",id:"create-pod-from-yaml",level:3},{value:"List pods",id:"list-pods",level:3},{value:"Get specific pod",id:"get-specific-pod",level:3},{value:"Remote Shell connection",id:"remote-shell-connection",level:3},{value:"Execute command in pod",id:"execute-command-in-pod",level:3},{value:"Delete pod",id:"delete-pod",level:3},{value:"Get pod logs",id:"get-pod-logs",level:3},{value:"Create app from template",id:"create-app-from-template",level:2},{value:"Copy files",id:"copy-files",level:3}],m={toc:s};function g(e){var t=e.components,a=(0,o.A)(e,l);return(0,r.yg)("wrapper",(0,n.A)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("h2",{id:"overview"},"Overview"),(0,r.yg)("p",null,"Here is an overview of common ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," commands:"),(0,r.yg)("table",null,(0,r.yg)("thead",{parentName:"table"},(0,r.yg)("tr",{parentName:"thead"},(0,r.yg)("th",{parentName:"tr",align:null},(0,r.yg)("strong",{parentName:"th"},"Command")),(0,r.yg)("th",{parentName:"tr",align:null},(0,r.yg)("strong",{parentName:"th"},"Description")))),(0,r.yg)("tbody",{parentName:"table"},(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc login --token=")),(0,r.yg)("td",{parentName:"tr",align:null},"Login to the DSRI OpenShift cluster in your terminal")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc get projects")),(0,r.yg)("td",{parentName:"tr",align:null},"List all available projects")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc project ")),(0,r.yg)("td",{parentName:"tr",align:null},"Switch to project")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc get pods ")),(0,r.yg)("td",{parentName:"tr",align:null},"Get running pods (a pod can run one or multiple containers for your application)")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc rsh ")),(0,r.yg)("td",{parentName:"tr",align:null},"Remote terminal connexion to a pod (Shell/Bash)")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc cp ")),(0,r.yg)("td",{parentName:"tr",align:null},"Copy files from host to container or vice versa, e.g. 
from host: ",(0,r.yg)("inlineCode",{parentName:"td"},"oc cp :")," or from to host: ",(0,r.yg)("inlineCode",{parentName:"td"},"oc cp : "))),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc rsync ")),(0,r.yg)("td",{parentName:"tr",align:null},"Similar to rsync command on Linux to synchronize directories between container and host or the other way around")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc exec ")),(0,r.yg)("td",{parentName:"tr",align:null},"Execute command in pods")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc delete pod ")),(0,r.yg)("td",{parentName:"tr",align:null},"Delete pod")))),(0,r.yg)("h2",{id:"projects"},"Projects"),(0,r.yg)("h3",{id:"list-projects"},"List projects"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc projects\n")),(0,r.yg)("h3",{id:"connect-to-project"},"Connect to project"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc project my-project\n")),(0,r.yg)("hr",null),(0,r.yg)("h2",{id:"imagestreams"},"ImageStreams"),(0,r.yg)("p",null,"To update an ImageStream in your project to pull the latest update from the external repository (e.g. from ghcr.io or DockerHub):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc import-image \n")),(0,r.yg)("hr",null),(0,r.yg)("h2",{id:"pods"},"Pods"),(0,r.yg)("h3",{id:"create-pod-from-yaml"},"Create pod from YAML"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc create -f my-pod.yaml\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"E.g. 
",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/d2s-pod-virtuoso.yaml"},"d2s-pod-virtuoso.yaml"),".")),(0,r.yg)("h3",{id:"list-pods"},"List pods"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod\n")),(0,r.yg)("p",null,"List running pods:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pods --field-selector=status.phase=Running\n")),(0,r.yg)("h3",{id:"get-specific-pod"},"Get specific pod"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod | grep \n")),(0,r.yg)("p",null,"Using selector with Apache Flink as example, and showing only the pod id without header:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name\n")),(0,r.yg)("h3",{id:"remote-shell-connection"},"Remote Shell connection"),(0,r.yg)("p",null,"Connect to a pod with ",(0,r.yg)("a",{parentName:"p",href:"https://devhints.io/bash"},"Bash"),"."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc rsh \n")),(0,r.yg)("h3",{id:"execute-command-in-pod"},"Execute command in pod"),(0,r.yg)("p",null,"Example creating a folder:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc exec -- mkdir -p /mnt/workspace/resources\n")),(0,r.yg)("h3",{id:"delete-pod"},"Delete pod"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete pod \n")),(0,r.yg)("admonition",{title:"Force pod deletion",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"If the pod is not properly deleted, you can force its deletion:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete pod --force --grace-period=0 \n"))),(0,r.yg)("h3",{id:"get-pod-logs"},"Get pod logs"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc logs -f \n")),(0,r.yg)("admonition",{title:"Debug a pod",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Get more details on how to ",(0,r.yg)("a",{parentName:"p",href:"/docs/guide-monitoring"},"debug a pod"),".")),(0,r.yg)("h2",{id:"create-app-from-template"},"Create app from template"),(0,r.yg)("p",null,"Create app from template using the CLI and providing parameters as arguments:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc new-app my-template -p APPLICATION_NAME=my-app -p ADMIN_PASSWORD=mypassword\n")),(0,r.yg)("p",null,"Example for the Semantic Web course notebooks:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc new-app template-jupyterstack-notebook -p APPLICATION_NAME=swcourseName -p NOTEBOOK_PASSWORD=PASSWORD\n\noc delete all --selector template=template-jupyterstack-notebook\n")),(0,r.yg)("h3",{id:"copy-files"},"Copy files"),(0,r.yg)("p",null,"See the ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-load-data"},"Load data")," page."))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[610],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>g});var n=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var 
a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function l(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var i=n.createContext({}),d=function(e){var t=n.useContext(i),a=t;return e&&(a="function"==typeof e?e(t):l(l({},t),e)),a},c=function(e){var t=d(e.components);return n.createElement(i.Provider,{value:t},e.children)},s={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,r=e.originalType,i=e.parentName,c=p(e,["components","mdxType","originalType","parentName"]),m=d(a),g=o,y=m["".concat(i,".").concat(g)]||m[g]||s[g]||r;return a?n.createElement(y,l(l({ref:t},c),{},{components:a})):n.createElement(y,l({ref:t},c))}));function g(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=a.length,l=new Array(r);l[0]=m;var p={};for(var i in t)hasOwnProperty.call(t,i)&&(p[i]=t[i]);p.originalType=e,p.mdxType="string"==typeof e?e:o,l[1]=p;for(var d=2;d{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>g,frontMatter:()=>p,metadata:()=>d,toc:()=>s});var n=a(9668),o=a(1367),r=(a(6540),a(5680)),l=["components"],p={id:"openshift-commands",title:"Command Line Interface"},i=void 0,d={unversionedId:"openshift-commands",id:"openshift-commands",title:"Command Line Interface",description:"Overview",source:"@site/docs/openshift-commands.md",sourceDirName:".",slug:"/openshift-commands",permalink:"/docs/openshift-commands",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-commands.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-commands",title:"Command Line Interface"},sidebar:"docs",previous:{title:"Login to Docker registries",permalink:"/docs/login-docker-registry"},next:{title:"Data storage",permalink:"/docs/openshift-storage"}},c={},s=[{value:"Overview",id:"overview",level:2},{value:"Projects",id:"projects",level:2},{value:"List projects",id:"list-projects",level:3},{value:"Connect to project",id:"connect-to-project",level:3},{value:"ImageStreams",id:"imagestreams",level:2},{value:"Pods",id:"pods",level:2},{value:"Create pod from YAML",id:"create-pod-from-yaml",level:3},{value:"List pods",id:"list-pods",level:3},{value:"Get specific pod",id:"get-specific-pod",level:3},{value:"Remote Shell connection",id:"remote-shell-connection",level:3},{value:"Execute command in pod",id:"execute-command-in-pod",level:3},{value:"Delete pod",id:"delete-pod",level:3},{value:"Get pod logs",id:"get-pod-logs",level:3},{value:"Create app from template",id:"create-app-from-template",level:2},{value:"Copy files",id:"copy-files",level:3}],m={toc:s};function g(e){var t=e.components,a=(0,o.A)(e,l);return(0,r.yg)("wrapper",(0,n.A)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("h2",{id:"overview"},"Overview"),(0,r.yg)("p",null,"Here is an overview of common ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," 
commands:"),(0,r.yg)("table",null,(0,r.yg)("thead",{parentName:"table"},(0,r.yg)("tr",{parentName:"thead"},(0,r.yg)("th",{parentName:"tr",align:null},(0,r.yg)("strong",{parentName:"th"},"Command")),(0,r.yg)("th",{parentName:"tr",align:null},(0,r.yg)("strong",{parentName:"th"},"Description")))),(0,r.yg)("tbody",{parentName:"table"},(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc login --token=")),(0,r.yg)("td",{parentName:"tr",align:null},"Login to the DSRI OpenShift cluster in your terminal")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc get projects")),(0,r.yg)("td",{parentName:"tr",align:null},"List all available projects")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc project ")),(0,r.yg)("td",{parentName:"tr",align:null},"Switch to project")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc get pods ")),(0,r.yg)("td",{parentName:"tr",align:null},"Get running pods (a pod can run one or multiple containers for your application)")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc rsh ")),(0,r.yg)("td",{parentName:"tr",align:null},"Remote terminal connexion to a pod (Shell/Bash)")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc cp ")),(0,r.yg)("td",{parentName:"tr",align:null},"Copy files from host to container or vice versa, e.g. from host: ",(0,r.yg)("inlineCode",{parentName:"td"},"oc cp :")," or from to host: ",(0,r.yg)("inlineCode",{parentName:"td"},"oc cp : "))),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc rsync ")),(0,r.yg)("td",{parentName:"tr",align:null},"Similar to rsync command on Linux to synchronize directories between container and host or the other way around")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc exec ")),(0,r.yg)("td",{parentName:"tr",align:null},"Execute command in pods")),(0,r.yg)("tr",{parentName:"tbody"},(0,r.yg)("td",{parentName:"tr",align:null},(0,r.yg)("inlineCode",{parentName:"td"},"oc delete pod ")),(0,r.yg)("td",{parentName:"tr",align:null},"Delete pod")))),(0,r.yg)("h2",{id:"projects"},"Projects"),(0,r.yg)("h3",{id:"list-projects"},"List projects"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc projects\n")),(0,r.yg)("h3",{id:"connect-to-project"},"Connect to project"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc project my-project\n")),(0,r.yg)("hr",null),(0,r.yg)("h2",{id:"imagestreams"},"ImageStreams"),(0,r.yg)("p",null,"To update an ImageStream in your project to pull the latest update from the external repository (e.g. from ghcr.io or DockerHub):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc import-image \n")),(0,r.yg)("hr",null),(0,r.yg)("h2",{id:"pods"},"Pods"),(0,r.yg)("h3",{id:"create-pod-from-yaml"},"Create pod from YAML"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc create -f my-pod.yaml\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"E.g. 
",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/d2s-pod-virtuoso.yaml"},"d2s-pod-virtuoso.yaml"),".")),(0,r.yg)("h3",{id:"list-pods"},"List pods"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod\n")),(0,r.yg)("p",null,"List running pods:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pods --field-selector=status.phase=Running\n")),(0,r.yg)("h3",{id:"get-specific-pod"},"Get specific pod"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod | grep \n")),(0,r.yg)("p",null,"Using selector with Apache Flink as example, and showing only the pod id without header:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name\n")),(0,r.yg)("h3",{id:"remote-shell-connection"},"Remote Shell connection"),(0,r.yg)("p",null,"Connect to a pod with ",(0,r.yg)("a",{parentName:"p",href:"https://devhints.io/bash"},"Bash"),"."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc rsh \n")),(0,r.yg)("h3",{id:"execute-command-in-pod"},"Execute command in pod"),(0,r.yg)("p",null,"Example creating a folder:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc exec -- mkdir -p /mnt/workspace/resources\n")),(0,r.yg)("h3",{id:"delete-pod"},"Delete pod"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete pod \n")),(0,r.yg)("admonition",{title:"Force pod deletion",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"If the pod is not properly deleted, you can force its deletion:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete pod --force --grace-period=0 \n"))),(0,r.yg)("h3",{id:"get-pod-logs"},"Get pod logs"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc logs -f \n")),(0,r.yg)("admonition",{title:"Debug a pod",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Get more details on how to ",(0,r.yg)("a",{parentName:"p",href:"/docs/guide-monitoring"},"debug a pod"),".")),(0,r.yg)("h2",{id:"create-app-from-template"},"Create app from template"),(0,r.yg)("p",null,"Create app from template using the CLI and providing parameters as arguments:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc new-app my-template -p APPLICATION_NAME=my-app -p ADMIN_PASSWORD=mypassword\n")),(0,r.yg)("p",null,"Example for the Semantic Web course notebooks:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc new-app template-jupyterstack-notebook -p APPLICATION_NAME=swcourseName -p NOTEBOOK_PASSWORD=PASSWORD\n\noc delete all --selector template=template-jupyterstack-notebook\n")),(0,r.yg)("h3",{id:"copy-files"},"Copy files"),(0,r.yg)("p",null,"See the ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-load-data"},"Load data")," page."))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/391576c4.9b8ecdf8.js b/assets/js/391576c4.8129e6e7.js similarity index 98% rename from assets/js/391576c4.9b8ecdf8.js rename to assets/js/391576c4.8129e6e7.js index 02f9fbf3b..33629f4c6 100644 --- a/assets/js/391576c4.9b8ecdf8.js +++ b/assets/js/391576c4.8129e6e7.js @@ -1 +1 @@ -"use 
strict";(self.webpackChunk=self.webpackChunk||[]).push([[2567],{5680:(e,n,t)=>{t.d(n,{xA:()=>p,yg:()=>f});var r=t(6540);function o(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function a(e){for(var n=1;n=0||(o[t]=e[t]);return o}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(o[t]=e[t])}return o}var i=r.createContext({}),c=function(e){var n=r.useContext(i),t=n;return e&&(t="function"==typeof e?e(n):a(a({},n),e)),t},p=function(e){var n=c(e.components);return r.createElement(i.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return r.createElement(r.Fragment,{},n)}},d=r.forwardRef((function(e,n){var t=e.components,o=e.mdxType,l=e.originalType,i=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),d=c(t),f=o,y=d["".concat(i,".").concat(f)]||d[f]||u[f]||l;return t?r.createElement(y,a(a({ref:n},p),{},{components:t})):r.createElement(y,a({ref:n},p))}));function f(e,n){var t=arguments,o=n&&n.mdxType;if("string"==typeof e||o){var l=t.length,a=new Array(l);a[0]=d;var s={};for(var i in n)hasOwnProperty.call(n,i)&&(s[i]=n[i]);s.originalType=e,s.mdxType="string"==typeof e?e:o,a[1]=s;for(var c=2;c{t.r(n),t.d(n,{assets:()=>p,contentTitle:()=>i,default:()=>f,frontMatter:()=>s,metadata:()=>c,toc:()=>u});var r=t(9668),o=t(1367),l=(t(6540),t(5680)),a=["components"],s={id:"enabling-vpn-wsl",title:"Enabling VPN access in WSL2"},i=void 0,c={unversionedId:"enabling-vpn-wsl",id:"enabling-vpn-wsl",title:"Enabling VPN access in WSL2",description:"Follow these steps in the WSL2 environment:",source:"@site/docs/enabling-vpn-wsl.md",sourceDirName:".",slug:"/enabling-vpn-wsl",permalink:"/docs/enabling-vpn-wsl",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/enabling-vpn-wsl.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"enabling-vpn-wsl",title:"Enabling VPN access in WSL2"},sidebar:"docs",previous:{title:"Contribute",permalink:"/docs/contribute"},next:{title:"Increase your processes speed",permalink:"/docs/increase-process-speed"}},p={},u=[{value:"Follow these steps in the WSL2 environment:",id:"follow-these-steps-in-the-wsl2-environment",level:2}],d={toc:u};function f(e){var n=e.components,t=(0,o.A)(e,a);return(0,l.yg)("wrapper",(0,r.A)({},d,t,{components:n,mdxType:"MDXLayout"}),(0,l.yg)("h2",{id:"follow-these-steps-in-the-wsl2-environment"},"Follow these steps in the WSL2 environment:"),(0,l.yg)("p",null,"Create a file in /etc/wsl.conf:"),(0,l.yg)("p",null,"[network]"),(0,l.yg)("p",null,"generateResolvConf = false"),(0,l.yg)("p",null,"This makes sure that WSL2 does not generate it's own resolv.conf anymore."),(0,l.yg)("p",null,"Edit the file /etc/resolv.conf and add the appropiate nameservers:"),(0,l.yg)("p",null,"nameserver 137.120.1.1"),(0,l.yg)("p",null,"nameserver 137.120.1.5"),(0,l.yg)("p",null,"nameserver 8.8.8.8 # OR OF YOUR CHOOSING"),(0,l.yg)("p",null,"search unimaas.nl"),(0,l.yg)("p",null,"These are all the steps you should take in WSL2. 
Now you should do the following step after you connected to the VPN.\nYou can run this command in Powershell:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-Powershell"},'Get-NetAdapter | Where-Object {$_.InterfaceDescription -Match "Cisco AnyConnect"} | Set-NetIPInterface -InterfaceMetric 6000\n\n')),(0,l.yg)("p",null,"you should now be able to verify that WSL2 has connectivity:"),(0,l.yg)("p",null,"ping google.com -c 4"))}f.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2567],{5680:(e,n,t)=>{t.d(n,{xA:()=>p,yg:()=>f});var r=t(6540);function o(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function a(e){for(var n=1;n=0||(o[t]=e[t]);return o}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(o[t]=e[t])}return o}var i=r.createContext({}),c=function(e){var n=r.useContext(i),t=n;return e&&(t="function"==typeof e?e(n):a(a({},n),e)),t},p=function(e){var n=c(e.components);return r.createElement(i.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return r.createElement(r.Fragment,{},n)}},d=r.forwardRef((function(e,n){var t=e.components,o=e.mdxType,l=e.originalType,i=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),d=c(t),f=o,y=d["".concat(i,".").concat(f)]||d[f]||u[f]||l;return t?r.createElement(y,a(a({ref:n},p),{},{components:t})):r.createElement(y,a({ref:n},p))}));function f(e,n){var t=arguments,o=n&&n.mdxType;if("string"==typeof e||o){var l=t.length,a=new Array(l);a[0]=d;var s={};for(var i in n)hasOwnProperty.call(n,i)&&(s[i]=n[i]);s.originalType=e,s.mdxType="string"==typeof e?e:o,a[1]=s;for(var c=2;c{t.r(n),t.d(n,{assets:()=>p,contentTitle:()=>i,default:()=>f,frontMatter:()=>s,metadata:()=>c,toc:()=>u});var r=t(9668),o=t(1367),l=(t(6540),t(5680)),a=["components"],s={id:"enabling-vpn-wsl",title:"Enabling VPN access in WSL2"},i=void 0,c={unversionedId:"enabling-vpn-wsl",id:"enabling-vpn-wsl",title:"Enabling VPN access in WSL2",description:"Follow these steps in the WSL2 environment:",source:"@site/docs/enabling-vpn-wsl.md",sourceDirName:".",slug:"/enabling-vpn-wsl",permalink:"/docs/enabling-vpn-wsl",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/enabling-vpn-wsl.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"enabling-vpn-wsl",title:"Enabling VPN access in WSL2"},sidebar:"docs",previous:{title:"Contribute",permalink:"/docs/contribute"},next:{title:"Increase your processes speed",permalink:"/docs/increase-process-speed"}},p={},u=[{value:"Follow these steps in the WSL2 environment:",id:"follow-these-steps-in-the-wsl2-environment",level:2}],d={toc:u};function f(e){var n=e.components,t=(0,o.A)(e,a);return(0,l.yg)("wrapper",(0,r.A)({},d,t,{components:n,mdxType:"MDXLayout"}),(0,l.yg)("h2",{id:"follow-these-steps-in-the-wsl2-environment"},"Follow these steps in the WSL2 environment:"),(0,l.yg)("p",null,"Create a file in /etc/wsl.conf:"),(0,l.yg)("p",null,"[network]"),(0,l.yg)("p",null,"generateResolvConf = 
false"),(0,l.yg)("p",null,"This makes sure that WSL2 does not generate it's own resolv.conf anymore."),(0,l.yg)("p",null,"Edit the file /etc/resolv.conf and add the appropiate nameservers:"),(0,l.yg)("p",null,"nameserver 137.120.1.1"),(0,l.yg)("p",null,"nameserver 137.120.1.5"),(0,l.yg)("p",null,"nameserver 8.8.8.8 # OR OF YOUR CHOOSING"),(0,l.yg)("p",null,"search unimaas.nl"),(0,l.yg)("p",null,"These are all the steps you should take in WSL2. Now you should do the following step after you connected to the VPN.\nYou can run this command in Powershell:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-Powershell"},'Get-NetAdapter | Where-Object {$_.InterfaceDescription -Match "Cisco AnyConnect"} | Set-NetIPInterface -InterfaceMetric 6000\n\n')),(0,l.yg)("p",null,"you should now be able to verify that WSL2 has connectivity:"),(0,l.yg)("p",null,"ping google.com -c 4"))}f.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/3a93ea81.afbe68e0.js b/assets/js/3a93ea81.ba42fafc.js similarity index 99% rename from assets/js/3a93ea81.afbe68e0.js rename to assets/js/3a93ea81.ba42fafc.js index e480a104e..9a5ad4c64 100644 --- a/assets/js/3a93ea81.afbe68e0.js +++ b/assets/js/3a93ea81.ba42fafc.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3684],{5680:(e,t,a)=>{a.d(t,{xA:()=>u,yg:()=>d});var n=a(6540);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function l(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function o(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var i=n.createContext({}),s=function(e){var t=n.useContext(i),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},u=function(e){var t=s(e.components);return n.createElement(i.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},y=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,l=e.originalType,i=e.parentName,u=p(e,["components","mdxType","originalType","parentName"]),y=s(a),d=r,m=y["".concat(i,".").concat(d)]||y[d]||c[d]||l;return a?n.createElement(m,o(o({ref:t},u),{},{components:a})):n.createElement(m,o({ref:t},u))}));function d(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var l=a.length,o=new Array(l);o[0]=y;var p={};for(var i in t)hasOwnProperty.call(t,i)&&(p[i]=t[i]);p.originalType=e,p.mdxType="string"==typeof e?e:r,o[1]=p;for(var s=2;s{a.r(t),a.d(t,{assets:()=>u,contentTitle:()=>i,default:()=>d,frontMatter:()=>p,metadata:()=>s,toc:()=>c});var n=a(9668),r=a(1367),l=(a(6540),a(5680)),o=["components"],p={id:"jupyterhub-spark",title:"JupyterHub with Spark"},i=void 0,s={unversionedId:"jupyterhub-spark",id:"jupyterhub-spark",title:"JupyterHub with Spark",description:"JupyterHub is ideal to enable multiple users easily start predefined workspaces in the same project. 
The complimentary Apache Spark cluster can be used from the workspaces to perform distributed processing.",source:"@site/docs/jupyterhub-spark.md",sourceDirName:".",slug:"/jupyterhub-spark",permalink:"/docs/jupyterhub-spark",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/jupyterhub-spark.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"jupyterhub-spark",title:"JupyterHub with Spark"}},u={},c=[{value:"\ud83e\uddca Install kfctl",id:"-install-kfctl",level:2},{value:"\ud83e\ude90 Deploy JupyterHub and Spark",id:"-deploy-jupyterhub-and-spark",level:2},{value:"\u2728 Use the Spark cluster",id:"-use-the-spark-cluster",level:2},{value:"Match the version",id:"match-the-version",level:3},{value:"Spark UI",id:"spark-ui",level:3},{value:"New Spark cluster",id:"new-spark-cluster",level:3},{value:"\ud83d\uddd1\ufe0f Delete the deployment",id:"\ufe0f-delete-the-deployment",level:2}],y={toc:c};function d(e){var t=e.components,a=(0,r.A)(e,o);return(0,l.yg)("wrapper",(0,n.A)({},y,a,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("p",null,"JupyterHub is ideal to enable multiple users easily start predefined workspaces in the same project. The complimentary Apache Spark cluster can be used from the workspaces to perform distributed processing."),(0,l.yg)("h2",{id:"-install-kfctl"},"\ud83e\uddca Install kfctl"),(0,l.yg)("p",null,"You will need to have the usual ",(0,l.yg)("inlineCode",{parentName:"p"},"oc")," tool installed, and to install ",(0,l.yg)("inlineCode",{parentName:"p"},"kfctl")," on your machine, a tool to deploy Kubeflow applications, download the ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/kfctl/releases"},"latest version for your OS \ud83d\udce5\ufe0f")," "),(0,l.yg)("p",null,"You can then install it by downloading the binary and putting it in your path, for example on Linux:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"wget https://github.com/kubeflow/kfctl/releases/download/v1.2.0/kfctl_v1.2.0-0-gbc038f9_linux.tar.gz\ntar -xzf kfctl_v1.2.0-0-gbc038f9_linux.tar.gz\nsudo mv kfctl /usr/local/bin/\n")),(0,l.yg)("p",null,"Clone the repository with the DSRI custom images and deployments for the OpenDataHub platform, and go to the ",(0,l.yg)("inlineCode",{parentName:"p"},"kfdef")," folder:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"git clone https://github.com/MaastrichtU-IDS/odh-manifests\ncd odh-manifests/kfdef\n")),(0,l.yg)("h2",{id:"-deploy-jupyterhub-and-spark"},"\ud83e\ude90 Deploy JupyterHub and Spark"),(0,l.yg)("admonition",{title:"Go the the kfdef folder",type:"info"},(0,l.yg)("p",{parentName:"admonition"},"All scripts need to be run from the ",(0,l.yg)("inlineCode",{parentName:"p"},"kfdef")," folder \ud83d\udcc2")),(0,l.yg)("p",null,"You can deploy JupyterHub with 2 different authentications system, use the file corresponding to your choice:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"For the default DSRI authentication use ",(0,l.yg)("inlineCode",{parentName:"p"},"kfctl_openshift_dsri.yaml"))),(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"For GitHub authentication use ",(0,l.yg)("inlineCode",{parentName:"p"},"kfctl_openshift_github.yaml")),(0,l.yg)("ul",{parentName:"li"},(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"You need to create a new GitHub OAuth app: 
",(0,l.yg)("a",{parentName:"p",href:"https://github.com/settings/developers"},"https://github.com/settings/developers"))),(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"And provide the GitHub client ID and secret through environment variable before running the start script:"),(0,l.yg)("pre",{parentName:"li"},(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"export GITHUB_CLIENT_ID=YOUR_CLIENT_ID\nexport GITHUB_CLIENT_SECRET=YOUR_CLIENT_SECRET\n")))))),(0,l.yg)("p",null,"First you will need to change the ",(0,l.yg)("inlineCode",{parentName:"p"},"namespace:")," in the file you want to deploy, to provide the project where you want to start JupyterHub (currently ",(0,l.yg)("inlineCode",{parentName:"p"},"opendatahub-ids"),"), then you can deploy JupyterHub and Spark with ",(0,l.yg)("inlineCode",{parentName:"p"},"kfctl"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"./start_odh.sh kfctl_openshift_dsri.yaml\n")),(0,l.yg)("p",null,"\ud83d\uddc4\ufe0f Persistent volumes are automatically created for each instance started in JupyterHub to insure persistence of the data even JupyterHub is stopped. You can find the persistent volumes in the DSRI web UI, go to the ",(0,l.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,l.yg)("strong",{parentName:"p"},"Storage")," > ",(0,l.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,l.yg)("p",null,"\u26a1\ufe0f A Spark cluster with 3 workers is automatically created with the service name ",(0,l.yg)("inlineCode",{parentName:"p"},"spark-cluster"),", you can use the URL of the master node to access it from your workspace: ",(0,l.yg)("inlineCode",{parentName:"p"},"spark://spark-cluster:7077")),(0,l.yg)("h2",{id:"-use-the-spark-cluster"},"\u2728 Use the Spark cluster"),(0,l.yg)("admonition",{title:"Matching Spark versions",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"Make sure all the Spark versions are matching, the current default version is ",(0,l.yg)("inlineCode",{parentName:"p"},"3.0.1"))),(0,l.yg)("p",null,"You can test the Spark cluster connection with PySpark:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-python"},"from pyspark.sql import SparkSession, SQLContext\nimport os\nimport socket\n# Create a Spark session\nspark_cluster_url = \"spark://spark-cluster:7077\"\nspark = SparkSession.builder.master(spark_cluster_url).getOrCreate()\nsc = spark.sparkContext\n\n# Test your Spark connection\nspark.range(5, numPartitions=5).rdd.map(lambda x: socket.gethostname()).distinct().collect()\n# Or try:\n#x = ['spark', 'rdd', 'example', 'sample', 'example']\nx = [1, 2, 3, 4, 5]\ny = sc.parallelize(x)\ny.collect()\n# Or try:\ndata = [1, 2, 3, 4, 5]\ndistData = sc.parallelize(data)\ndistData.reduce(lambda a, b: a + b)\n")),(0,l.yg)("h3",{id:"match-the-version"},"Match the version"),(0,l.yg)("p",null,"Make sure all the Spark versions are matching, the current default version is ",(0,l.yg)("inlineCode",{parentName:"p"},"3.0.1"),":"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Go to the Spark UI to verify the version of the Spark cluster"),(0,l.yg)("li",{parentName:"ul"},"Run ",(0,l.yg)("inlineCode",{parentName:"li"},"spark-shell --version")," to verify the version of the Spark binary installed in the workspace"),(0,l.yg)("li",{parentName:"ul"},"Run ",(0,l.yg)("inlineCode",{parentName:"li"},"pip list | grep pyspark")," to verify the version of the PySpark library")),(0,l.yg)("p",null,"Check the 
",(0,l.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/jupyterlab/blob/main/Dockerfile#L14"},"JupyterLab workspace ",(0,l.yg)("inlineCode",{parentName:"a"},"Dockerfile"))," to change the version of Spark installed in the workspace, and see how you can download and install a new version of the Spark binary."),(0,l.yg)("p",null,"If you need to change the Python, Java or PySpark version in the workspace you can create a ",(0,l.yg)("inlineCode",{parentName:"p"},"environment.yml")," file, for example for ",(0,l.yg)("inlineCode",{parentName:"p"},"2.4.5"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-yaml"},"name: spark\nchannels:\n - defaults\n - conda-forge\n - anaconda\ndependencies:\n - python=3.7\n - openjdk=8\n - ipykernel \n - nb_conda_kernels\n - pip\n - pip:\n - pyspark==2.4.5\n")),(0,l.yg)("p",null,"Create the environment with ",(0,l.yg)("inlineCode",{parentName:"p"},"conda"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"mamba env create -f environment.yml\n")),(0,l.yg)("h3",{id:"spark-ui"},"Spark UI"),(0,l.yg)("p",null,"You can also create a route to access the Spark UI and monitor the activity on the Spark cluster:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"oc expose svc/spark-cluster-ui\n")),(0,l.yg)("p",null,"Get the Spark UI URL:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"oc get route --selector radanalytics.io/service=ui --no-headers -o=custom-columns=HOST:.spec.host\n")),(0,l.yg)("h3",{id:"new-spark-cluster"},"New Spark cluster"),(0,l.yg)("p",null,"You can create a new Spark cluster, for example here using Spark ",(0,l.yg)("inlineCode",{parentName:"p"},"3.0.1")," with the installed Spark Operator:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"cat <{a.d(t,{xA:()=>u,yg:()=>d});var n=a(6540);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function l(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function o(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var i=n.createContext({}),s=function(e){var t=n.useContext(i),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},u=function(e){var t=s(e.components);return n.createElement(i.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},y=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,l=e.originalType,i=e.parentName,u=p(e,["components","mdxType","originalType","parentName"]),y=s(a),d=r,m=y["".concat(i,".").concat(d)]||y[d]||c[d]||l;return a?n.createElement(m,o(o({ref:t},u),{},{components:a})):n.createElement(m,o({ref:t},u))}));function d(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var l=a.length,o=new Array(l);o[0]=y;var p={};for(var i in t)hasOwnProperty.call(t,i)&&(p[i]=t[i]);p.originalType=e,p.mdxType="string"==typeof e?e:r,o[1]=p;for(var s=2;s{a.r(t),a.d(t,{assets:()=>u,contentTitle:()=>i,default:()=>d,frontMatter:()=>p,metadata:()=>s,toc:()=>c});var 
n=a(9668),r=a(1367),l=(a(6540),a(5680)),o=["components"],p={id:"jupyterhub-spark",title:"JupyterHub with Spark"},i=void 0,s={unversionedId:"jupyterhub-spark",id:"jupyterhub-spark",title:"JupyterHub with Spark",description:"JupyterHub is ideal for enabling multiple users to easily start predefined workspaces in the same project. The complementary Apache Spark cluster can be used from the workspaces to perform distributed processing.",source:"@site/docs/jupyterhub-spark.md",sourceDirName:".",slug:"/jupyterhub-spark",permalink:"/docs/jupyterhub-spark",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/jupyterhub-spark.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"jupyterhub-spark",title:"JupyterHub with Spark"}},u={},c=[{value:"\ud83e\uddca Install kfctl",id:"-install-kfctl",level:2},{value:"\ud83e\ude90 Deploy JupyterHub and Spark",id:"-deploy-jupyterhub-and-spark",level:2},{value:"\u2728 Use the Spark cluster",id:"-use-the-spark-cluster",level:2},{value:"Match the version",id:"match-the-version",level:3},{value:"Spark UI",id:"spark-ui",level:3},{value:"New Spark cluster",id:"new-spark-cluster",level:3},{value:"\ud83d\uddd1\ufe0f Delete the deployment",id:"\ufe0f-delete-the-deployment",level:2}],y={toc:c};function d(e){var t=e.components,a=(0,r.A)(e,o);return(0,l.yg)("wrapper",(0,n.A)({},y,a,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("p",null,"JupyterHub is ideal for enabling multiple users to easily start predefined workspaces in the same project. The complementary Apache Spark cluster can be used from the workspaces to perform distributed processing."),(0,l.yg)("h2",{id:"-install-kfctl"},"\ud83e\uddca Install kfctl"),(0,l.yg)("p",null,"You will need to have the usual ",(0,l.yg)("inlineCode",{parentName:"p"},"oc")," tool installed, and to install ",(0,l.yg)("inlineCode",{parentName:"p"},"kfctl"),", a tool to deploy Kubeflow applications, on your machine: download the ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/kfctl/releases"},"latest version for your OS \ud83d\udce5\ufe0f")," "),(0,l.yg)("p",null,"You can then install it by downloading the binary and putting it in your path, for example on Linux:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"wget https://github.com/kubeflow/kfctl/releases/download/v1.2.0/kfctl_v1.2.0-0-gbc038f9_linux.tar.gz\ntar -xzf kfctl_v1.2.0-0-gbc038f9_linux.tar.gz\nsudo mv kfctl /usr/local/bin/\n")),(0,l.yg)("p",null,"Clone the repository with the DSRI custom images and deployments for the OpenDataHub platform, and go to the ",(0,l.yg)("inlineCode",{parentName:"p"},"kfdef")," folder:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"git clone https://github.com/MaastrichtU-IDS/odh-manifests\ncd odh-manifests/kfdef\n")),(0,l.yg)("h2",{id:"-deploy-jupyterhub-and-spark"},"\ud83e\ude90 Deploy JupyterHub and Spark"),(0,l.yg)("admonition",{title:"Go to the kfdef folder",type:"info"},(0,l.yg)("p",{parentName:"admonition"},"All scripts need to be run from the ",(0,l.yg)("inlineCode",{parentName:"p"},"kfdef")," folder \ud83d\udcc2")),(0,l.yg)("p",null,"You can deploy JupyterHub with two different authentication systems; use the file corresponding to your choice:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"For the default DSRI authentication use 
",(0,l.yg)("inlineCode",{parentName:"p"},"kfctl_openshift_dsri.yaml"))),(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"For GitHub authentication use ",(0,l.yg)("inlineCode",{parentName:"p"},"kfctl_openshift_github.yaml")),(0,l.yg)("ul",{parentName:"li"},(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"You need to create a new GitHub OAuth app: ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/settings/developers"},"https://github.com/settings/developers"))),(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"And provide the GitHub client ID and secret through environment variable before running the start script:"),(0,l.yg)("pre",{parentName:"li"},(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"export GITHUB_CLIENT_ID=YOUR_CLIENT_ID\nexport GITHUB_CLIENT_SECRET=YOUR_CLIENT_SECRET\n")))))),(0,l.yg)("p",null,"First you will need to change the ",(0,l.yg)("inlineCode",{parentName:"p"},"namespace:")," in the file you want to deploy, to provide the project where you want to start JupyterHub (currently ",(0,l.yg)("inlineCode",{parentName:"p"},"opendatahub-ids"),"), then you can deploy JupyterHub and Spark with ",(0,l.yg)("inlineCode",{parentName:"p"},"kfctl"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"./start_odh.sh kfctl_openshift_dsri.yaml\n")),(0,l.yg)("p",null,"\ud83d\uddc4\ufe0f Persistent volumes are automatically created for each instance started in JupyterHub to insure persistence of the data even JupyterHub is stopped. You can find the persistent volumes in the DSRI web UI, go to the ",(0,l.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,l.yg)("strong",{parentName:"p"},"Storage")," > ",(0,l.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,l.yg)("p",null,"\u26a1\ufe0f A Spark cluster with 3 workers is automatically created with the service name ",(0,l.yg)("inlineCode",{parentName:"p"},"spark-cluster"),", you can use the URL of the master node to access it from your workspace: ",(0,l.yg)("inlineCode",{parentName:"p"},"spark://spark-cluster:7077")),(0,l.yg)("h2",{id:"-use-the-spark-cluster"},"\u2728 Use the Spark cluster"),(0,l.yg)("admonition",{title:"Matching Spark versions",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"Make sure all the Spark versions are matching, the current default version is ",(0,l.yg)("inlineCode",{parentName:"p"},"3.0.1"))),(0,l.yg)("p",null,"You can test the Spark cluster connection with PySpark:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-python"},"from pyspark.sql import SparkSession, SQLContext\nimport os\nimport socket\n# Create a Spark session\nspark_cluster_url = \"spark://spark-cluster:7077\"\nspark = SparkSession.builder.master(spark_cluster_url).getOrCreate()\nsc = spark.sparkContext\n\n# Test your Spark connection\nspark.range(5, numPartitions=5).rdd.map(lambda x: socket.gethostname()).distinct().collect()\n# Or try:\n#x = ['spark', 'rdd', 'example', 'sample', 'example']\nx = [1, 2, 3, 4, 5]\ny = sc.parallelize(x)\ny.collect()\n# Or try:\ndata = [1, 2, 3, 4, 5]\ndistData = sc.parallelize(data)\ndistData.reduce(lambda a, b: a + b)\n")),(0,l.yg)("h3",{id:"match-the-version"},"Match the version"),(0,l.yg)("p",null,"Make sure all the Spark versions are matching, the current default version is ",(0,l.yg)("inlineCode",{parentName:"p"},"3.0.1"),":"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Go to the Spark UI to verify the version of the Spark 
cluster"),(0,l.yg)("li",{parentName:"ul"},"Run ",(0,l.yg)("inlineCode",{parentName:"li"},"spark-shell --version")," to verify the version of the Spark binary installed in the workspace"),(0,l.yg)("li",{parentName:"ul"},"Run ",(0,l.yg)("inlineCode",{parentName:"li"},"pip list | grep pyspark")," to verify the version of the PySpark library")),(0,l.yg)("p",null,"Check the ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/jupyterlab/blob/main/Dockerfile#L14"},"JupyterLab workspace ",(0,l.yg)("inlineCode",{parentName:"a"},"Dockerfile"))," to change the version of Spark installed in the workspace, and see how you can download and install a new version of the Spark binary."),(0,l.yg)("p",null,"If you need to change the Python, Java or PySpark version in the workspace you can create a ",(0,l.yg)("inlineCode",{parentName:"p"},"environment.yml")," file, for example for ",(0,l.yg)("inlineCode",{parentName:"p"},"2.4.5"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-yaml"},"name: spark\nchannels:\n - defaults\n - conda-forge\n - anaconda\ndependencies:\n - python=3.7\n - openjdk=8\n - ipykernel \n - nb_conda_kernels\n - pip\n - pip:\n - pyspark==2.4.5\n")),(0,l.yg)("p",null,"Create the environment with ",(0,l.yg)("inlineCode",{parentName:"p"},"conda"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"mamba env create -f environment.yml\n")),(0,l.yg)("h3",{id:"spark-ui"},"Spark UI"),(0,l.yg)("p",null,"You can also create a route to access the Spark UI and monitor the activity on the Spark cluster:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"oc expose svc/spark-cluster-ui\n")),(0,l.yg)("p",null,"Get the Spark UI URL:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"oc get route --selector radanalytics.io/service=ui --no-headers -o=custom-columns=HOST:.spec.host\n")),(0,l.yg)("h3",{id:"new-spark-cluster"},"New Spark cluster"),(0,l.yg)("p",null,"You can create a new Spark cluster, for example here using Spark ",(0,l.yg)("inlineCode",{parentName:"p"},"3.0.1")," with the installed Spark Operator:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"cat <{a.d(t,{xA:()=>g,yg:()=>y});var i=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,i)}return a}function n(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var u=i.createContext({}),s=function(e){var t=i.useContext(u),a=t;return e&&(a="function"==typeof e?e(t):n(n({},t),e)),a},g=function(e){var t=s(e.components);return i.createElement(u.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return i.createElement(i.Fragment,{},t)}},c=i.forwardRef((function(e,t){var a=e.components,o=e.mdxType,r=e.originalType,u=e.parentName,g=l(e,["components","mdxType","originalType","parentName"]),c=s(a),y=o,m=c["".concat(u,".").concat(y)]||c[y]||p[y]||r;return a?i.createElement(m,n(n({ref:t},g),{},{components:a})):i.createElement(m,n({ref:t},g))}));function y(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof 
e||o){var r=a.length,n=new Array(r);n[0]=c;var l={};for(var u in t)hasOwnProperty.call(t,u)&&(l[u]=t[u]);l.originalType=e,l.mdxType="string"==typeof e?e:o,n[1]=l;for(var s=2;s{a.r(t),a.d(t,{assets:()=>g,contentTitle:()=>u,default:()=>y,frontMatter:()=>l,metadata:()=>s,toc:()=>p});var i=a(9668),o=a(1367),r=(a(6540),a(5680)),n=["components"],l={id:"guide-publish-image",title:"Publish a Docker image"},u=void 0,s={unversionedId:"guide-publish-image",id:"guide-publish-image",title:"Publish a Docker image",description:"\u26a0\ufe0f DockerHub imposes strict pull limitations for clusters like the DSRI (using DockerHub might result in failing to pull your images on the DSRI).",source:"@site/docs/guide-publish-image.md",sourceDirName:".",slug:"/guide-publish-image",permalink:"/docs/guide-publish-image",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-publish-image.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-publish-image",title:"Publish a Docker image"},sidebar:"docs",previous:{title:"Known Issues",permalink:"/docs/guide-known-issues"},next:{title:"Install UM VPN",permalink:"/docs/guide-vpn"}},g={},p=[{value:"Login to Container Registries \ud83d\udd11",id:"login-to-container-registries-",level:2},{value:"Login to GitHub Container Registry",id:"login-to-github-container-registry",level:3},{value:"Login to quay.io",id:"login-to-quayio",level:3},{value:"Login to DockerHub",id:"login-to-dockerhub",level:3},{value:"Publish your image \ud83d\udce2",id:"publish-your-image-",level:2},{value:"Publish to GitHub Container Registry",id:"publish-to-github-container-registry",level:3},{value:"Publish to Quay.io",id:"publish-to-quayio",level:3},{value:"Publish to DockerHub",id:"publish-to-dockerhub",level:3},{value:"Use automated workflows",id:"use-automated-workflows",level:3}],c={toc:p};function y(e){var t=e.components,a=(0,o.A)(e,n);return(0,r.yg)("wrapper",(0,i.A)({},c,a,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("admonition",{title:"DockerHub pull rates limitations",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"\u26a0\ufe0f ",(0,r.yg)("strong",{parentName:"p"},"DockerHub imposes strict pull limitations for clusters")," like the DSRI (using DockerHub might result in failing to pull your images on the DSRI). 
"),(0,r.yg)("p",{parentName:"admonition"},"We highly recommend to ",(0,r.yg)("strong",{parentName:"p"},"use the ",(0,r.yg)("a",{parentName:"strong",href:"https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry/about-github-container-registry"},"GitHub Container Registry")," or ",(0,r.yg)("a",{parentName:"strong",href:"https://quay.io/"},"RedHat quay.io Container Registry")," to publish public Docker images"),".")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"You can also login to DockerHub using a Secret in OpenShift to increase the pull rates limitations from 100 to 200 every 6 hours (this will mitigate the issue, but not solve it completely if you do not have a paid account on DockerHub):"),(0,r.yg)("pre",{parentName:"blockquote"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc create secret docker-registry docker-hub-secret --docker-server=docker.io --docker-username=your-dockerhub-username --docker-password=your-dockerhub-password --docker-email=your-dockerhub-email\n"))),(0,r.yg)("h2",{id:"login-to-container-registries-"},"Login to Container Registries \ud83d\udd11"),(0,r.yg)("h3",{id:"login-to-github-container-registry"},"Login to GitHub Container Registry"),(0,r.yg)("p",null,"Use your existing ",(0,r.yg)("a",{parentName:"p",href:"https://github.com"},"GitHub")," account if you have one:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Create a ",(0,r.yg)("strong",{parentName:"li"},"Personal Access Token")," for GitHub packages at ",(0,r.yg)("strong",{parentName:"li"},(0,r.yg)("a",{parentName:"strong",href:"https://github.com/settings/tokens/new"},"https://github.com/settings/tokens/new"))),(0,r.yg)("li",{parentName:"ol"},"Provide a meaningful description for the token, and enable the following scopes when creating the token:",(0,r.yg)("ul",{parentName:"li"},(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"write:packages"),": publish container images to GitHub Container Registry"),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"delete:packages"),": delete specified versions of private or public container images from GitHub Container Registry"))),(0,r.yg)("li",{parentName:"ol"},"You might want to store this token in a safe place, as you will not be able to retrieve it later on github.com (you can still delete it, and create a new token easily if you lose your token)"),(0,r.yg)("li",{parentName:"ol"},"\ud83d\udc68\u200d\ud83d\udcbb Log in to the GitHub Container Registry in your terminal (change ",(0,r.yg)("inlineCode",{parentName:"li"},"USERNAME")," and ",(0,r.yg)("inlineCode",{parentName:"li"},"ACCESS_TOKEN")," to yours):")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'echo "ACCESS_TOKEN" | docker login ghcr.io -u USERNAME --password-stdin\n')),(0,r.yg)("p",null,"On Windows use this command:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'docker login ghcr.io -u USERNAME -p "ACCESS_TOKEN"\n')),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"See the ",(0,r.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/packages/using-github-packages-with-your-projects-ecosystem/configuring-docker-for-use-with-github-packages"},"official GitHub documentation"),".")),(0,r.yg)("h3",{id:"login-to-quayio"},"Login to quay.io"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Create an account at 
",(0,r.yg)("a",{parentName:"li",href:"https://quay.io"},"https://quay.io")," "),(0,r.yg)("li",{parentName:"ol"},"Login in your terminal (you will be asked for username and password)")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker login quay.io\n")),(0,r.yg)("h3",{id:"login-to-dockerhub"},"Login to DockerHub"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Get a ",(0,r.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"DockerHub")," account at ",(0,r.yg)("a",{parentName:"p",href:"https://hub.docker.com"},"https://hub.docker.com")," (you most probably already have one if you installed Docker Desktop)")),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"\ud83d\udc69\u200d\ud83d\udcbb Run in your terminal:"))),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker login\n")),(0,r.yg)("ol",{start:3},(0,r.yg)("li",{parentName:"ol"},"Provide your DockerHub username and password.")),(0,r.yg)("h2",{id:"publish-your-image-"},"Publish your image \ud83d\udce2"),(0,r.yg)("p",null,"Once you built a Docker image, and you logged in to a Container Registry, you might want to publish the image to pull and re-use it easily later."),(0,r.yg)("h3",{id:"publish-to-github-container-registry"},"Publish to GitHub Container Registry"),(0,r.yg)("admonition",{title:"Free for public images",type:"tip"},(0,r.yg)("p",{parentName:"admonition"},"The ",(0,r.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry"},"GitHub Container Registry")," is still in beta but will be free for public images when fully released. It enables you to store your Docker images at the same place you keep your code! \ud83d\udce6")),(0,r.yg)("p",null,"Publish to your user Container Registry on GitHub:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t ghcr.io/github-username/my-image:latest .\ndocker push ghcr.io/github-username/my-image:latest\n")),(0,r.yg)("p",null,"For example, to the ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/orgs/MaastrichtU-IDS/packages"},"MaastrichtU-IDS organization Container Registry on GitHub"),":"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t ghcr.io/maastrichtu-ids/jupyterlab:latest .\ndocker push ghcr.io/maastrichtu-ids/jupyterlab:latest\n")),(0,r.yg)("admonition",{title:"Created automatically",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"If the image does not exist, GitHub Container Registry will create it automatically and set it as ",(0,r.yg)("strong",{parentName:"p"},"Private")," by default. 
You can easily change it to ",(0,r.yg)("strong",{parentName:"p"},"Public")," in the image settings on github.com.")),(0,r.yg)("h3",{id:"publish-to-quayio"},"Publish to Quay.io"),(0,r.yg)("admonition",{title:"Free for public images",type:"tip"},(0,r.yg)("p",{parentName:"admonition"},"Quay.io is free for public images and does not restrict image pulls.")),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Create the image on ",(0,r.yg)("a",{parentName:"p",href:"https://quay.io/"},"quay.io"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Build and push to ",(0,r.yg)("a",{parentName:"p",href:"https://quay.io/"},"quay.io")))),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t quay.io/quay-username/my-image:latest .\ndocker push quay.io/quay-username/my-image:latest\n")),(0,r.yg)("h3",{id:"publish-to-dockerhub"},"Publish to DockerHub"),(0,r.yg)("admonition",{title:"DockerHub pull rates limitations",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"\u26a0\ufe0f ",(0,r.yg)("strong",{parentName:"p"},"DockerHub imposes strict pull limitations for clusters")," like the DSRI (using DockerHub might result in failing to pull your images on the DSRI). "),(0,r.yg)("p",{parentName:"admonition"},"We highly recommend that you ",(0,r.yg)("strong",{parentName:"p"},"use the ",(0,r.yg)("a",{parentName:"strong",href:"https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry/about-github-container-registry"},"GitHub Container Registry")," or ",(0,r.yg)("a",{parentName:"strong",href:"https://quay.io/"},"RedHat quay.io Container Registry")," to publish public Docker images"),".")),(0,r.yg)("admonition",{title:"Logged in",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"If you are logged in with your DockerHub user on the DSRI, it should allow you to pull DockerHub images in your project (see above).")),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Create the repository on ",(0,r.yg)("a",{parentName:"li",href:"https://hub.docker.com/"},"DockerHub")," (attached to your user or an ",(0,r.yg)("a",{parentName:"li",href:"https://hub.docker.com/orgs/umids/repositories"},"organization"),")"),(0,r.yg)("li",{parentName:"ol"},"Build and push the image:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t dockerhub-username/jupyterlab:latest .\ndocker push dockerhub-username/jupyterlab:latest\n")),(0,r.yg)("p",null,"You can also change the name (aka. 
tag) of an existing image:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t my-jupyterlab .\ndocker tag my-jupyterlab ghcr.io/github-username/jupyterlab:latest\n")),(0,r.yg)("h3",{id:"use-automated-workflows"},"Use automated workflows"),(0,r.yg)("p",null,"You can automate the building and publication of Docker images using GitHub Actions workflows \ud83d\udd04"),(0,r.yg)("admonition",{title:"Use a working workflow as example",type:"tip"},(0,r.yg)("p",{parentName:"admonition"},"\ud83d\udc40 Check the ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/get-started-with-docker/blob/main/.github/workflows/publish-docker.yml"},".github/workflows/publish-docker.yml file")," to see an example of a workflow to publish an image to the GitHub Container Registry.")),(0,r.yg)("p",null,"\ud83d\udc69\u200d\ud83d\udcbb You only need to change the ",(0,r.yg)("inlineCode",{parentName:"p"},"IMAGE_NAME"),", and use it in your GitHub repository to publish a Docker image for your application automatically! It will build from a ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," at the root of the repository."),(0,r.yg)("admonition",{title:"Workflow triggers",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"The workflow can be easily configured to:"),(0,r.yg)("ul",{parentName:"admonition"},(0,r.yg)("li",{parentName:"ul"},"publish a new image to the ",(0,r.yg)("inlineCode",{parentName:"li"},"latest")," tag at each push to the main branch"),(0,r.yg)("li",{parentName:"ul"},"publish an image to a new tag if a release is pushed on GitHub (using the git tag)",(0,r.yg)("ul",{parentName:"li"},(0,r.yg)("li",{parentName:"ul"},"e.g. ",(0,r.yg)("inlineCode",{parentName:"li"},"v0.0.1")," published as image ",(0,r.yg)("inlineCode",{parentName:"li"},"0.0.1")))))))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4792],{5680:(e,t,a)=>{a.d(t,{xA:()=>g,yg:()=>y});var i=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,i)}return a}function n(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var u=i.createContext({}),s=function(e){var t=i.useContext(u),a=t;return e&&(a="function"==typeof e?e(t):n(n({},t),e)),a},g=function(e){var t=s(e.components);return i.createElement(u.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return i.createElement(i.Fragment,{},t)}},c=i.forwardRef((function(e,t){var a=e.components,o=e.mdxType,r=e.originalType,u=e.parentName,g=l(e,["components","mdxType","originalType","parentName"]),c=s(a),y=o,m=c["".concat(u,".").concat(y)]||c[y]||p[y]||r;return a?i.createElement(m,n(n({ref:t},g),{},{components:a})):i.createElement(m,n({ref:t},g))}));function y(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=a.length,n=new Array(r);n[0]=c;var l={};for(var u in t)hasOwnProperty.call(t,u)&&(l[u]=t[u]);l.originalType=e,l.mdxType="string"==typeof e?e:o,n[1]=l;for(var s=2;s{a.r(t),a.d(t,{assets:()=>g,contentTitle:()=>u,default:()=>y,frontMatter:()=>l,metadata:()=>s,toc:()=>p});var 
i=a(9668),o=a(1367),r=(a(6540),a(5680)),n=["components"],l={id:"guide-publish-image",title:"Publish a Docker image"},u=void 0,s={unversionedId:"guide-publish-image",id:"guide-publish-image",title:"Publish a Docker image",description:"\u26a0\ufe0f DockerHub imposes strict pull limitations for clusters like the DSRI (using DockerHub might result in failing to pull your images on the DSRI).",source:"@site/docs/guide-publish-image.md",sourceDirName:".",slug:"/guide-publish-image",permalink:"/docs/guide-publish-image",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-publish-image.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-publish-image",title:"Publish a Docker image"},sidebar:"docs",previous:{title:"Known Issues",permalink:"/docs/guide-known-issues"},next:{title:"Install UM VPN",permalink:"/docs/guide-vpn"}},g={},p=[{value:"Login to Container Registries \ud83d\udd11",id:"login-to-container-registries-",level:2},{value:"Login to GitHub Container Registry",id:"login-to-github-container-registry",level:3},{value:"Login to quay.io",id:"login-to-quayio",level:3},{value:"Login to DockerHub",id:"login-to-dockerhub",level:3},{value:"Publish your image \ud83d\udce2",id:"publish-your-image-",level:2},{value:"Publish to GitHub Container Registry",id:"publish-to-github-container-registry",level:3},{value:"Publish to Quay.io",id:"publish-to-quayio",level:3},{value:"Publish to DockerHub",id:"publish-to-dockerhub",level:3},{value:"Use automated workflows",id:"use-automated-workflows",level:3}],c={toc:p};function y(e){var t=e.components,a=(0,o.A)(e,n);return(0,r.yg)("wrapper",(0,i.A)({},c,a,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("admonition",{title:"DockerHub pull rates limitations",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"\u26a0\ufe0f ",(0,r.yg)("strong",{parentName:"p"},"DockerHub imposes strict pull limitations for clusters")," like the DSRI (using DockerHub might result in failing to pull your images on the DSRI). 
"),(0,r.yg)("p",{parentName:"admonition"},"We highly recommend to ",(0,r.yg)("strong",{parentName:"p"},"use the ",(0,r.yg)("a",{parentName:"strong",href:"https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry/about-github-container-registry"},"GitHub Container Registry")," or ",(0,r.yg)("a",{parentName:"strong",href:"https://quay.io/"},"RedHat quay.io Container Registry")," to publish public Docker images"),".")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"You can also login to DockerHub using a Secret in OpenShift to increase the pull rates limitations from 100 to 200 every 6 hours (this will mitigate the issue, but not solve it completely if you do not have a paid account on DockerHub):"),(0,r.yg)("pre",{parentName:"blockquote"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc create secret docker-registry docker-hub-secret --docker-server=docker.io --docker-username=your-dockerhub-username --docker-password=your-dockerhub-password --docker-email=your-dockerhub-email\n"))),(0,r.yg)("h2",{id:"login-to-container-registries-"},"Login to Container Registries \ud83d\udd11"),(0,r.yg)("h3",{id:"login-to-github-container-registry"},"Login to GitHub Container Registry"),(0,r.yg)("p",null,"Use your existing ",(0,r.yg)("a",{parentName:"p",href:"https://github.com"},"GitHub")," account if you have one:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Create a ",(0,r.yg)("strong",{parentName:"li"},"Personal Access Token")," for GitHub packages at ",(0,r.yg)("strong",{parentName:"li"},(0,r.yg)("a",{parentName:"strong",href:"https://github.com/settings/tokens/new"},"https://github.com/settings/tokens/new"))),(0,r.yg)("li",{parentName:"ol"},"Provide a meaningful description for the token, and enable the following scopes when creating the token:",(0,r.yg)("ul",{parentName:"li"},(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"write:packages"),": publish container images to GitHub Container Registry"),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"delete:packages"),": delete specified versions of private or public container images from GitHub Container Registry"))),(0,r.yg)("li",{parentName:"ol"},"You might want to store this token in a safe place, as you will not be able to retrieve it later on github.com (you can still delete it, and create a new token easily if you lose your token)"),(0,r.yg)("li",{parentName:"ol"},"\ud83d\udc68\u200d\ud83d\udcbb Log in to the GitHub Container Registry in your terminal (change ",(0,r.yg)("inlineCode",{parentName:"li"},"USERNAME")," and ",(0,r.yg)("inlineCode",{parentName:"li"},"ACCESS_TOKEN")," to yours):")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'echo "ACCESS_TOKEN" | docker login ghcr.io -u USERNAME --password-stdin\n')),(0,r.yg)("p",null,"On Windows use this command:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'docker login ghcr.io -u USERNAME -p "ACCESS_TOKEN"\n')),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"See the ",(0,r.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/packages/using-github-packages-with-your-projects-ecosystem/configuring-docker-for-use-with-github-packages"},"official GitHub documentation"),".")),(0,r.yg)("h3",{id:"login-to-quayio"},"Login to quay.io"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Create an account at 
",(0,r.yg)("a",{parentName:"li",href:"https://quay.io"},"https://quay.io")," "),(0,r.yg)("li",{parentName:"ol"},"Login in your terminal (you will be asked for username and password)")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker login quay.io\n")),(0,r.yg)("h3",{id:"login-to-dockerhub"},"Login to DockerHub"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Get a ",(0,r.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"DockerHub")," account at ",(0,r.yg)("a",{parentName:"p",href:"https://hub.docker.com"},"https://hub.docker.com")," (you most probably already have one if you installed Docker Desktop)")),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"\ud83d\udc69\u200d\ud83d\udcbb Run in your terminal:"))),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker login\n")),(0,r.yg)("ol",{start:3},(0,r.yg)("li",{parentName:"ol"},"Provide your DockerHub username and password.")),(0,r.yg)("h2",{id:"publish-your-image-"},"Publish your image \ud83d\udce2"),(0,r.yg)("p",null,"Once you built a Docker image, and you logged in to a Container Registry, you might want to publish the image to pull and re-use it easily later."),(0,r.yg)("h3",{id:"publish-to-github-container-registry"},"Publish to GitHub Container Registry"),(0,r.yg)("admonition",{title:"Free for public images",type:"tip"},(0,r.yg)("p",{parentName:"admonition"},"The ",(0,r.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry"},"GitHub Container Registry")," is still in beta but will be free for public images when fully released. It enables you to store your Docker images at the same place you keep your code! \ud83d\udce6")),(0,r.yg)("p",null,"Publish to your user Container Registry on GitHub:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t ghcr.io/github-username/my-image:latest .\ndocker push ghcr.io/github-username/my-image:latest\n")),(0,r.yg)("p",null,"For example, to the ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/orgs/MaastrichtU-IDS/packages"},"MaastrichtU-IDS organization Container Registry on GitHub"),":"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t ghcr.io/maastrichtu-ids/jupyterlab:latest .\ndocker push ghcr.io/maastrichtu-ids/jupyterlab:latest\n")),(0,r.yg)("admonition",{title:"Created automatically",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"If the image does not exist, GitHub Container Registry will create it automatically and set it as ",(0,r.yg)("strong",{parentName:"p"},"Private")," by default. 
You can easily change it to ",(0,r.yg)("strong",{parentName:"p"},"Public")," in the image settings on github.com.")),(0,r.yg)("h3",{id:"publish-to-quayio"},"Publish to Quay.io"),(0,r.yg)("admonition",{title:"Free for public images",type:"tip"},(0,r.yg)("p",{parentName:"admonition"},"Quay.io is free for public images and does not restrict images pulls.")),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Create the image on ",(0,r.yg)("a",{parentName:"p",href:"https://quay.io/"},"quay.io"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Build and push to ",(0,r.yg)("a",{parentName:"p",href:"https://quay.io/"},"quay.io")))),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t ghcr.io/quay-username/my-image:latest .\ndocker push quay.io/quay-username/my-image:latest\n")),(0,r.yg)("h3",{id:"publish-to-dockerhub"},"Publish to DockerHub"),(0,r.yg)("admonition",{title:"DockerHub pull rates limitations",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"\u26a0\ufe0f ",(0,r.yg)("strong",{parentName:"p"},"DockerHub imposes strict pull limitations for clusters")," like the DSRI (using DockerHub might result in failing to pull your images on the DSRI). "),(0,r.yg)("p",{parentName:"admonition"},"We highly recommend to ",(0,r.yg)("strong",{parentName:"p"},"use the ",(0,r.yg)("a",{parentName:"strong",href:"https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry/about-github-container-registry"},"GitHub Container Registry")," or ",(0,r.yg)("a",{parentName:"strong",href:"https://quay.io/"},"RedHat quay.io Container Registry")," to publish public Docker images"),".")),(0,r.yg)("admonition",{title:"Logged in",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"If you are login with your DockerHub user on the DSRI, it should allow you to pull DockerHub images in your project (see above).")),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Create the repository on ",(0,r.yg)("a",{parentName:"li",href:"https://hub.docker.com/"},"DockerHub")," (attached to your user or an ",(0,r.yg)("a",{parentName:"li",href:"https://hub.docker.com/orgs/umids/repositories"},"organization"),")"),(0,r.yg)("li",{parentName:"ol"},"Build and push the image:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t dockerhub-username/jupyterlab:latest .\ndocker push dockerhub-username/jupyterlab:latest\n")),(0,r.yg)("p",null,"You can also change the name (aka. 
tag) of an existing image:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"docker build -t my-jupyterlab .\ndocker tag my-jupyterlab ghcr.io/github-username/jupyterlab:latest\n")),(0,r.yg)("h3",{id:"use-automated-workflows"},"Use automated workflows"),(0,r.yg)("p",null,"You can automate the building and publication of Docker images using GitHub Actions workflows \ud83d\udd04"),(0,r.yg)("admonition",{title:"Use a working workflow as example",type:"tip"},(0,r.yg)("p",{parentName:"admonition"},"\ud83d\udc40 Check the ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/get-started-with-docker/blob/main/.github/workflows/publish-docker.yml"},".github/workflows/publish-docker.yml file")," to see an example of a workflow to publish an image to the GitHub Container Registry.")),(0,r.yg)("p",null,"\ud83d\udc69\u200d\ud83d\udcbb You only need to change the ",(0,r.yg)("inlineCode",{parentName:"p"},"IMAGE_NAME"),", and use it in your GitHub repository to publish a Docker image for your application automatically! It will build from a ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," at the root of the repository."),(0,r.yg)("admonition",{title:"Workflow triggers",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"The workflow can be easily configured to:"),(0,r.yg)("ul",{parentName:"admonition"},(0,r.yg)("li",{parentName:"ul"},"publish a new image to the ",(0,r.yg)("inlineCode",{parentName:"li"},"latest")," tag at each push to the main branch"),(0,r.yg)("li",{parentName:"ul"},"publish an image to a new tag if a release is pushed on GitHub (using the git tag)",(0,r.yg)("ul",{parentName:"li"},(0,r.yg)("li",{parentName:"ul"},"e.g. ",(0,r.yg)("inlineCode",{parentName:"li"},"v0.0.1")," published as image ",(0,r.yg)("inlineCode",{parentName:"li"},"0.0.1")))))))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/3f4d8b80.1b019b6c.js b/assets/js/3f4d8b80.e31fb42b.js similarity index 99% rename from assets/js/3f4d8b80.1b019b6c.js rename to assets/js/3f4d8b80.e31fb42b.js index f068376ec..0e5a5bf12 100644 --- a/assets/js/3f4d8b80.1b019b6c.js +++ b/assets/js/3f4d8b80.e31fb42b.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4190],{5680:(e,t,n)=>{n.d(t,{xA:()=>p,yg:()=>d});var a=n(6540);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var u=a.createContext({}),s=function(e){var t=a.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},p=function(e){var t=s(e.components);return a.createElement(u.Provider,{value:t},e.children)},h={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},y=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,o=e.originalType,u=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),y=s(n),d=i,g=y["".concat(u,".").concat(d)]||y[d]||h[d]||o;return n?a.createElement(g,r(r({ref:t},p),{},{components:n})):a.createElement(g,r({ref:t},p))}));function d(e,t){var 
n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var o=n.length,r=new Array(o);r[0]=y;var l={};for(var u in t)hasOwnProperty.call(t,u)&&(l[u]=t[u]);l.originalType=e,l.mdxType="string"==typeof e?e:i,r[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>u,default:()=>d,frontMatter:()=>l,metadata:()=>s,toc:()=>h});var a=n(9668),i=n(1367),o=(n(6540),n(5680)),r=["components"],l={id:"deploy-jupyterhub",title:"JupyterHub"},u=void 0,s={unversionedId:"deploy-jupyterhub",id:"deploy-jupyterhub",title:"JupyterHub",description:"JupyterHub is ideal to enable multiple users easily start predefined workspaces in the same project.",source:"@site/docs/deploy-jupyterhub.md",sourceDirName:".",slug:"/deploy-jupyterhub",permalink:"/docs/deploy-jupyterhub",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-jupyterhub.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-jupyterhub",title:"JupyterHub"},sidebar:"docs",previous:{title:"Matlab",permalink:"/docs/deploy-matlab"},next:{title:"Deploy Dask Cluster",permalink:"/docs/dask-cluster"}},p={},h=[{value:"Downloading and adjusting the config.yaml",id:"downloading-and-adjusting-the-configyaml",level:2},{value:"Setting user's default persistent volume size",id:"setting-users-default-persistent-volume-size",level:3},{value:"Persistent volumes",id:"persistent-volumes",level:4},{value:"Configuring an authentication method",id:"configuring-an-authentication-method",level:3},{value:"Dummy authentication",id:"dummy-authentication",level:4},{value:"allow_users / admin_users authentication",id:"allow_users--admin_users-authentication",level:4},{value:"GitHub OAuth authentication",id:"github-oauth-authentication",level:4},{value:"Deploying JupyterHub using the DSRI website \ud83e\ude90",id:"deploying-jupyterhub-using-the-dsri-website-",level:2},{value:"Installing the JupyterHub Helm Chart repository",id:"installing-the-jupyterhub-helm-chart-repository",level:3},{value:"Installing the JupyterHub Helm Chart",id:"installing-the-jupyterhub-helm-chart",level:3},{value:"Creating a secured route",id:"creating-a-secured-route",level:3},{value:"Upgrading the config.yaml",id:"upgrading-the-configyaml",level:3},{value:"Deploying JupyterHub using the Command Line Interface (CLI) \ud83e\ude90",id:"deploying-jupyterhub-using-the-command-line-interface-cli-",level:2},{value:"Installing the JupyterHub Helm Chart repository",id:"installing-the-jupyterhub-helm-chart-repository-1",level:3},{value:"Installing the JupyterHub Helm Chart",id:"installing-the-jupyterhub-helm-chart-1",level:3},{value:"Creating a secured route",id:"creating-a-secured-route-1",level:3},{value:"Upgrading the config.yaml",id:"upgrading-the-configyaml-1",level:3}],y={toc:h};function d(e){var t=e.components,n=(0,i.A)(e,r);return(0,o.yg)("wrapper",(0,a.A)({},y,n,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"JupyterHub is ideal for enabling multiple users to easily start predefined workspaces in the same project. "),(0,o.yg)("admonition",{title:"Experimental \ud83e\uddea ",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Deploying JupyterHub is still experimental, and it can be a bit tricky to configure. 
Feel free to ",(0,o.yg)("a",{parentName:"p",href:"https://servicedesk.icts.maastrichtuniversity.nl/tas/public/ssp/content/serviceflow?unid=1ffa93e9ecd94d938ad46e3cb24c2392"},"submit a ticket")," to ask for help.")),(0,o.yg)("h2",{id:"downloading-and-adjusting-the-configyaml"},"Downloading and adjusting the config.yaml"),(0,o.yg)("admonition",{title:"Before you begin download the config.yaml",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Download the preconfigured ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),".\nThe default config that is provided by JupyterHub will not work. ")),(0,o.yg)("h1",{id:""}),(0,o.yg)("h3",{id:"setting-users-default-persistent-volume-size"},"Setting user's default persistent volume size"),(0,o.yg)("h4",{id:"persistent-volumes"},"Persistent volumes"),(0,o.yg)("p",null,"Persistent volumes are automatically created for each user and instance started in JupyterHub to ensure persistence of the data even JupyterHub is stopped. You can find the persistent volumes in the DSRI web UI, go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,o.yg)("p",null,"It is possible to change the default size of a persistent volume claim for a user in the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml"),". In our ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," the default value is ",(0,o.yg)("inlineCode",{parentName:"p"},"2Gi"),". However if you think that your users will need more storage space you can change this default size in the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml"),". "),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"singleuser:\n # ...\n storage:\n capacity: 2Gi\n")),(0,o.yg)("h3",{id:"configuring-an-authentication-method"},"Configuring an authentication method"),(0,o.yg)("p",null,"At the moment we support three different authentication methods. One for testing purposes (dummy authenthication), one for people who are working alone in a JupyterHub instance or with one or two collaborators (allowed_users / admin_users authenthication), and one for allowing groups of people to collaborate in the same JupyterHub instance (GitHub OAuth). By default the dummy authentication is set in the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml"),". ",(0,o.yg)("strong",{parentName:"p"},"Note that this is only for testing purposes!!!")," However, with very few changes to the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," you can set up the other authentication methods. For reference see ",(0,o.yg)("a",{parentName:"p",href:"https://z2jh.jupyter.org/en/stable/administrator/authentication.html"},"the zero2jupyterhub documentation about authentication methods")),(0,o.yg)("h4",{id:"dummy-authentication"},"Dummy authentication"),(0,o.yg)("p",null,"This authentication method is set by default and is only there so that you can easily test your JupyterHub instance without the need of setting up proper authentication. The catch with this method is that whatever username/password combination you fill in, you will get access! 
In other words this is ",(0,o.yg)("strong",{parentName:"p"},"completely not safe to use in usecases other than testing!"),"\nIn the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," you see -besides the commented out other authentication methods- the following block of text:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"hub:\n # ...\n config:\n JupyterHub:\n admin_access: true\n authenticator_class: dummy\n")),(0,o.yg)("p",null,"Some parts are intentionally left out here, shown as dots ",(0,o.yg)("inlineCode",{parentName:"p"},"# ...")," for better representation. If you are first setting up your JupyterHub instance you can leave this as is. Upon going to your instance via the URL you will get prompted with a login screen:"),(0,o.yg)("img",{src:"/img/jupyterhub-dummy-login.png",alt:"",style:{maxWidth:"75%",maxHeight:"75%"}}),(0,o.yg)("h1",{id:"-1"}),(0,o.yg)("p",null,"Fill in any usernamer and password combination you would like and the useraccount will be made. Note that this useraccount really is made and has its own userpod in the deployment. It has a persistent volume as well and all other properties like any other useraccount that will be made. However you can use whatever password you will fill in to access this account. In other words do not use this user actively and definitely do not store any (sensitive) data in this useraccount!"),(0,o.yg)("h4",{id:"allow_users--admin_users-authentication"},"allow_users / admin_users authentication"),(0,o.yg)("p",null,"If you will be working on your own in your JupyterHub instance it will be easiest to use the allow_users / admin_users authentication method. This method will let you specify an user and admin account with a shared password. ",(0,o.yg)("strong",{parentName:"p"},"It is important that you keep this password a secret and safe! If people will get their hands on this they can acces your JupyterHub instance and login as an admin, which can lead to hefty consequences.")," "),(0,o.yg)("p",null,"If you want to make use of this config uncomment the following block of text and comment out the previous block of text seen at the ",(0,o.yg)("inlineCode",{parentName:"p"},"Dummy authentication")," section above:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"hub:\n # ...\n config:\n Authenticator:\n admin_users:\n - admin\n allowed_users:\n - user1\n DummyAuthenticator:\n password: a-shared-secret-password\n JupyterHub:\n authenticator_class: dummy\n")),(0,o.yg)("p",null,"Note that this password is in plaintext in your ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml"),". ",(0,o.yg)("strong",{parentName:"p"},"Do not use password you use for other accounts, this is never a good idea and is surely not a good idea in this case!")," Unfortunately it is not possible to set passwords in JupyterHub using secrets in the DSRI at the moment. If you need to share your JupyterHub instance with others we recommend you to use the GitHub OAuth authentication method described below. "),(0,o.yg)("h4",{id:"github-oauth-authentication"},"GitHub OAuth authentication"),(0,o.yg)("p",null,"This authentication method is the most secure option we provide at the moment. The major caveat is that you and the people you want to collaborate with need a GitHub account. Moreover, you will need to create an organization and team within that organization, or have access to an organization and team. 
You grant people authorization to log in to the JupyterHub instance with their GitHub account by adding them to a team in an organization in GitHub."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"hub:\n # ...\n config:\n GitHubOAuthenticator:\n client_id: your-client-id\n client_secret: your-client-secret\n oauth_callback_url: https://-.apps.dsri2.unimaas.nl/hub/oauth_callback\n JupyterHub:\n authenticator_class: github\n")),(0,o.yg)("p",null,"For creating an OAuth app in GitHub please refer to GitHub's ",(0,o.yg)("a",{parentName:"p",href:"https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/creating-an-oauth-app"},"documentation"),". The GitHub OAuth app will provide your client ID and client secret. Fill in the ",(0,o.yg)("inlineCode",{parentName:"p"},"")," and ",(0,o.yg)("inlineCode",{parentName:"p"},"")," you provided yourself in the previous steps accordingly.\nTo set up an organization and team, please refer to GitHub's ",(0,o.yg)("a",{parentName:"p",href:"https://docs.github.com/en/organizations"},"documentation")," as well. "),(0,o.yg)("h1",{id:"-2"}),(0,o.yg)("h2",{id:"deploying-jupyterhub-using-the-dsri-website-"},"Deploying JupyterHub using the DSRI website \ud83e\ude90"),(0,o.yg)("admonition",{title:"Before you begin download the config.yaml",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Download the preconfigured ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),".\nThe default config that is provided by JupyterHub will not work. ")),(0,o.yg)("h3",{id:"installing-the-jupyterhub-helm-chart-repository"},"Installing the JupyterHub Helm Chart repository"),(0,o.yg)("p",null,"After you have created a project you can start installing the JupyterHub Helm Chart. If you do not have access to the DSRI or have not created a project yet, and you need to find out how, please refer to our ",(0,o.yg)("a",{parentName:"p",href:"https://dsri.maastrichtuniversity.nl/docs/"},"documentation.")),(0,o.yg)("admonition",{title:"Helm Chart already available",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"The Helm Chart should be already made available for everyone to use on the DSRI platform. There will be no need to install the repository yourself.")),(0,o.yg)("h1",{id:"-3"}),(0,o.yg)("p",null,"In ",(0,o.yg)("inlineCode",{parentName:"p"},"Developer")," mode in your project, go to ",(0,o.yg)("inlineCode",{parentName:"p"},"Helm")," in the sidepanel (1). Next, click on ",(0,o.yg)("inlineCode",{parentName:"p"},"Create")," and choose ",(0,o.yg)("inlineCode",{parentName:"p"},"Repository")," (2)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-repo-add-one.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-4"}),(0,o.yg)("p",null,"Then fill in the ",(0,o.yg)("inlineCode",{parentName:"p"},"Name")," and ",(0,o.yg)("inlineCode",{parentName:"p"},"Display Name"),", give it a ",(0,o.yg)("inlineCode",{parentName:"p"},"Description"),", and fill in the ",(0,o.yg)("inlineCode",{parentName:"p"},"URL"),": ",(0,o.yg)("a",{parentName:"p",href:"https://hub.jupyter.org/helm-chart/"},"https://hub.jupyter.org/helm-chart/"),". "),(0,o.yg)("h1",{id:"-5"}),(0,o.yg)("p",null,"Next, click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create"),". 
"),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-repo-add-two.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-6"}),(0,o.yg)("h3",{id:"installing-the-jupyterhub-helm-chart"},"Installing the JupyterHub Helm Chart"),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"At the moment the latest -and only- Helm Chart version which is supported by DSRI is version 3.3.8. Newer versions will not work, and older versions are not tested and/or configured!")),(0,o.yg)("h1",{id:"-7"}),(0,o.yg)("p",null,"In ",(0,o.yg)("inlineCode",{parentName:"p"},"Developer")," mode in your project, go to ",(0,o.yg)("inlineCode",{parentName:"p"},"Helm")," in the sidepanel (1). Next, click on ",(0,o.yg)("inlineCode",{parentName:"p"},"Create")," and choose ",(0,o.yg)("inlineCode",{parentName:"p"},"Helm Release")," (2)"),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-one.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-8"}),(0,o.yg)("p",null,"Search for ",(0,o.yg)("inlineCode",{parentName:"p"},"jupyterhub")," (or the name you gave the repository if you added the repository yourself), and choose the ",(0,o.yg)("inlineCode",{parentName:"p"},"JupyterHub")," Helm Chart (1)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-two.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-9"}),(0,o.yg)("p",null,"Click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create"),"."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-three.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-10"}),(0,o.yg)("p",null,"Click the ",(0,o.yg)("inlineCode",{parentName:"p"},"Chart version")," drop down menu (1)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-four.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-11"}),(0,o.yg)("p",null,"And choose the right Chart version: ",(0,o.yg)("inlineCode",{parentName:"p"},"3.3.8")," (1). Note that this is an important step, as we only support version 3.3.8 at the moment. Newer versions do not work yet and older versions we did not configure and/or test! "),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-five.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-12"}),(0,o.yg)("p",null,"Now, change the config with the content of the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," you have downloaded from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),".\nCopy the content of the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," and paste it in the highlighted box to replace the old with the new config. Click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create")," to install the JupyterHub Helm Chart."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-six.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-13"}),(0,o.yg)("h3",{id:"creating-a-secured-route"},"Creating a secured route"),(0,o.yg)("p",null,"Create a secured route, with TLS edge termination."),(0,o.yg)("p",null,"In ",(0,o.yg)("inlineCode",{parentName:"p"},"Developer")," mode in your project, go to ",(0,o.yg)("inlineCode",{parentName:"p"},"Project")," in the sidepanel (1). 
Next, click on ",(0,o.yg)("inlineCode",{parentName:"p"},"Route")," (2)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-route-one.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-14"}),(0,o.yg)("p",null,"Next, click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create"),"."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-route-two.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-15"}),(0,o.yg)("p",null,"Fill in the ",(0,o.yg)("inlineCode",{parentName:"p"},"Name")," (1), choose the ",(0,o.yg)("inlineCode",{parentName:"p"},"Service"),": ",(0,o.yg)("inlineCode",{parentName:"p"},"proxy-public")," (2), choose the ",(0,o.yg)("inlineCode",{parentName:"p"},"Target Port"),": ",(0,o.yg)("inlineCode",{parentName:"p"},"80 -> http (TCP)")," (3), tick the box ",(0,o.yg)("inlineCode",{parentName:"p"},"Secure Route")," (4), and finally choose ",(0,o.yg)("inlineCode",{parentName:"p"},"TLS Termination"),": ",(0,o.yg)("inlineCode",{parentName:"p"},"Edge")," (5). Next, click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create"),", to create the route."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-route-three.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h3",{id:"upgrading-the-configyaml"},"Upgrading the config.yaml"),(0,o.yg)("p",null,"You can upgrade your ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," easily in the DSRI web UI if you would like to change certain settings, such as user's default persistent volume claims, authentication methods, and many more things. Note that in some cases users who created an account with an old authentication method will still have access via that method, make sure you set up your preferred authentication method before allowing users to authenticate and use the JupyterHub instance."),(0,o.yg)("h1",{id:"-16"}),(0,o.yg)("p",null,"In ",(0,o.yg)("inlineCode",{parentName:"p"},"Developer")," mode in your project, go to ",(0,o.yg)("inlineCode",{parentName:"p"},"Helm")," in the sidepanel (1). Next, click on your Helm Chart Release (2)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-upgrade-chart-one.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-17"}),(0,o.yg)("p",null,"Now, click the ",(0,o.yg)("inlineCode",{parentName:"p"},"Actions")," drop down menu, and choose ",(0,o.yg)("inlineCode",{parentName:"p"},"Upgrade")," (1)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-upgrade-chart-two.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-18"}),(0,o.yg)("p",null,"In the box -highlighted in the picutre below- you can make changes to the config.yaml. 
After you have made your changes, click ",(0,o.yg)("inlineCode",{parentName:"p"},"Upgrade")," and your upgraded JupyterHub Helm Chart Release will automatically be deployed."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-upgrade-chart-three.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-19"}),(0,o.yg)("admonition",{title:"Configure JupyterHub",type:"success"},(0,o.yg)("p",{parentName:"admonition"},"Feel free to ",(0,o.yg)("a",{parentName:"p",href:"https://servicedesk.icts.maastrichtuniversity.nl/tas/public/ssp/content/serviceflow?unid=1ffa93e9ecd94d938ad46e3cb24c2392"},"submit a ticket")," to ask for help configuring your JupyterHub.")),(0,o.yg)("h1",{id:"-20"}),(0,o.yg)("h2",{id:"deploying-jupyterhub-using-the-command-line-interface-cli-"},"Deploying JupyterHub using the Command Line Interface (CLI) \ud83e\ude90"),(0,o.yg)("admonition",{title:"Before you begin download the config.yaml",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Download the preconfigured ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),".\nThe default config that is provided by JupyterHub will not work. ")),(0,o.yg)("h3",{id:"installing-the-jupyterhub-helm-chart-repository-1"},"Installing the JupyterHub Helm Chart repository"),(0,o.yg)("p",null,"After you have created a project you can start installing the JupyterHub Helm Chart. If you do not have access to the DSRI or have not created a project yet, and you need to find out how, please refer to our ",(0,o.yg)("a",{parentName:"p",href:"https://dsri.maastrichtuniversity.nl/docs/"},"documentation.")),(0,o.yg)("admonition",{title:"Helm Chart already available",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"The Helm Chart should be already made available for everyone to use on the DSRI platform. There will be no need to install the repository yourself.")),(0,o.yg)("h1",{id:"-21"}),(0,o.yg)("p",null,"Add the JupyterHub Helm Chart repository:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add jupyterhub https://hub.jupyter.org/helm-chart/\nhelm repo update\n")),(0,o.yg)("h3",{id:"installing-the-jupyterhub-helm-chart-1"},"Installing the JupyterHub Helm Chart"),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"At the moment, the latest (and only) Helm Chart version supported by the DSRI is version 3.3.8. Newer versions will not work, and older versions are not tested and/or configured!")),(0,o.yg)("h1",{id:"-22"}),(0,o.yg)("p",null,"Make sure you use the right ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," downloaded from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),"."),(0,o.yg)("p",null,"Install the Helm Chart using the following command:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"helm upgrade --cleanup-on-fail \\\n --install jupyterhub jupyterhub/jupyterhub \\\n --version=3.3.8 \\\n --namespace= \\\n --values config.yaml\n")),(0,o.yg)("p",null,(0,o.yg)("inlineCode",{parentName:"p"},"")," is the name of the namespace your project is in. 
"),(0,o.yg)("h3",{id:"creating-a-secured-route-1"},"Creating a secured route"),(0,o.yg)("p",null,"Create a secured route, with TLS edge termination:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc create route edge --namespace --service=proxy-public --port=http\n")),(0,o.yg)("p",null,(0,o.yg)("inlineCode",{parentName:"p"},"")," is the name of the namespace your project is in.\n",(0,o.yg)("inlineCode",{parentName:"p"},"")," is the name of the route. "),(0,o.yg)("h3",{id:"upgrading-the-configyaml-1"},"Upgrading the config.yaml"),(0,o.yg)("p",null,"Run the following command with your new config.yaml:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"helm upgrade --cleanup-on-fail \\\n --install jupyterhub jupyterhub/jupyterhub \\\n --version=3.3.8 \\\n --namespace= \\\n --values config.yaml\n")),(0,o.yg)("p",null,(0,o.yg)("inlineCode",{parentName:"p"},"")," is the name of the namespace your project is in. "),(0,o.yg)("p",null,(0,o.yg)("strong",{parentName:"p"},"Note")," that the namespace should be the same namespace as the one where your original deployment was initiated!"),(0,o.yg)("h1",{id:"-23"}),(0,o.yg)("admonition",{title:"Configure JupyterHub",type:"success"},(0,o.yg)("p",{parentName:"admonition"},"Feel free to ",(0,o.yg)("a",{parentName:"p",href:"https://servicedesk.icts.maastrichtuniversity.nl/tas/public/ssp/content/serviceflow?unid=1ffa93e9ecd94d938ad46e3cb24c2392"},"submit a ticket")," to ask for help configuring your JupyterHub.")))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4190],{5680:(e,t,n)=>{n.d(t,{xA:()=>p,yg:()=>d});var a=n(6540);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var u=a.createContext({}),s=function(e){var t=a.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},p=function(e){var t=s(e.components);return a.createElement(u.Provider,{value:t},e.children)},h={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},y=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,o=e.originalType,u=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),y=s(n),d=i,g=y["".concat(u,".").concat(d)]||y[d]||h[d]||o;return n?a.createElement(g,r(r({ref:t},p),{},{components:n})):a.createElement(g,r({ref:t},p))}));function d(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var o=n.length,r=new Array(o);r[0]=y;var l={};for(var u in t)hasOwnProperty.call(t,u)&&(l[u]=t[u]);l.originalType=e,l.mdxType="string"==typeof e?e:i,r[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>u,default:()=>d,frontMatter:()=>l,metadata:()=>s,toc:()=>h});var a=n(9668),i=n(1367),o=(n(6540),n(5680)),r=["components"],l={id:"deploy-jupyterhub",title:"JupyterHub"},u=void 0,s={unversionedId:"deploy-jupyterhub",id:"deploy-jupyterhub",title:"JupyterHub",description:"JupyterHub is ideal to enable multiple users easily start predefined workspaces in the same 
project.",source:"@site/docs/deploy-jupyterhub.md",sourceDirName:".",slug:"/deploy-jupyterhub",permalink:"/docs/deploy-jupyterhub",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-jupyterhub.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-jupyterhub",title:"JupyterHub"},sidebar:"docs",previous:{title:"Matlab",permalink:"/docs/deploy-matlab"},next:{title:"Deploy Dask Cluster",permalink:"/docs/dask-cluster"}},p={},h=[{value:"Downloading and adjusting the config.yaml",id:"downloading-and-adjusting-the-configyaml",level:2},{value:"Setting user's default persistent volume size",id:"setting-users-default-persistent-volume-size",level:3},{value:"Persistent volumes",id:"persistent-volumes",level:4},{value:"Configuring an authentication method",id:"configuring-an-authentication-method",level:3},{value:"Dummy authentication",id:"dummy-authentication",level:4},{value:"allow_users / admin_users authentication",id:"allow_users--admin_users-authentication",level:4},{value:"GitHub OAuth authentication",id:"github-oauth-authentication",level:4},{value:"Deploying JupyterHub using the DSRI website \ud83e\ude90",id:"deploying-jupyterhub-using-the-dsri-website-",level:2},{value:"Installing the JupyterHub Helm Chart repository",id:"installing-the-jupyterhub-helm-chart-repository",level:3},{value:"Installing the JupyterHub Helm Chart",id:"installing-the-jupyterhub-helm-chart",level:3},{value:"Creating a secured route",id:"creating-a-secured-route",level:3},{value:"Upgrading the config.yaml",id:"upgrading-the-configyaml",level:3},{value:"Deploying JupyterHub using the Command Line Interface (CLI) \ud83e\ude90",id:"deploying-jupyterhub-using-the-command-line-interface-cli-",level:2},{value:"Installing the JupyterHub Helm Chart repository",id:"installing-the-jupyterhub-helm-chart-repository-1",level:3},{value:"Installing the JupyterHub Helm Chart",id:"installing-the-jupyterhub-helm-chart-1",level:3},{value:"Creating a secured route",id:"creating-a-secured-route-1",level:3},{value:"Upgrading the config.yaml",id:"upgrading-the-configyaml-1",level:3}],y={toc:h};function d(e){var t=e.components,n=(0,i.A)(e,r);return(0,o.yg)("wrapper",(0,a.A)({},y,n,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"JupyterHub is ideal to enable multiple users easily start predefined workspaces in the same project. "),(0,o.yg)("admonition",{title:"Experimental \ud83e\uddea ",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Deploying JupyterHub is still experimental, and it can be a bit tricky to configure. Feel free to ",(0,o.yg)("a",{parentName:"p",href:"https://servicedesk.icts.maastrichtuniversity.nl/tas/public/ssp/content/serviceflow?unid=1ffa93e9ecd94d938ad46e3cb24c2392"},"submit a ticket")," to ask for help.")),(0,o.yg)("h2",{id:"downloading-and-adjusting-the-configyaml"},"Downloading and adjusting the config.yaml"),(0,o.yg)("admonition",{title:"Before you begin download the config.yaml",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Download the preconfigured ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),".\nThe default config that is provided by JupyterHub will not work. 
")),(0,o.yg)("h1",{id:""}),(0,o.yg)("h3",{id:"setting-users-default-persistent-volume-size"},"Setting user's default persistent volume size"),(0,o.yg)("h4",{id:"persistent-volumes"},"Persistent volumes"),(0,o.yg)("p",null,"Persistent volumes are automatically created for each user and instance started in JupyterHub to ensure persistence of the data even JupyterHub is stopped. You can find the persistent volumes in the DSRI web UI, go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,o.yg)("p",null,"It is possible to change the default size of a persistent volume claim for a user in the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml"),". In our ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," the default value is ",(0,o.yg)("inlineCode",{parentName:"p"},"2Gi"),". However if you think that your users will need more storage space you can change this default size in the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml"),". "),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"singleuser:\n # ...\n storage:\n capacity: 2Gi\n")),(0,o.yg)("h3",{id:"configuring-an-authentication-method"},"Configuring an authentication method"),(0,o.yg)("p",null,"At the moment we support three different authentication methods. One for testing purposes (dummy authenthication), one for people who are working alone in a JupyterHub instance or with one or two collaborators (allowed_users / admin_users authenthication), and one for allowing groups of people to collaborate in the same JupyterHub instance (GitHub OAuth). By default the dummy authentication is set in the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml"),". ",(0,o.yg)("strong",{parentName:"p"},"Note that this is only for testing purposes!!!")," However, with very few changes to the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," you can set up the other authentication methods. For reference see ",(0,o.yg)("a",{parentName:"p",href:"https://z2jh.jupyter.org/en/stable/administrator/authentication.html"},"the zero2jupyterhub documentation about authentication methods")),(0,o.yg)("h4",{id:"dummy-authentication"},"Dummy authentication"),(0,o.yg)("p",null,"This authentication method is set by default and is only there so that you can easily test your JupyterHub instance without the need of setting up proper authentication. The catch with this method is that whatever username/password combination you fill in, you will get access! In other words this is ",(0,o.yg)("strong",{parentName:"p"},"completely not safe to use in usecases other than testing!"),"\nIn the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," you see -besides the commented out other authentication methods- the following block of text:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"hub:\n # ...\n config:\n JupyterHub:\n admin_access: true\n authenticator_class: dummy\n")),(0,o.yg)("p",null,"Some parts are intentionally left out here, shown as dots ",(0,o.yg)("inlineCode",{parentName:"p"},"# ...")," for better representation. If you are first setting up your JupyterHub instance you can leave this as is. 
(0,o.yg)("h3",{id:"configuring-an-authentication-method"},"Configuring an authentication method"),(0,o.yg)("p",null,"At the moment we support three different authentication methods. One for testing purposes (dummy authentication), one for people who are working alone in a JupyterHub instance or with one or two collaborators (allowed_users / admin_users authentication), and one for allowing groups of people to collaborate in the same JupyterHub instance (GitHub OAuth). By default the dummy authentication is set in the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml"),". ",(0,o.yg)("strong",{parentName:"p"},"Note that this is only for testing purposes!!!")," However, with very few changes to the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," you can set up the other authentication methods. For reference see ",(0,o.yg)("a",{parentName:"p",href:"https://z2jh.jupyter.org/en/stable/administrator/authentication.html"},"the zero2jupyterhub documentation about authentication methods")),(0,o.yg)("h4",{id:"dummy-authentication"},"Dummy authentication"),(0,o.yg)("p",null,"This authentication method is set by default and is only there so that you can easily test your JupyterHub instance without the need to set up proper authentication. The catch with this method is that whatever username/password combination you fill in, you will get access! In other words, this is ",(0,o.yg)("strong",{parentName:"p"},"completely unsafe to use in use cases other than testing!"),"\nIn the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," you see (besides the other, commented-out authentication methods) the following block of text:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"hub:\n # ...\n config:\n JupyterHub:\n admin_access: true\n authenticator_class: dummy\n")),(0,o.yg)("p",null,"Some parts are intentionally left out here, shown as dots ",(0,o.yg)("inlineCode",{parentName:"p"},"# ...")," for readability. If you are first setting up your JupyterHub instance, you can leave this as is. Upon going to your instance via the URL you will be prompted with a login screen:"),(0,o.yg)("img",{src:"/img/jupyterhub-dummy-login.png",alt:"",style:{maxWidth:"75%",maxHeight:"75%"}}),(0,o.yg)("h1",{id:"-1"}),(0,o.yg)("p",null,"Fill in any username and password combination you would like and the user account will be created. Note that this user account really is created and has its own user pod in the deployment. It has a persistent volume as well, and all the other properties of any other user account. However, any password you fill in will give access to this account. In other words, do not use this user actively and definitely do not store any (sensitive) data in this user account!"),(0,o.yg)("h4",{id:"allow_users--admin_users-authentication"},"allow_users / admin_users authentication"),(0,o.yg)("p",null,"If you will be working on your own in your JupyterHub instance, it is easiest to use the allow_users / admin_users authentication method. This method lets you specify a user and an admin account with a shared password. ",(0,o.yg)("strong",{parentName:"p"},"It is important that you keep this password secret and safe! If people get their hands on it, they can access your JupyterHub instance and log in as an admin, which can have serious consequences.")," "),(0,o.yg)("p",null,"If you want to make use of this config, uncomment the following block of text and comment out the previous block of text seen in the ",(0,o.yg)("inlineCode",{parentName:"p"},"Dummy authentication")," section above:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"hub:\n # ...\n config:\n Authenticator:\n admin_users:\n - admin\n allowed_users:\n - user1\n DummyAuthenticator:\n password: a-shared-secret-password\n JupyterHub:\n authenticator_class: dummy\n")),(0,o.yg)("p",null,"Note that this password is in plaintext in your ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml"),". ",(0,o.yg)("strong",{parentName:"p"},"Do not use a password you use for other accounts; this is never a good idea, and is surely not a good idea in this case!")," Unfortunately it is not possible to set passwords in JupyterHub using secrets on the DSRI at the moment. If you need to share your JupyterHub instance with others, we recommend using the GitHub OAuth authentication method described below. "),(0,o.yg)("h4",{id:"github-oauth-authentication"},"GitHub OAuth authentication"),(0,o.yg)("p",null,"This authentication method is the most secure option we provide at the moment. The major caveat is that you and the people you want to collaborate with need a GitHub account. Moreover, you will need to create an organization and a team within that organization, or have access to an organization and team. You grant people authorization to log in to the JupyterHub instance with their GitHub account by adding them to a team in an organization in GitHub."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"hub:\n # ...\n config:\n GitHubOAuthenticator:\n client_id: your-client-id\n client_secret: your-client-secret\n oauth_callback_url: https://-.apps.dsri2.unimaas.nl/hub/oauth_callback\n JupyterHub:\n authenticator_class: github\n")),
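(0,o.yg)("p",null,"The team-based restriction itself can be configured with the authenticator's ",(0,o.yg)("inlineCode",{parentName:"p"},"allowed_organizations")," option. A minimal sketch, assuming your organization and team are called ",(0,o.yg)("inlineCode",{parentName:"p"},"org-name")," and ",(0,o.yg)("inlineCode",{parentName:"p"},"team-name"),":"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"hub:\n # ...\n config:\n GitHubOAuthenticator:\n # only members of this organization:team combination may log in\n allowed_organizations:\n - org-name:team-name\n # the read:org scope is needed to check team membership\n scope:\n - read:org\n")),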
The ",(0,o.yg)("inlineCode",{parentName:"p"},"")," and ",(0,o.yg)("inlineCode",{parentName:"p"},"")," you provided yourself in the previous steps, fill those in accordingly.\nTo set up an organization and team, please refer to GitHub's ",(0,o.yg)("a",{parentName:"p",href:"https://docs.github.com/en/organizations"},"documentation.")," as well. "),(0,o.yg)("h1",{id:"-2"}),(0,o.yg)("h2",{id:"deploying-jupyterhub-using-the-dsri-website-"},"Deploying JupyterHub using the DSRI website \ud83e\ude90"),(0,o.yg)("admonition",{title:"Before you begin download the config.yaml",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Download the preconfigured ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),".\nThe default config that is provided by JupyterHub will not work. ")),(0,o.yg)("h3",{id:"installing-the-jupyterhub-helm-chart-repository"},"Installing the JupyterHub Helm Chart repository"),(0,o.yg)("p",null,"After you have created a project you can start with installing the JupyterHub Helm Chart. If you do not have access to DSRI or created a project yet, and you need to find out how, please refer to our ",(0,o.yg)("a",{parentName:"p",href:"https://dsri.maastrichtuniversity.nl/docs/"},"documentation.")),(0,o.yg)("admonition",{title:"Helm Chart already available",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"The Helm Chart should be already made available for everyone to use on the DSRI platform. There will be no need to install the repository yourself.")),(0,o.yg)("h1",{id:"-3"}),(0,o.yg)("p",null,"In ",(0,o.yg)("inlineCode",{parentName:"p"},"Developer")," mode in your project, go to ",(0,o.yg)("inlineCode",{parentName:"p"},"Helm")," in the sidepanel (1). Next, click on ",(0,o.yg)("inlineCode",{parentName:"p"},"Create")," and choose ",(0,o.yg)("inlineCode",{parentName:"p"},"Repository")," (2)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-repo-add-one.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-4"}),(0,o.yg)("p",null,"Then fill in the ",(0,o.yg)("inlineCode",{parentName:"p"},"Name"),", ",(0,o.yg)("inlineCode",{parentName:"p"},"Display Name"),", give it a ",(0,o.yg)("inlineCode",{parentName:"p"},"Description")," and fill in the ",(0,o.yg)("inlineCode",{parentName:"p"},"URL"),": ",(0,o.yg)("a",{parentName:"p",href:"https://hub.jupyter.org/helm-chart/"},"https://hub.jupyter.org/helm-chart/"),". "),(0,o.yg)("h1",{id:"-5"}),(0,o.yg)("p",null,"Next, click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create"),". "),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-repo-add-two.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-6"}),(0,o.yg)("h3",{id:"installing-the-jupyterhub-helm-chart"},"Installing the JupyterHub Helm Chart"),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"At the moment the latest -and only- Helm Chart version which is supported by DSRI is version 3.3.8. Newer versions will not work, and older versions are not tested and/or configured!")),(0,o.yg)("h1",{id:"-7"}),(0,o.yg)("p",null,"In ",(0,o.yg)("inlineCode",{parentName:"p"},"Developer")," mode in your project, go to ",(0,o.yg)("inlineCode",{parentName:"p"},"Helm")," in the sidepanel (1). 
Next, click on ",(0,o.yg)("inlineCode",{parentName:"p"},"Create")," and choose ",(0,o.yg)("inlineCode",{parentName:"p"},"Helm Release")," (2)"),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-one.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-8"}),(0,o.yg)("p",null,"Search for ",(0,o.yg)("inlineCode",{parentName:"p"},"jupyterhub")," (or the name you gave the repository if you added the repository yourself), and choose the ",(0,o.yg)("inlineCode",{parentName:"p"},"JupyterHub")," Helm Chart (1)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-two.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-9"}),(0,o.yg)("p",null,"Click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create"),"."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-three.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-10"}),(0,o.yg)("p",null,"Click the ",(0,o.yg)("inlineCode",{parentName:"p"},"Chart version")," drop down menu (1)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-four.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-11"}),(0,o.yg)("p",null,"And choose the right Chart version: ",(0,o.yg)("inlineCode",{parentName:"p"},"3.3.8")," (1). Note that this is an important step, as we only support version 3.3.8 at the moment. Newer versions do not work yet and older versions we did not configure and/or test! "),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-five.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-12"}),(0,o.yg)("p",null,"Now, change the config with the content of the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," you have downloaded from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),".\nCopy the content of the ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," and paste it in the highlighted box to replace the old with the new config. Click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create")," to install the JupyterHub Helm Chart."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-install-six.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-13"}),(0,o.yg)("h3",{id:"creating-a-secured-route"},"Creating a secured route"),(0,o.yg)("p",null,"Create a secured route, with TLS edge termination."),(0,o.yg)("p",null,"In ",(0,o.yg)("inlineCode",{parentName:"p"},"Developer")," mode in your project, go to ",(0,o.yg)("inlineCode",{parentName:"p"},"Project")," in the sidepanel (1). 
Next, click on ",(0,o.yg)("inlineCode",{parentName:"p"},"Route")," (2)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-route-one.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-14"}),(0,o.yg)("p",null,"Next, click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create"),"."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-route-two.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-15"}),(0,o.yg)("p",null,"Fill in the ",(0,o.yg)("inlineCode",{parentName:"p"},"Name")," (1), choose the ",(0,o.yg)("inlineCode",{parentName:"p"},"Service"),": ",(0,o.yg)("inlineCode",{parentName:"p"},"proxy-public")," (2), choose the ",(0,o.yg)("inlineCode",{parentName:"p"},"Target Port"),": ",(0,o.yg)("inlineCode",{parentName:"p"},"80 -> http (TCP)")," (3), tick the box ",(0,o.yg)("inlineCode",{parentName:"p"},"Secure Route")," (4), and finally choose ",(0,o.yg)("inlineCode",{parentName:"p"},"TLS Termination"),": ",(0,o.yg)("inlineCode",{parentName:"p"},"Edge")," (5). Next, click ",(0,o.yg)("inlineCode",{parentName:"p"},"Create"),", to create the route."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-route-three.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h3",{id:"upgrading-the-configyaml"},"Upgrading the config.yaml"),(0,o.yg)("p",null,"You can upgrade your ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," easily in the DSRI web UI if you would like to change certain settings, such as user's default persistent volume claims, authentication methods, and many more things. Note that in some cases users who created an account with an old authentication method will still have access via that method, make sure you set up your preferred authentication method before allowing users to authenticate and use the JupyterHub instance."),(0,o.yg)("h1",{id:"-16"}),(0,o.yg)("p",null,"In ",(0,o.yg)("inlineCode",{parentName:"p"},"Developer")," mode in your project, go to ",(0,o.yg)("inlineCode",{parentName:"p"},"Helm")," in the sidepanel (1). Next, click on your Helm Chart Release (2)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-upgrade-chart-one.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-17"}),(0,o.yg)("p",null,"Now, click the ",(0,o.yg)("inlineCode",{parentName:"p"},"Actions")," drop down menu, and choose ",(0,o.yg)("inlineCode",{parentName:"p"},"Upgrade")," (1)."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-upgrade-chart-two.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-18"}),(0,o.yg)("p",null,"In the box -highlighted in the picutre below- you can make changes to the config.yaml. 
After you have made your changes, click ",(0,o.yg)("inlineCode",{parentName:"p"},"Upgrade")," and your upgraded JupyterHub Helm Chart Release will automatically be deployed."),(0,o.yg)("img",{src:"/img/jupyterhub-helm-chart-upgrade-chart-three.png",alt:"",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h1",{id:"-19"}),(0,o.yg)("admonition",{title:"Configure JupyterHub",type:"success"},(0,o.yg)("p",{parentName:"admonition"},"Feel free to ",(0,o.yg)("a",{parentName:"p",href:"https://servicedesk.icts.maastrichtuniversity.nl/tas/public/ssp/content/serviceflow?unid=1ffa93e9ecd94d938ad46e3cb24c2392"},"submit a ticket")," to ask for help configuring your JupyterHub.")),(0,o.yg)("h1",{id:"-20"}),(0,o.yg)("h2",{id:"deploying-jupyterhub-using-the-command-line-interface-cli-"},"Deploying JupyterHub using the Command Line Interface (CLI) \ud83e\ude90"),(0,o.yg)("admonition",{title:"Before you begin, download the config.yaml",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Download the preconfigured ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),".\nThe default config that is provided by JupyterHub will not work. ")),(0,o.yg)("h3",{id:"installing-the-jupyterhub-helm-chart-repository-1"},"Installing the JupyterHub Helm Chart repository"),(0,o.yg)("p",null,"After you have created a project you can start installing the JupyterHub Helm Chart. If you do not have access to the DSRI or have not created a project yet, please refer to our ",(0,o.yg)("a",{parentName:"p",href:"https://dsri.maastrichtuniversity.nl/docs/"},"documentation"),"."),(0,o.yg)("admonition",{title:"Helm Chart already available",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"The Helm Chart should already be available for everyone to use on the DSRI platform, so there is no need to install the repository yourself.")),(0,o.yg)("h1",{id:"-21"}),(0,o.yg)("p",null,"Add the JupyterHub Helm Chart repository:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add jupyterhub https://hub.jupyter.org/helm-chart/\nhelm repo update\n")),(0,o.yg)("h3",{id:"installing-the-jupyterhub-helm-chart-1"},"Installing the JupyterHub Helm Chart"),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"At the moment the latest (and only) Helm Chart version supported by the DSRI is version 3.3.8. Newer versions will not work, and older versions have not been tested or configured!")),(0,o.yg)("h1",{id:"-22"}),(0,o.yg)("p",null,"Make sure you use the right ",(0,o.yg)("inlineCode",{parentName:"p"},"config.yaml")," downloaded from our ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/refs/heads/master/applications/jupyterhub/config.yaml"},"GitHub repository"),"."),(0,o.yg)("p",null,"Install the Helm Chart using the following command:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"helm upgrade --cleanup-on-fail \\\n --install jupyterhub jupyterhub/jupyterhub \\\n --version=3.3.8 \\\n --namespace=<namespace> \\\n --values config.yaml\n")),(0,o.yg)("p",null,(0,o.yg)("inlineCode",{parentName:"p"},"<namespace>")," is the name of the namespace your project is in. 
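"),(0,o.yg)("p",null,"After the installation you can check that the JupyterHub pods come up correctly, for example with:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"# list the pods in your project to verify the deployment\noc get pods --namespace <namespace>\n")),(0,o.yg)("p",null,"The hub and proxy pods should reach the Running status before you continue. 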
"),(0,o.yg)("h3",{id:"creating-a-secured-route-1"},"Creating a secured route"),(0,o.yg)("p",null,"Create a secured route, with TLS edge termination:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc create route edge --namespace --service=proxy-public --port=http\n")),(0,o.yg)("p",null,(0,o.yg)("inlineCode",{parentName:"p"},"")," is the name of the namespace your project is in.\n",(0,o.yg)("inlineCode",{parentName:"p"},"")," is the name of the route. "),(0,o.yg)("h3",{id:"upgrading-the-configyaml-1"},"Upgrading the config.yaml"),(0,o.yg)("p",null,"Run the following command with your new config.yaml:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"helm upgrade --cleanup-on-fail \\\n --install jupyterhub jupyterhub/jupyterhub \\\n --version=3.3.8 \\\n --namespace= \\\n --values config.yaml\n")),(0,o.yg)("p",null,(0,o.yg)("inlineCode",{parentName:"p"},"")," is the name of the namespace your project is in. "),(0,o.yg)("p",null,(0,o.yg)("strong",{parentName:"p"},"Note")," that the namespace should be the same namespace as the one where your original deployment was initiated!"),(0,o.yg)("h1",{id:"-23"}),(0,o.yg)("admonition",{title:"Configure JupyterHub",type:"success"},(0,o.yg)("p",{parentName:"admonition"},"Feel free to ",(0,o.yg)("a",{parentName:"p",href:"https://servicedesk.icts.maastrichtuniversity.nl/tas/public/ssp/content/serviceflow?unid=1ffa93e9ecd94d938ad46e3cb24c2392"},"submit a ticket")," to ask for help configuring your JupyterHub.")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/47a4a695.c2e3bbad.js b/assets/js/47a4a695.1d355b94.js similarity index 99% rename from assets/js/47a4a695.c2e3bbad.js rename to assets/js/47a4a695.1d355b94.js index c9ee8a268..1d7616c70 100644 --- a/assets/js/47a4a695.c2e3bbad.js +++ b/assets/js/47a4a695.1d355b94.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3836],{5680:(e,r,t)=>{t.d(r,{xA:()=>g,yg:()=>y});var o=t(6540);function n(e,r,t){return r in e?Object.defineProperty(e,r,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[r]=t,e}function a(e,r){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);r&&(o=o.filter((function(r){return Object.getOwnPropertyDescriptor(e,r).enumerable}))),t.push.apply(t,o)}return t}function i(e){for(var r=1;r=0||(n[t]=e[t]);return n}(e,r);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(n[t]=e[t])}return n}var l=o.createContext({}),s=function(e){var r=o.useContext(l),t=r;return e&&(t="function"==typeof e?e(r):i(i({},r),e)),t},g=function(e){var r=s(e.components);return o.createElement(l.Provider,{value:r},e.children)},p={inlineCode:"code",wrapper:function(e){var r=e.children;return o.createElement(o.Fragment,{},r)}},u=o.forwardRef((function(e,r){var t=e.components,n=e.mdxType,a=e.originalType,l=e.parentName,g=c(e,["components","mdxType","originalType","parentName"]),u=s(t),y=n,m=u["".concat(l,".").concat(y)]||u[y]||p[y]||a;return t?o.createElement(m,i(i({ref:r},g),{},{components:t})):o.createElement(m,i({ref:r},g))}));function y(e,r){var t=arguments,n=r&&r.mdxType;if("string"==typeof e||n){var a=t.length,i=new Array(a);i[0]=u;var c={};for(var l in r)hasOwnProperty.call(r,l)&&(c[l]=r[l]);c.originalType=e,c.mdxType="string"==typeof e?e:n,i[1]=c;for(var s=2;s{t.r(r),t.d(r,{assets:()=>g,contentTitle:()=>l,default:()=>y,frontMatter:()=>c,metadata:()=>s,toc:()=>p});var 
o=t(9668),n=t(1367),a=(t(6540),t(5680)),i=["components"],c={id:"login-docker-registry",title:"Login to Docker registries"},l=void 0,s={unversionedId:"login-docker-registry",id:"login-docker-registry",title:"Login to Docker registries",description:"Login to an external container registry can be helpful to pull private images, or increase the DockerHub pull limitations.",source:"@site/docs/login-docker-registry.md",sourceDirName:".",slug:"/login-docker-registry",permalink:"/docs/login-docker-registry",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/login-docker-registry.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"login-docker-registry",title:"Login to Docker registries"},sidebar:"docs",previous:{title:"Prepare a workshop",permalink:"/docs/guide-workshop"},next:{title:"Command Line Interface",permalink:"/docs/openshift-commands"}},g={},p=[{value:"UM Container registry",id:"um-container-registry",level:2},{value:"Logging in with Docker CLI",id:"logging-in-with-docker-cli",level:3},{value:"Using a Proxy Cache",id:"using-a-proxy-cache",level:3},{value:"Creating your own project",id:"creating-your-own-project",level:3},{value:"Using your own user",id:"using-your-own-user",level:3},{value:"Using a robot account",id:"using-a-robot-account",level:3},{value:"GitHub Container Registry",id:"github-container-registry",level:2},{value:"DockerHub",id:"dockerhub",level:2}],u={toc:p};function y(e){var r=e.components,t=(0,n.A)(e,i);return(0,a.yg)("wrapper",(0,o.A)({},u,t,{components:r,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"Login to an external container registry can be helpful to pull private images, or increase the DockerHub pull limitations."),(0,a.yg)("p",null,"You will need to create a secret in your project, then link it to the default service account of this project."),(0,a.yg)("p",null,"We detail here the process for ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", ",(0,a.yg)("a",{parentName:"p",href:"https://docs.github.com/en/packages/guides/about-github-container-registry"},"GitHub Container Registry")," and ",(0,a.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"Docker Hub"),", but the process is similar for any other container registry (e.g. ",(0,a.yg)("a",{parentName:"p",href:"https://quay.io/"},"quay.io"),")"),(0,a.yg)("h2",{id:"um-container-registry"},"UM Container registry"),(0,a.yg)("admonition",{title:"Access",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You need to be connected to the UM network to access this container registry.")),(0,a.yg)("p",null,"This container registry is available at ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),". Here you can login using your ",(0,a.yg)("strong",{parentName:"p"},"UM credentials"),' by clicking on the "Login via OIDC provider"'),(0,a.yg)("img",{class:"screenshot",src:"/img/screenshot_harbor_login_page.png",alt:"Harbor_login_page",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,a.yg)("admonition",{title:"Public Projects",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You don't need to follow the steps below if you are using one of the Public projects. 
These are available without credentials.")),(0,a.yg)("h3",{id:"logging-in-with-docker-cli"},"Logging in with Docker CLI"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Go to ",(0,a.yg)("a",{parentName:"li",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", click on your username in the top right corner followed by clicking on ",(0,a.yg)("strong",{parentName:"li"},"User Profile"),". Click on the ",(0,a.yg)("strong",{parentName:"li"},"Copy")," icon."),(0,a.yg)("li",{parentName:"ol"},"Login with your credentials:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"docker login cr.icts.unimaas.nl\n\n(Username)\n(Copied in Step 1)\n")),(0,a.yg)("h3",{id:"using-a-proxy-cache"},"Using a Proxy Cache"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Go to ",(0,a.yg)("a",{parentName:"li",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", look for a project of type ",(0,a.yg)("strong",{parentName:"li"},"Proxy Cache"),". For each of the mayor registries we created a ",(0,a.yg)("strong",{parentName:"li"},"Proxy Cache"),". Remember the project name, for example ",(0,a.yg)("strong",{parentName:"li"},"dockerhub"),"."),(0,a.yg)("li",{parentName:"ol"},"On the DSRI you can deploy an image like in this example:")),(0,a.yg)("img",{class:"screenshot",src:"/img/screenshot_harbor_proxy_cache.png",alt:"Harbor_proxy_cache",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,a.yg)("admonition",{title:"Docker CLI",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"The same concept can be applied using the docker CLI"),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre"},"docker pull cr.icts.unimaas.nl/dockerhub/ubuntu:22.04\n"))),(0,a.yg)("h3",{id:"creating-your-own-project"},"Creating your own project"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Go to ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", click on ",(0,a.yg)("strong",{parentName:"p"},"+ NEW PROJECT"),". Fill in the details of project name and Access Level (preferred method is to leave the checkbox unchecked).")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Click OK"))),(0,a.yg)("h3",{id:"using-your-own-user"},"Using your own user"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Go to ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", click on your username in the top right corner followed by clicking on ",(0,a.yg)("strong",{parentName:"p"},"User Profile"),". 
Click on the ",(0,a.yg)("strong",{parentName:"p"},"Copy")," icon.")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Create a secret to login to UM Harbor Container Registry in your project:"))),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"oc create secret docker-registry um-harbor-secret --docker-server=cr.icts.unimaas.nl --docker-username= --docker-password=\n")),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"Link the login secret to the default service account:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"oc secrets link default um-harbor-secret --for=pull\n")),(0,a.yg)("h3",{id:"using-a-robot-account"},"Using a robot account"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Go to ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", click on your project if you already created one.")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Click on the tab ",(0,a.yg)("strong",{parentName:"p"},"Robot Accounts"))),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Click on ",(0,a.yg)("strong",{parentName:"p"},"New Robot Account"))),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Create the Robot account to your liking")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Copy the secret or export it")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Create a secret to login to UM Harbor Container Registry in your project:"))),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"oc create secret docker-registry um-harbor-secret --docker-server=cr.icts.unimaas.nl --docker-username= --docker-password=\n")),(0,a.yg)("ol",{start:7},(0,a.yg)("li",{parentName:"ol"},"Link the login secret to the default service account:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"oc secrets link default um-harbor-secret --for=pull\n")),(0,a.yg)("h2",{id:"github-container-registry"},"GitHub Container Registry"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Go to ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/settings/tokens"},"GitHub Settings"),", and create a Personal Access Token (PAT) which will be used as password to connect to the GitHub Container Registry")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Create a secret to login to GitHub Container Registry in your project:"))),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc create secret docker-registry github-ghcr-secret --docker-server=ghcr.io --docker-username= --docker-password= --docker-email=\n")),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"Link the login secret to the default service account:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc secrets link default github-ghcr-secret --for=pull\n")),(0,a.yg)("h2",{id:"dockerhub"},"DockerHub"),(0,a.yg)("admonition",{title:"Increase DockerHub limitations",type:"tip"},(0,a.yg)("p",{parentName:"admonition"},"Login with DockerHub also increase the DockerHub limitations to pull images in your project")),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Create a secret to login to DockerHub in your project:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc create secret docker-registry dockerhub-secret --docker-server=docker.io --docker-username= --docker-password= 
--docker-email=\n")),(0,a.yg)("ol",{start:2},(0,a.yg)("li",{parentName:"ol"},"Link the login secret to the default service account:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc secrets link default dockerhub-secret --for=pull\n")))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3836],{5680:(e,r,t)=>{t.d(r,{xA:()=>g,yg:()=>y});var o=t(6540);function n(e,r,t){return r in e?Object.defineProperty(e,r,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[r]=t,e}function a(e,r){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);r&&(o=o.filter((function(r){return Object.getOwnPropertyDescriptor(e,r).enumerable}))),t.push.apply(t,o)}return t}function i(e){for(var r=1;r=0||(n[t]=e[t]);return n}(e,r);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(n[t]=e[t])}return n}var l=o.createContext({}),s=function(e){var r=o.useContext(l),t=r;return e&&(t="function"==typeof e?e(r):i(i({},r),e)),t},g=function(e){var r=s(e.components);return o.createElement(l.Provider,{value:r},e.children)},p={inlineCode:"code",wrapper:function(e){var r=e.children;return o.createElement(o.Fragment,{},r)}},u=o.forwardRef((function(e,r){var t=e.components,n=e.mdxType,a=e.originalType,l=e.parentName,g=c(e,["components","mdxType","originalType","parentName"]),u=s(t),y=n,m=u["".concat(l,".").concat(y)]||u[y]||p[y]||a;return t?o.createElement(m,i(i({ref:r},g),{},{components:t})):o.createElement(m,i({ref:r},g))}));function y(e,r){var t=arguments,n=r&&r.mdxType;if("string"==typeof e||n){var a=t.length,i=new Array(a);i[0]=u;var c={};for(var l in r)hasOwnProperty.call(r,l)&&(c[l]=r[l]);c.originalType=e,c.mdxType="string"==typeof e?e:n,i[1]=c;for(var s=2;s{t.r(r),t.d(r,{assets:()=>g,contentTitle:()=>l,default:()=>y,frontMatter:()=>c,metadata:()=>s,toc:()=>p});var o=t(9668),n=t(1367),a=(t(6540),t(5680)),i=["components"],c={id:"login-docker-registry",title:"Login to Docker registries"},l=void 0,s={unversionedId:"login-docker-registry",id:"login-docker-registry",title:"Login to Docker registries",description:"Login to an external container registry can be helpful to pull private images, or increase the DockerHub pull limitations.",source:"@site/docs/login-docker-registry.md",sourceDirName:".",slug:"/login-docker-registry",permalink:"/docs/login-docker-registry",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/login-docker-registry.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"login-docker-registry",title:"Login to Docker registries"},sidebar:"docs",previous:{title:"Prepare a workshop",permalink:"/docs/guide-workshop"},next:{title:"Command Line Interface",permalink:"/docs/openshift-commands"}},g={},p=[{value:"UM Container registry",id:"um-container-registry",level:2},{value:"Logging in with Docker CLI",id:"logging-in-with-docker-cli",level:3},{value:"Using a Proxy Cache",id:"using-a-proxy-cache",level:3},{value:"Creating your own project",id:"creating-your-own-project",level:3},{value:"Using your own user",id:"using-your-own-user",level:3},{value:"Using a robot account",id:"using-a-robot-account",level:3},{value:"GitHub Container Registry",id:"github-container-registry",level:2},{value:"DockerHub",id:"dockerhub",level:2}],u={toc:p};function y(e){var 
r=e.components,t=(0,n.A)(e,i);return(0,a.yg)("wrapper",(0,o.A)({},u,t,{components:r,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"Logging in to an external container registry can be helpful to pull private images, or to increase the DockerHub pull limits."),(0,a.yg)("p",null,"You will need to create a secret in your project, then link it to the default service account of this project."),(0,a.yg)("p",null,"We detail here the process for ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", ",(0,a.yg)("a",{parentName:"p",href:"https://docs.github.com/en/packages/guides/about-github-container-registry"},"GitHub Container Registry")," and ",(0,a.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"Docker Hub"),", but the process is similar for any other container registry (e.g. ",(0,a.yg)("a",{parentName:"p",href:"https://quay.io/"},"quay.io"),")."),(0,a.yg)("h2",{id:"um-container-registry"},"UM Container registry"),(0,a.yg)("admonition",{title:"Access",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You need to be connected to the UM network to access this container registry.")),(0,a.yg)("p",null,"This container registry is available at ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),". Here you can log in using your ",(0,a.yg)("strong",{parentName:"p"},"UM credentials"),' by clicking on "Login via OIDC provider".'),(0,a.yg)("img",{class:"screenshot",src:"/img/screenshot_harbor_login_page.png",alt:"Harbor_login_page",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,a.yg)("admonition",{title:"Public Projects",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You don't need to follow the steps below if you are using one of the Public projects. These are available without credentials.")),(0,a.yg)("h3",{id:"logging-in-with-docker-cli"},"Logging in with Docker CLI"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Go to ",(0,a.yg)("a",{parentName:"li",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", click on your username in the top right corner followed by clicking on ",(0,a.yg)("strong",{parentName:"li"},"User Profile"),". Click on the ",(0,a.yg)("strong",{parentName:"li"},"Copy")," icon."),(0,a.yg)("li",{parentName:"ol"},"Log in with your credentials:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"docker login cr.icts.unimaas.nl\n\n<username> (Username)\n<cli-secret> (Copied in Step 1)\n")),(0,a.yg)("h3",{id:"using-a-proxy-cache"},"Using a Proxy Cache"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Go to ",(0,a.yg)("a",{parentName:"li",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", look for a project of type ",(0,a.yg)("strong",{parentName:"li"},"Proxy Cache"),". For each of the major registries we created a ",(0,a.yg)("strong",{parentName:"li"},"Proxy Cache"),". 
Remember the project name, for example ",(0,a.yg)("strong",{parentName:"li"},"dockerhub"),"."),(0,a.yg)("li",{parentName:"ol"},"On the DSRI you can deploy an image like in this example:")),(0,a.yg)("img",{class:"screenshot",src:"/img/screenshot_harbor_proxy_cache.png",alt:"Harbor_proxy_cache",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,a.yg)("admonition",{title:"Docker CLI",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"The same concept can be applied using the docker CLI."),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre"},"docker pull cr.icts.unimaas.nl/dockerhub/ubuntu:22.04\n"))),(0,a.yg)("h3",{id:"creating-your-own-project"},"Creating your own project"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Go to ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", click on ",(0,a.yg)("strong",{parentName:"p"},"+ NEW PROJECT"),". Fill in the details of project name and Access Level (preferred method is to leave the checkbox unchecked).")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Click OK."))),(0,a.yg)("h3",{id:"using-your-own-user"},"Using your own user"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Go to ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", click on your username in the top right corner followed by clicking on ",(0,a.yg)("strong",{parentName:"p"},"User Profile"),". Click on the ",(0,a.yg)("strong",{parentName:"p"},"Copy")," icon.")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Create a secret to log in to UM Harbor Container Registry in your project:"))),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"oc create secret docker-registry um-harbor-secret --docker-server=cr.icts.unimaas.nl --docker-username=<username> --docker-password=<cli-secret>\n")),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"Link the login secret to the default service account:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"oc secrets link default um-harbor-secret --for=pull\n")),(0,a.yg)("h3",{id:"using-a-robot-account"},"Using a robot account"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Go to ",(0,a.yg)("a",{parentName:"p",href:"https://cr.icts.unimaas.nl"},"UM Container registry"),", click on your project if you already created one.")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Click on the tab ",(0,a.yg)("strong",{parentName:"p"},"Robot Accounts"))),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Click on ",(0,a.yg)("strong",{parentName:"p"},"New Robot Account"))),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Create the Robot account to your liking")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Copy the secret or export it")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Create a secret to log in to UM Harbor Container Registry in your project:"))),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"oc create secret docker-registry um-harbor-secret --docker-server=cr.icts.unimaas.nl --docker-username=<robot-username> --docker-password=<robot-secret>\n")),(0,a.yg)("ol",{start:7},(0,a.yg)("li",{parentName:"ol"},"Link the login secret to the default service account:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre"},"oc secrets link default um-harbor-secret --for=pull\n")),(0,a.yg)("h2",{id:"github-container-registry"},"GitHub Container 
Registry"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Go to ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/settings/tokens"},"GitHub Settings"),", and create a Personal Access Token (PAT) which will be used as password to connect to the GitHub Container Registry")),(0,a.yg)("li",{parentName:"ol"},(0,a.yg)("p",{parentName:"li"},"Create a secret to login to GitHub Container Registry in your project:"))),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc create secret docker-registry github-ghcr-secret --docker-server=ghcr.io --docker-username= --docker-password= --docker-email=\n")),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"Link the login secret to the default service account:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc secrets link default github-ghcr-secret --for=pull\n")),(0,a.yg)("h2",{id:"dockerhub"},"DockerHub"),(0,a.yg)("admonition",{title:"Increase DockerHub limitations",type:"tip"},(0,a.yg)("p",{parentName:"admonition"},"Login with DockerHub also increase the DockerHub limitations to pull images in your project")),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Create a secret to login to DockerHub in your project:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc create secret docker-registry dockerhub-secret --docker-server=docker.io --docker-username= --docker-password= --docker-email=\n")),(0,a.yg)("ol",{start:2},(0,a.yg)("li",{parentName:"ol"},"Link the login secret to the default service account:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc secrets link default dockerhub-secret --for=pull\n")))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/4ba7e5a3.07c00a5e.js b/assets/js/4ba7e5a3.942b72bd.js similarity index 99% rename from assets/js/4ba7e5a3.07c00a5e.js rename to assets/js/4ba7e5a3.942b72bd.js index 0e67b974c..d0851cc74 100644 --- a/assets/js/4ba7e5a3.07c00a5e.js +++ b/assets/js/4ba7e5a3.942b72bd.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9591],{5680:(e,t,n)=>{n.d(t,{xA:()=>u,yg:()=>m});var a=n(6540);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function i(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var s=a.createContext({}),c=function(e){var t=a.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},u=function(e){var t=c(e.components);return a.createElement(s.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var n=e.components,r=e.mdxType,o=e.originalType,s=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),d=c(n),m=r,g=d["".concat(s,".").concat(m)]||d[m]||p[m]||o;return n?a.createElement(g,i(i({ref:t},u),{},{components:n})):a.createElement(g,i({ref:t},u))}));function m(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=n.length,i=new Array(o);i[0]=d;var l={};for(var s in 
t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:r,i[1]=l;for(var c=2;c{n.r(t),n.d(t,{assets:()=>u,contentTitle:()=>s,default:()=>m,frontMatter:()=>l,metadata:()=>c,toc:()=>p});var a=n(9668),r=n(1367),o=(n(6540),n(5680)),i=["components"],l={id:"contribute",title:"Contribute"},s="Contribute",c={unversionedId:"contribute",id:"contribute",title:"Contribute",description:"Check if there are issues related to your contribution, or post a new issue to discuss improvement to the documentation.",source:"@site/docs/contribute.md",sourceDirName:".",slug:"/contribute",permalink:"/docs/contribute",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/contribute.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"contribute",title:"Contribute"},sidebar:"docs",previous:{title:"Run CWL workflows",permalink:"/docs/workflows-cwl"},next:{title:"Enabling VPN access in WSL2",permalink:"/docs/enabling-vpn-wsl"}},u={},p=[{value:"\u26a1 Quick edit on GitHub",id:"-quick-edit-on-github",level:2},{value:"\ud83c\udfd7\ufe0f Larger changes locally",id:"\ufe0f-larger-changes-locally",level:2},{value:"\ud83d\udd04 Automated deployment",id:"-automated-deployment",level:2},{value:"\ud83d\udcdd Help",id:"-help",level:2},{value:"\ud83d\udd0e Files locations",id:"-files-locations",level:3},{value:"\ud83e\udd84 Markdown tip",id:"-markdown-tip",level:3},{value:"\u2714\ufe0f Pull Request process",id:"\ufe0f-pull-request-process",level:2}],d={toc:p};function m(e){var t=e.components,n=(0,r.A)(e,i);return(0,o.yg)("wrapper",(0,a.A)({},d,n,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h1",{id:"contribute"},"Contribute"),(0,o.yg)("p",null,"Check if there are ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"issues")," related to your contribution, or post a ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues/new"},"new issue")," to discuss improvement to the documentation. "),(0,o.yg)("a",{href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues",target:"_blank",rel:"noopener noreferrer","aria-label":"GitHub issues"},(0,o.yg)("img",{alt:"GitHub issues",src:"https://img.shields.io/github/issues/MaastrichtU-IDS/dsri-documentation?label=dsri-documentation"})),(0,o.yg)("admonition",{title:"Fork this repository",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Otherwise you will need to first ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/fork"},"fork this repository"),", then send a pull request when your changes have been pushed.")),(0,o.yg)("admonition",{title:"Direct change if permission",type:"note"},(0,o.yg)("p",{parentName:"admonition"},"If you are part of the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS"},"MaastrichtU-IDS organization on GitHub")," you can directly create a new branch to make your change in the main repository. ")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"-quick-edit-on-github"},"\u26a1 Quick edit on GitHub"),(0,o.yg)("p",null,"You can really easily make quick changes directly on the GitHub website by clicking the ",(0,o.yg)("strong",{parentName:"p"},"Edit this page")," button at the bottom left of each documentation page. 
Or browsing to your forked repository."),(0,o.yg)("p",null,"For example to edit the introduction page you can go to ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/introduction.md"},"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/introduction.md")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"\ufe0f-larger-changes-locally"},"\ud83c\udfd7\ufe0f Larger changes locally"),(0,o.yg)("p",null,"To edit the documentation it is easier to clone the repository on your laptop, and use a ",(0,o.yg)("a",{parentName:"p",href:"https://typora.io"},"user-friendly markdown editor"),"."),(0,o.yg)("admonition",{title:"Use a Markdown editor",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"We strongly recommend you to use a markdown editor, such as ",(0,o.yg)("a",{parentName:"p",href:"https://typora.io"},"Typora"),". It makes writing documentation much faster, and more enjoyable.")),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},"Clone the repository on your machine:")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"git clone https://github.com/MaastrichtU-IDS/dsri-documentation.git\ncd dsri-documentation\n")),(0,o.yg)("ol",{start:2},(0,o.yg)("li",{parentName:"ol"},"Create a new branch from the ",(0,o.yg)("inlineCode",{parentName:"li"},"master")," branch \ud83d\udd4a\ufe0f")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"git checkout -b my-branch\n")),(0,o.yg)("ol",{start:3},(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Add your changes in this branch \u2712\ufe0f")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Start the website on ",(0,o.yg)("a",{parentName:"p",href:"http://localhost:3000"},"http://localhost:3000")," to test it:"))),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"cd website\nyarn install\nyarn start\n")),(0,o.yg)("admonition",{title:"Send a pull request",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Send a pull request to the ",(0,o.yg)("inlineCode",{parentName:"p"},"master")," branch when your changes are done")),(0,o.yg)("admonition",{title:"Development documentation",type:"note"},(0,o.yg)("p",{parentName:"admonition"},"Read more about running the API in development at ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation#run-for-development"},"https://github.com/MaastrichtU-IDS/dsri-documentation#run-for-development"))),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"-automated-deployment"},"\ud83d\udd04 Automated deployment"),(0,o.yg)("p",null,"The documentation website is automatically updated and redeployed at each change to the ",(0,o.yg)("inlineCode",{parentName:"p"},"main")," branch using a ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/actions"},"GitHub Actions workflow"),"."),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/actions?query=workflow%3A%22Publish+to+GitHub+Pages%22"},(0,o.yg)("img",{parentName:"a",src:"https://github.com/MaastrichtU-IDS/dsri-documentation/workflows/Publish%20to%20GitHub%20Pages/badge.svg",alt:"Publish to GitHub Pages"}))),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"-help"},"\ud83d\udcdd Help"),(0,o.yg)("p",null,"Most pages of this website are written in Markdown, hence they are really easy to edit, especially when you are using a 
",(0,o.yg)("a",{parentName:"p",href:"https://typora.io/"},"convenient markdown editor"),". Only the ",(0,o.yg)("inlineCode",{parentName:"p"},"index.js")," page is written in React JavaScript."),(0,o.yg)("h3",{id:"-files-locations"},"\ud83d\udd0e Files locations"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Main DSRI documentation markdown files in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/docs"),(0,o.yg)("ul",{parentName:"li"},(0,o.yg)("li",{parentName:"ul"},"Left docs menu defined in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/sidebars.json")," "))),(0,o.yg)("li",{parentName:"ul"},"Blog articles as markdown files in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/docs")),(0,o.yg)("li",{parentName:"ul"},"Index and contribute pages in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/src/pages")),(0,o.yg)("li",{parentName:"ul"},"Images in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/src/static/img")),(0,o.yg)("li",{parentName:"ul"},"Website configuration file in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/docusaurus.config.js")," ")),(0,o.yg)("h3",{id:"-markdown-tip"},"\ud83e\udd84 Markdown tip"),(0,o.yg)("admonition",{title:"Colorful boxes",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Use the following tags to create colorful boxes in markdown files:")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-markdown"},":::note You can specify an optional title\nGrey box\n:::\n\n:::tip Green box\nThe content and title *can* include markdown.\n:::\n\n:::info Blue box\nUseful information.\n:::\n\n:::caution Be careful!\nYellow box\n:::\n\n:::danger Fire red box\nDanger danger, mayday!\n:::\n")),(0,o.yg)("h2",{id:"\ufe0f-pull-request-process"},"\u2714\ufe0f Pull Request process"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},"Before sending a pull request make sure the DSRI documentation website still work as expected with the new changes properly integrated:")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"cd website\nyarn install\nyarn start\n")),(0,o.yg)("ol",{start:2},(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/compare"},"Send a pull request")," to the ",(0,o.yg)("inlineCode",{parentName:"li"},"master")," branch."),(0,o.yg)("li",{parentName:"ol"},"Project contributors will review your change as soon as they can!")))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9591],{5680:(e,t,n)=>{n.d(t,{xA:()=>u,yg:()=>m});var a=n(6540);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function i(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var s=a.createContext({}),c=function(e){var t=a.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},u=function(e){var t=c(e.components);return a.createElement(s.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var 
n=e.components,r=e.mdxType,o=e.originalType,s=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),d=c(n),m=r,g=d["".concat(s,".").concat(m)]||d[m]||p[m]||o;return n?a.createElement(g,i(i({ref:t},u),{},{components:n})):a.createElement(g,i({ref:t},u))}));function m(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=n.length,i=new Array(o);i[0]=d;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:r,i[1]=l;for(var c=2;c{n.r(t),n.d(t,{assets:()=>u,contentTitle:()=>s,default:()=>m,frontMatter:()=>l,metadata:()=>c,toc:()=>p});var a=n(9668),r=n(1367),o=(n(6540),n(5680)),i=["components"],l={id:"contribute",title:"Contribute"},s="Contribute",c={unversionedId:"contribute",id:"contribute",title:"Contribute",description:"Check if there are issues related to your contribution, or post a new issue to discuss improvement to the documentation.",source:"@site/docs/contribute.md",sourceDirName:".",slug:"/contribute",permalink:"/docs/contribute",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/contribute.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"contribute",title:"Contribute"},sidebar:"docs",previous:{title:"Run CWL workflows",permalink:"/docs/workflows-cwl"},next:{title:"Enabling VPN access in WSL2",permalink:"/docs/enabling-vpn-wsl"}},u={},p=[{value:"\u26a1 Quick edit on GitHub",id:"-quick-edit-on-github",level:2},{value:"\ud83c\udfd7\ufe0f Larger changes locally",id:"\ufe0f-larger-changes-locally",level:2},{value:"\ud83d\udd04 Automated deployment",id:"-automated-deployment",level:2},{value:"\ud83d\udcdd Help",id:"-help",level:2},{value:"\ud83d\udd0e Files locations",id:"-files-locations",level:3},{value:"\ud83e\udd84 Markdown tip",id:"-markdown-tip",level:3},{value:"\u2714\ufe0f Pull Request process",id:"\ufe0f-pull-request-process",level:2}],d={toc:p};function m(e){var t=e.components,n=(0,r.A)(e,i);return(0,o.yg)("wrapper",(0,a.A)({},d,n,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h1",{id:"contribute"},"Contribute"),(0,o.yg)("p",null,"Check if there are ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"issues")," related to your contribution, or post a ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues/new"},"new issue")," to discuss improvement to the documentation. "),(0,o.yg)("a",{href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues",target:"_blank",rel:"noopener noreferrer","aria-label":"GitHub issues"},(0,o.yg)("img",{alt:"GitHub issues",src:"https://img.shields.io/github/issues/MaastrichtU-IDS/dsri-documentation?label=dsri-documentation"})),(0,o.yg)("admonition",{title:"Fork this repository",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Otherwise you will need to first ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/fork"},"fork this repository"),", then send a pull request when your changes have been pushed.")),(0,o.yg)("admonition",{title:"Direct change if permission",type:"note"},(0,o.yg)("p",{parentName:"admonition"},"If you are part of the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS"},"MaastrichtU-IDS organization on GitHub")," you can directly create a new branch to make your change in the main repository. 
")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"-quick-edit-on-github"},"\u26a1 Quick edit on GitHub"),(0,o.yg)("p",null,"You can really easily make quick changes directly on the GitHub website by clicking the ",(0,o.yg)("strong",{parentName:"p"},"Edit this page")," button at the bottom left of each documentation page. Or browsing to your forked repository."),(0,o.yg)("p",null,"For example to edit the introduction page you can go to ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/introduction.md"},"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/introduction.md")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"\ufe0f-larger-changes-locally"},"\ud83c\udfd7\ufe0f Larger changes locally"),(0,o.yg)("p",null,"To edit the documentation it is easier to clone the repository on your laptop, and use a ",(0,o.yg)("a",{parentName:"p",href:"https://typora.io"},"user-friendly markdown editor"),"."),(0,o.yg)("admonition",{title:"Use a Markdown editor",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"We strongly recommend you to use a markdown editor, such as ",(0,o.yg)("a",{parentName:"p",href:"https://typora.io"},"Typora"),". It makes writing documentation much faster, and more enjoyable.")),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},"Clone the repository on your machine:")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"git clone https://github.com/MaastrichtU-IDS/dsri-documentation.git\ncd dsri-documentation\n")),(0,o.yg)("ol",{start:2},(0,o.yg)("li",{parentName:"ol"},"Create a new branch from the ",(0,o.yg)("inlineCode",{parentName:"li"},"master")," branch \ud83d\udd4a\ufe0f")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"git checkout -b my-branch\n")),(0,o.yg)("ol",{start:3},(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Add your changes in this branch \u2712\ufe0f")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Start the website on ",(0,o.yg)("a",{parentName:"p",href:"http://localhost:3000"},"http://localhost:3000")," to test it:"))),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"cd website\nyarn install\nyarn start\n")),(0,o.yg)("admonition",{title:"Send a pull request",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Send a pull request to the ",(0,o.yg)("inlineCode",{parentName:"p"},"master")," branch when your changes are done")),(0,o.yg)("admonition",{title:"Development documentation",type:"note"},(0,o.yg)("p",{parentName:"admonition"},"Read more about running the API in development at ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation#run-for-development"},"https://github.com/MaastrichtU-IDS/dsri-documentation#run-for-development"))),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"-automated-deployment"},"\ud83d\udd04 Automated deployment"),(0,o.yg)("p",null,"The documentation website is automatically updated and redeployed at each change to the ",(0,o.yg)("inlineCode",{parentName:"p"},"main")," branch using a ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/actions"},"GitHub Actions 
workflow"),"."),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/actions?query=workflow%3A%22Publish+to+GitHub+Pages%22"},(0,o.yg)("img",{parentName:"a",src:"https://github.com/MaastrichtU-IDS/dsri-documentation/workflows/Publish%20to%20GitHub%20Pages/badge.svg",alt:"Publish to GitHub Pages"}))),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"-help"},"\ud83d\udcdd Help"),(0,o.yg)("p",null,"Most pages of this website are written in Markdown, hence they are really easy to edit, especially when you are using a ",(0,o.yg)("a",{parentName:"p",href:"https://typora.io/"},"convenient markdown editor"),". Only the ",(0,o.yg)("inlineCode",{parentName:"p"},"index.js")," page is written in React JavaScript."),(0,o.yg)("h3",{id:"-files-locations"},"\ud83d\udd0e Files locations"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Main DSRI documentation markdown files in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/docs"),(0,o.yg)("ul",{parentName:"li"},(0,o.yg)("li",{parentName:"ul"},"Left docs menu defined in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/sidebars.json")," "))),(0,o.yg)("li",{parentName:"ul"},"Blog articles as markdown files in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/docs")),(0,o.yg)("li",{parentName:"ul"},"Index and contribute pages in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/src/pages")),(0,o.yg)("li",{parentName:"ul"},"Images in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/src/static/img")),(0,o.yg)("li",{parentName:"ul"},"Website configuration file in ",(0,o.yg)("inlineCode",{parentName:"li"},"website/docusaurus.config.js")," ")),(0,o.yg)("h3",{id:"-markdown-tip"},"\ud83e\udd84 Markdown tip"),(0,o.yg)("admonition",{title:"Colorful boxes",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"Use the following tags to create colorful boxes in markdown files:")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-markdown"},":::note You can specify an optional title\nGrey box\n:::\n\n:::tip Green box\nThe content and title *can* include markdown.\n:::\n\n:::info Blue box\nUseful information.\n:::\n\n:::caution Be careful!\nYellow box\n:::\n\n:::danger Fire red box\nDanger danger, mayday!\n:::\n")),(0,o.yg)("h2",{id:"\ufe0f-pull-request-process"},"\u2714\ufe0f Pull Request process"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},"Before sending a pull request make sure the DSRI documentation website still work as expected with the new changes properly integrated:")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"cd website\nyarn install\nyarn start\n")),(0,o.yg)("ol",{start:2},(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/compare"},"Send a pull request")," to the ",(0,o.yg)("inlineCode",{parentName:"li"},"master")," branch."),(0,o.yg)("li",{parentName:"ol"},"Project contributors will review your change as soon as they can!")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/521adc3d.4acaee7f.js b/assets/js/521adc3d.4741c575.js similarity index 99% rename from assets/js/521adc3d.4acaee7f.js rename to assets/js/521adc3d.4741c575.js index 8d72eba37..227ba5775 100644 --- a/assets/js/521adc3d.4acaee7f.js +++ b/assets/js/521adc3d.4741c575.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7234],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>y});var r=a(6540);function n(e,t,a){return t in 
e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function p(e){for(var t=1;t=0||(n[a]=e[a]);return n}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(n[a]=e[a])}return n}var s=r.createContext({}),l=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):p(p({},t),e)),a},c=function(e){var t=l(e.components);return r.createElement(s.Provider,{value:t},e.children)},g={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var a=e.components,n=e.mdxType,o=e.originalType,s=e.parentName,c=i(e,["components","mdxType","originalType","parentName"]),m=l(a),y=n,d=m["".concat(s,".").concat(y)]||m[y]||g[y]||o;return a?r.createElement(d,p(p({ref:t},c),{},{components:a})):r.createElement(d,p({ref:t},c))}));function y(e,t){var a=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var o=a.length,p=new Array(o);p[0]=m;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:n,p[1]=i;for(var l=2;l{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>y,frontMatter:()=>i,metadata:()=>l,toc:()=>g});var r=a(9668),n=a(1367),o=(a(6540),a(5680)),p=["components"],i={id:"project-management",title:"Create a new Project"},s=void 0,l={unversionedId:"project-management",id:"project-management",title:"Create a new Project",description:"Create a project using the web UI",source:"@site/docs/create-dsri-project.md",sourceDirName:".",slug:"/project-management",permalink:"/docs/project-management",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/create-dsri-project.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"project-management",title:"Create a new Project"},sidebar:"docs",previous:{title:"Delete objects (advanced)",permalink:"/docs/openshift-delete-objects"},next:{title:"Libraries for Machine Learning",permalink:"/docs/tools-machine-learning"}},c={},g=[{value:"Create a project using the web UI",id:"create-a-project-using-the-web-ui",level:2},{value:"Create a project using the CLI",id:"create-a-project-using-the-cli",level:2},{value:"Access permissions for developers to your project",id:"access-permissions-for-developers-to-your-project",level:2},{value:"Delete a project using the web UI",id:"delete-a-project-using-the-web-ui",level:2},{value:"Delete a project using the CLI",id:"delete-a-project-using-the-cli",level:2}],m={toc:g};function y(e){var t=e.components,a=(0,n.A)(e,p);return(0,o.yg)("wrapper",(0,r.A)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"create-a-project-using-the-web-ui"},"Create a project using the web UI"),(0,o.yg)("admonition",{title:"Avoid creating multiple projects",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"Please avoid creating multiple projects unnecessarily. 
Be responsible and delete applications you are not using anymore in your project to free resources, instead of creating a new project with a different number at the end."),(0,o.yg)("p",{parentName:"admonition"},"It is also easier to connect your different applications containers and storages when you create them in the same project.")),(0,o.yg)("p",null,"You can create a project using the ",(0,o.yg)("strong",{parentName:"p"},"Developer")," perspective, as follows:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click the ",(0,o.yg)("strong",{parentName:"p"},"Project")," drop-down menu to see a list of all available projects. Select ",(0,o.yg)("strong",{parentName:"p"},"Create Project"),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"In the ",(0,o.yg)("strong",{parentName:"p"},"Create Project")," dialog box, enter a unique name in the ",(0,o.yg)("strong",{parentName:"p"},"Name")," field. Use a short and meaningful name for your project as the project identifier is unique across all projects, such as ",(0,o.yg)("inlineCode",{parentName:"p"},"workspace-yourname")," or ",(0,o.yg)("inlineCode",{parentName:"p"},"ml-covid-pathways"))),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Add the ",(0,o.yg)("strong",{parentName:"p"},"Display Name")," ",(0,o.yg)("inlineCode",{parentName:"p"}," DSR Workshop"),"and ",(0,o.yg)("strong",{parentName:"p"},"Description")," ",(0,o.yg)("inlineCode",{parentName:"p"}," DSRI Community Workshop Projects"),"details for the project.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Create"),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Use the left navigation panel to navigate to the ",(0,o.yg)("strong",{parentName:"p"},"Project")," view and see the dashboard for your project."))),(0,o.yg)("img",{src:"/img/screenshot_create_project.png",alt:"Create Project",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("ol",{start:6},(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Optional:"),(0,o.yg)("ul",{parentName:"li"},(0,o.yg)("li",{parentName:"ul"},"Use the ",(0,o.yg)("strong",{parentName:"li"},"Project")," drop-down menu at the top of the screen and select ",(0,o.yg)("strong",{parentName:"li"},"all projects")," to list all of the projects in your cluster."),(0,o.yg)("li",{parentName:"ul"},"Use the ",(0,o.yg)("strong",{parentName:"li"},"Details")," tab to see the project details."),(0,o.yg)("li",{parentName:"ul"},"If you have adequate permissions for a project, you can use the ",(0,o.yg)("strong",{parentName:"li"},"Project Access")," tab to provide or revoke ",(0,o.yg)("em",{parentName:"li"},"administrator"),", ",(0,o.yg)("em",{parentName:"li"},"edit"),", and ",(0,o.yg)("em",{parentName:"li"},"view")," privileges for the project.")))),(0,o.yg)("h2",{id:"create-a-project-using-the-cli"},"Create a project using the CLI"),(0,o.yg)("p",null,"You need to be logged in to the DSRI and ",(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-install#login-in-the-terminal-with-oc"},"copy the login command"),"."),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"Run"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},'oc new-project --description="" 
--display-name=""\n'))),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"Example"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},'oc new-project dsri-workshop --description="DSRI Workshop" \\\n --display-name="DSRI Community Workshop Projects"\n')))),(0,o.yg)("admonition",{title:"Reuse your project",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"Only create new projects when it is necessary (for a new project). You can easily ",(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/project-management#delete-a-project-using-the-web-ui"},"clean up your current project")," instead of creating a new one every time you want to try something.")),(0,o.yg)("h2",{id:"access-permissions-for-developers-to-your-project"},"Access permissions for developers to your project"),(0,o.yg)("p",null,"You can use the ",(0,o.yg)("strong",{parentName:"p"},"Project")," view in the ",(0,o.yg)("strong",{parentName:"p"},"Developer")," perspective to grant or revoke access permissions to your project."),(0,o.yg)("p",null,"To add users to your project and provide ",(0,o.yg)("strong",{parentName:"p"},"Admin"),", ",(0,o.yg)("strong",{parentName:"p"},"Edit"),", or ",(0,o.yg)("strong",{parentName:"p"},"View")," access to them:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"In the ",(0,o.yg)("strong",{parentName:"p"},"Developer")," perspective, navigate to the ",(0,o.yg)("strong",{parentName:"p"},"Project")," view.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"In the ",(0,o.yg)("strong",{parentName:"p"},"Project")," page, select the ",(0,o.yg)("strong",{parentName:"p"},"Project Access")," tab.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Add Access")," to add a new row of permissions to the default ones."),(0,o.yg)("img",{src:"/img/screenshot_project_access.png",alt:"Project Access",style:{maxWidth:"100%",maxHeight:"100%"}}))),(0,o.yg)("ol",{start:4},(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Enter the user name, click the ",(0,o.yg)("strong",{parentName:"p"},"Select a role")," drop-down list, and select an appropriate role.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Save")," to add the new permissions."))),(0,o.yg)("p",null,"You can also use:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"The ",(0,o.yg)("strong",{parentName:"p"},"Select a role")," drop-down list, to modify the access permissions of an existing user.")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"The ",(0,o.yg)("strong",{parentName:"p"},"Remove Access")," icon, to completely remove the access permissions of an existing user to the project."))),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"}," Advanced role-based access control is managed in the ",(0,o.yg)("strong",{parentName:"p"},"Roles")," and ",(0,o.yg)("strong",{parentName:"p"},"Roles Binding")," views in the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," perspective")),(0,o.yg)("h2",{id:"delete-a-project-using-the-web-ui"},"Delete a project using the web UI"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Navigate to ",(0,o.yg)("strong",{parentName:"p"},"Home")," \u2192 
",(0,o.yg)("strong",{parentName:"p"},"Projects"),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Locate the project that you want to delete from the list of projects.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"On the far right side of the project listing, select ",(0,o.yg)("strong",{parentName:"p"},"Delete Project")," from the Options menu ",(0,o.yg)("img",{parentName:"p",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABsAAAAjCAIAAADqn+bCAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAA+0lEQVRIie2WMQqEMBBFJ47gUXRBLyBYqbUXULCx9CR2XsAb6AlUEM9kpckW7obdZhwWYWHXX/3i8TPJZEKEUgpOlXFu3JX4V4kmB2qaZhgGKSUiZlkWxzEBC84N9zxv27bdO47Tti0Bs3at4wBgXVca/lJnfN/XPggCGmadIwAsywIAiGhZFk1ydy2EYJKgGCqK4vZUVVU0zKpxnmftp2mi4S/1GhG1N82DMWNNYVmW4zgqpRAxTVMa5t4evlg11nXd9/1eY57nSZIQMKtG13WllLu3bbvrOgJmdUbHwfur8Xniqw6Hh5UYRdGDNowwDA+WvP4UV+JPJ94B1gKUWcTOCT0AAAAASUVORK5CYII=",alt:"kebab"}),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"When the ",(0,o.yg)("strong",{parentName:"p"},"Delete Project")," pane opens, enter the name of the project that you want to delete in the field.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Delete"),"."),(0,o.yg)("img",{src:"/img/screenshot_delete_project.png",alt:"Delete Project",style:{maxWidth:"100%",maxHeight:"100%"}}))),(0,o.yg)("h2",{id:"delete-a-project-using-the-cli"},"Delete a project using the CLI"),(0,o.yg)("admonition",{title:"Delete Project",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"When you delete a project, the server updates the project status to ",(0,o.yg)("strong",{parentName:"p"},"Terminating")," from ",(0,o.yg)("strong",{parentName:"p"},"Active"),". Then, the server clears all content from a project that is in the ",(0,o.yg)("strong",{parentName:"p"},"Terminating")," state before finally removing the project. While a project is in ",(0,o.yg)("strong",{parentName:"p"},"Terminating")," status, you cannot add new content to the project. 
Projects can be deleted from the CLI or the web console.")),(0,o.yg)("p",null,"You need to be logged in to the DSRI and ",(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-install#login-in-the-terminal-with-oc"},"copy the login command"),"."),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"Run"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete project <project_name>\n")))),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"Example"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete project dsri-workshop\n")))))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7234],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>y});var r=a(6540);function n(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function p(e){for(var t=1;t=0||(n[a]=e[a]);return n}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(n[a]=e[a])}return n}var s=r.createContext({}),l=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):p(p({},t),e)),a},c=function(e){var t=l(e.components);return r.createElement(s.Provider,{value:t},e.children)},g={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var a=e.components,n=e.mdxType,o=e.originalType,s=e.parentName,c=i(e,["components","mdxType","originalType","parentName"]),m=l(a),y=n,d=m["".concat(s,".").concat(y)]||m[y]||g[y]||o;return a?r.createElement(d,p(p({ref:t},c),{},{components:a})):r.createElement(d,p({ref:t},c))}));function y(e,t){var a=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var o=a.length,p=new Array(o);p[0]=m;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:n,p[1]=i;for(var l=2;l{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>y,frontMatter:()=>i,metadata:()=>l,toc:()=>g});var r=a(9668),n=a(1367),o=(a(6540),a(5680)),p=["components"],i={id:"project-management",title:"Create a new Project"},s=void 0,l={unversionedId:"project-management",id:"project-management",title:"Create a new Project",description:"Create a project using the web UI",source:"@site/docs/create-dsri-project.md",sourceDirName:".",slug:"/project-management",permalink:"/docs/project-management",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/create-dsri-project.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"project-management",title:"Create a new Project"},sidebar:"docs",previous:{title:"Delete objects (advanced)",permalink:"/docs/openshift-delete-objects"},next:{title:"Libraries for Machine Learning",permalink:"/docs/tools-machine-learning"}},c={},g=[{value:"Create a project using the web UI",id:"create-a-project-using-the-web-ui",level:2},{value:"Create a project using the CLI",id:"create-a-project-using-the-cli",level:2},{value:"Access permissions for
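To watch the termination process described in the caution box above, you can poll the project status from the CLI; a minimal sketch, reusing the `dsri-workshop` project name from the example:

```bash
# Prints "Terminating" while the server clears the project content,
# then fails with "not found" once the project is fully removed
oc get project dsri-workshop -o jsonpath='{.status.phase}'
```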
developers to your project",id:"access-permissions-for-developers-to-your-project",level:2},{value:"Delete a project using the web UI",id:"delete-a-project-using-the-web-ui",level:2},{value:"Delete a project using the CLI",id:"delete-a-project-using-the-cli",level:2}],m={toc:g};function y(e){var t=e.components,a=(0,n.A)(e,p);return(0,o.yg)("wrapper",(0,r.A)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"create-a-project-using-the-web-ui"},"Create a project using the web UI"),(0,o.yg)("admonition",{title:"Avoid creating multiple projects",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"Please avoid creating multiple projects unnecessarily. Be responsible and delete the applications you are no longer using in your project to free up resources, instead of creating a new project with a different number at the end."),(0,o.yg)("p",{parentName:"admonition"},"It is also easier to connect your different application containers and storage volumes when you create them in the same project.")),(0,o.yg)("p",null,"You can create a project using the ",(0,o.yg)("strong",{parentName:"p"},"Developer")," perspective, as follows:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click the ",(0,o.yg)("strong",{parentName:"p"},"Project")," drop-down menu to see a list of all available projects. Select ",(0,o.yg)("strong",{parentName:"p"},"Create Project"),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"In the ",(0,o.yg)("strong",{parentName:"p"},"Create Project")," dialog box, enter a unique name in the ",(0,o.yg)("strong",{parentName:"p"},"Name")," field. Use a short and meaningful name for your project, as the project identifier is unique across all projects, such as ",(0,o.yg)("inlineCode",{parentName:"p"},"workspace-yourname")," or ",(0,o.yg)("inlineCode",{parentName:"p"},"ml-covid-pathways"),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Add the ",(0,o.yg)("strong",{parentName:"p"},"Display Name")," ",(0,o.yg)("inlineCode",{parentName:"p"},"DSRI Workshop")," and ",(0,o.yg)("strong",{parentName:"p"},"Description")," ",(0,o.yg)("inlineCode",{parentName:"p"},"DSRI Community Workshop Projects")," details for the project.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Create"),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Use the left navigation panel to navigate to the ",(0,o.yg)("strong",{parentName:"p"},"Project")," view and see the dashboard for your project."))),(0,o.yg)("img",{src:"/img/screenshot_create_project.png",alt:"Create Project",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("ol",{start:6},(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Optional:"),(0,o.yg)("ul",{parentName:"li"},(0,o.yg)("li",{parentName:"ul"},"Use the ",(0,o.yg)("strong",{parentName:"li"},"Project")," drop-down menu at the top of the screen and select ",(0,o.yg)("strong",{parentName:"li"},"all projects")," to list all of the projects in your cluster."),(0,o.yg)("li",{parentName:"ul"},"Use the ",(0,o.yg)("strong",{parentName:"li"},"Details")," tab to see the project details."),(0,o.yg)("li",{parentName:"ul"},"If you have adequate permissions for a project, you can use the ",(0,o.yg)("strong",{parentName:"li"},"Project Access")," tab to provide or revoke ",(0,o.yg)("em",{parentName:"li"},"administrator"),", ",(0,o.yg)("em",{parentName:"li"},"edit"),", and ",(0,o.yg)("em",{parentName:"li"},"view")," privileges for the
project.")))),(0,o.yg)("h2",{id:"create-a-project-using-the-cli"},"Create a project using the CLI"),(0,o.yg)("p",null,"You need to be logged in to the DSRI and ",(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-install#login-in-the-terminal-with-oc"},"copy the login command"),"."),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"Run"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},'oc new-project <project_name> --description="<description>" --display-name="<display_name>"\n'))),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"Example"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},'oc new-project dsri-workshop --description="DSRI Workshop" \\\n --display-name="DSRI Community Workshop Projects"\n')))),(0,o.yg)("admonition",{title:"Reuse your project",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"Only create a new project when it is really necessary. You can easily ",(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/project-management#delete-a-project-using-the-web-ui"},"clean up your current project")," instead of creating a new one every time you want to try something.")),(0,o.yg)("h2",{id:"access-permissions-for-developers-to-your-project"},"Access permissions for developers to your project"),(0,o.yg)("p",null,"You can use the ",(0,o.yg)("strong",{parentName:"p"},"Project")," view in the ",(0,o.yg)("strong",{parentName:"p"},"Developer")," perspective to grant or revoke access permissions to your project."),(0,o.yg)("p",null,"To add users to your project and provide ",(0,o.yg)("strong",{parentName:"p"},"Admin"),", ",(0,o.yg)("strong",{parentName:"p"},"Edit"),", or ",(0,o.yg)("strong",{parentName:"p"},"View")," access to them:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"In the ",(0,o.yg)("strong",{parentName:"p"},"Developer")," perspective, navigate to the ",(0,o.yg)("strong",{parentName:"p"},"Project")," view.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"In the ",(0,o.yg)("strong",{parentName:"p"},"Project")," page, select the ",(0,o.yg)("strong",{parentName:"p"},"Project Access")," tab.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Add Access")," to add a new row of permissions to the default ones."),(0,o.yg)("img",{src:"/img/screenshot_project_access.png",alt:"Project Access",style:{maxWidth:"100%",maxHeight:"100%"}}))),(0,o.yg)("ol",{start:4},(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Enter the user name, click the ",(0,o.yg)("strong",{parentName:"p"},"Select a role")," drop-down list, and select an appropriate role.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Save")," to add the new permissions."))),(0,o.yg)("p",null,"You can also use:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"The ",(0,o.yg)("strong",{parentName:"p"},"Select a role")," drop-down list, to modify the access permissions of an existing user.")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"The ",(0,o.yg)("strong",{parentName:"p"},"Remove Access")," icon, to completely remove the access permissions of an existing user to the project."))),(0,o.yg)("admonition",{type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Advanced role-based
access control is managed in the ",(0,o.yg)("strong",{parentName:"p"},"Roles")," and ",(0,o.yg)("strong",{parentName:"p"},"Roles Binding")," views in the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," perspective")),(0,o.yg)("h2",{id:"delete-a-project-using-the-web-ui"},"Delete a project using the web UI"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Navigate to ",(0,o.yg)("strong",{parentName:"p"},"Home")," \u2192 ",(0,o.yg)("strong",{parentName:"p"},"Projects"),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Locate the project that you want to delete from the list of projects.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"On the far right side of the project listing, select ",(0,o.yg)("strong",{parentName:"p"},"Delete Project")," from the Options menu ",(0,o.yg)("img",{parentName:"p",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABsAAAAjCAIAAADqn+bCAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAA+0lEQVRIie2WMQqEMBBFJ47gUXRBLyBYqbUXULCx9CR2XsAb6AlUEM9kpckW7obdZhwWYWHXX/3i8TPJZEKEUgpOlXFu3JX4V4kmB2qaZhgGKSUiZlkWxzEBC84N9zxv27bdO47Tti0Bs3at4wBgXVca/lJnfN/XPggCGmadIwAsywIAiGhZFk1ydy2EYJKgGCqK4vZUVVU0zKpxnmftp2mi4S/1GhG1N82DMWNNYVmW4zgqpRAxTVMa5t4evlg11nXd9/1eY57nSZIQMKtG13WllLu3bbvrOgJmdUbHwfur8Xniqw6Hh5UYRdGDNowwDA+WvP4UV+JPJ94B1gKUWcTOCT0AAAAASUVORK5CYII=",alt:"kebab"}),".")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"When the ",(0,o.yg)("strong",{parentName:"p"},"Delete Project")," pane opens, enter the name of the project that you want to delete in the field.")),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("p",{parentName:"li"},"Click ",(0,o.yg)("strong",{parentName:"p"},"Delete"),"."),(0,o.yg)("img",{src:"/img/screenshot_delete_project.png",alt:"Delete Project",style:{maxWidth:"100%",maxHeight:"100%"}}))),(0,o.yg)("h2",{id:"delete-a-project-using-the-cli"},"Delete a project using the CLI"),(0,o.yg)("admonition",{title:"Delete Project",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"When you delete a project, the server updates the project status to ",(0,o.yg)("strong",{parentName:"p"},"Terminating")," from ",(0,o.yg)("strong",{parentName:"p"},"Active"),". Then, the server clears all content from a project that is in the ",(0,o.yg)("strong",{parentName:"p"},"Terminating")," state before finally removing the project. While a project is in ",(0,o.yg)("strong",{parentName:"p"},"Terminating")," status, you cannot add new content to the project. 
Projects can be deleted from the CLI or the web console.")),(0,o.yg)("p",null,"You need to be logged in to the DSRI and ",(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-install#login-in-the-terminal-with-oc"},"copy the login command"),"."),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"Run"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete project <project_name>\n")))),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"Example"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete project dsri-workshop\n")))))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/5514662e.9a79f6b2.js b/assets/js/5514662e.7f729865.js similarity index 98% rename from assets/js/5514662e.9a79f6b2.js rename to assets/js/5514662e.7f729865.js index 634d5fa04..5eebf3bde 100644 --- a/assets/js/5514662e.9a79f6b2.js +++ b/assets/js/5514662e.7f729865.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2865],{5680:(e,t,a)=>{a.d(t,{xA:()=>s,yg:()=>d});var n=a(6540);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function c(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var l=n.createContext({}),p=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):c(c({},t),e)),a},s=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},u=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,o=e.originalType,l=e.parentName,s=i(e,["components","mdxType","originalType","parentName"]),u=p(a),d=r,g=u["".concat(l,".").concat(d)]||u[d]||m[d]||o;return a?n.createElement(g,c(c({ref:t},s),{},{components:a})):n.createElement(g,c({ref:t},s))}));function d(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,c=new Array(o);c[0]=u;var i={};for(var l in t)hasOwnProperty.call(t,l)&&(i[l]=t[l]);i.originalType=e,i.mdxType="string"==typeof e?e:r,c[1]=i;for(var p=2;p{a.r(t),a.d(t,{assets:()=>s,contentTitle:()=>l,default:()=>d,frontMatter:()=>i,metadata:()=>p,toc:()=>m});var n=a(9668),r=a(1367),o=(a(6540),a(5680)),c=["components"],i={id:"catalog-data-streaming",title:"Data streaming"},l=void 0,p={unversionedId:"catalog-data-streaming",id:"catalog-data-streaming",title:"Data streaming",description:"Apache Flink",source:"@site/docs/catalog-data-streaming.md",sourceDirName:".",slug:"/catalog-data-streaming",permalink:"/docs/catalog-data-streaming",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-data-streaming.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-data-streaming",title:"Data streaming"}},s={},m=[{value:"Apache Flink",id:"apache-flink",level:2}],u={toc:m};function d(e){var
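The **Project Access** tab described above can also be driven from the CLI; a minimal sketch, assuming a hypothetical colleague username and the `dsri-workshop` project from the examples:

```bash
# Grant the "edit" role to a user in your project (the username is hypothetical)
oc adm policy add-role-to-user edit firstname.lastname -n dsri-workshop

# Revoke the role again when the collaboration ends
oc adm policy remove-role-from-user edit firstname.lastname -n dsri-workshop
```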
t=e.components,a=(0,r.A)(e,c);return(0,o.yg)("wrapper",(0,n.A)({},u,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"apache-flink"},"Apache Flink"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://flink.apache.org/"},"Apache Flink")," enables processing of data streams using languages such as Java or Scala."),(0,o.yg)("admonition",{title:"Root permission required",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"\ud83d\udd12 You need root containers enabled (aka. anyuid) in your project to start this application.")),(0,o.yg)("p",null,"Create the Apache Flink template in your project using ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/vemonet/flink-on-openshift"},"vemonet/flink-on-openshift")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"oc apply -f https://raw.githubusercontent.com/vemonet/flink-on-openshift/master/template-flink-dsri.yml\n")),(0,o.yg)("p",null,"Use the template to start the cluster from the catalog."),(0,o.yg)("p",null,"Use the following commands to get the Flink Jobmanager pod id and copy files to the pod."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name\n\n# Example creating the workspace folder and copying the RMLStreamer.jar to the pod\noc exec <jobmanager_pod_id> -- mkdir -p /mnt/workspace/resources\noc cp workspace/resources/RMLStreamer.jar <jobmanager_pod_id>:/mnt/\n")),(0,o.yg)("p",null,"Delete the Apache Flink cluster (change the application name):"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=flink-cluster\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2865],{5680:(e,t,a)=>{a.d(t,{xA:()=>s,yg:()=>d});var n=a(6540);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function c(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var l=n.createContext({}),p=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):c(c({},t),e)),a},s=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},u=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,o=e.originalType,l=e.parentName,s=i(e,["components","mdxType","originalType","parentName"]),u=p(a),d=r,g=u["".concat(l,".").concat(d)]||u[d]||m[d]||o;return a?n.createElement(g,c(c({ref:t},s),{},{components:a})):n.createElement(g,c({ref:t},s))}));function d(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,c=new Array(o);c[0]=u;var i={};for(var l in t)hasOwnProperty.call(t,l)&&(i[l]=t[l]);i.originalType=e,i.mdxType="string"==typeof e?e:r,c[1]=i;for(var p=2;p{a.r(t),a.d(t,{assets:()=>s,contentTitle:()=>l,default:()=>d,frontMatter:()=>i,metadata:()=>p,toc:()=>m});var
n=a(9668),r=a(1367),o=(a(6540),a(5680)),c=["components"],i={id:"catalog-data-streaming",title:"Data streaming"},l=void 0,p={unversionedId:"catalog-data-streaming",id:"catalog-data-streaming",title:"Data streaming",description:"Apache Flink",source:"@site/docs/catalog-data-streaming.md",sourceDirName:".",slug:"/catalog-data-streaming",permalink:"/docs/catalog-data-streaming",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-data-streaming.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-data-streaming",title:"Data streaming"}},s={},m=[{value:"Apache Flink",id:"apache-flink",level:2}],u={toc:m};function d(e){var t=e.components,a=(0,r.A)(e,c);return(0,o.yg)("wrapper",(0,n.A)({},u,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"apache-flink"},"Apache Flink"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://flink.apache.org/"},"Apache Flink")," enables processing of data streams using languages such as Java or Scala."),(0,o.yg)("admonition",{title:"Root permission required",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"\ud83d\udd12 You need root containers enabled (aka. anyuid) in your project to start this application.")),(0,o.yg)("p",null,"Create the Apache Flink template in your project using ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/vemonet/flink-on-openshift"},"vemonet/flink-on-openshift")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"oc apply -f https://raw.githubusercontent.com/vemonet/flink-on-openshift/master/template-flink-dsri.yml\n")),(0,o.yg)("p",null,"Use the template to start the cluster from the catalog."),(0,o.yg)("p",null,"Use the following commands to get the Flink Jobmanager pod id and copy files to the pod."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name\n\n# Example creating the workspace folder and copying the RMLStreamer.jar to the pod\noc exec <jobmanager_pod_id> -- mkdir -p /mnt/workspace/resources\noc cp workspace/resources/RMLStreamer.jar <jobmanager_pod_id>:/mnt/\n")),(0,o.yg)("p",null,"Delete the Apache Flink cluster (change the application name):"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=flink-cluster\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/5890eb41.464e3bf2.js b/assets/js/5890eb41.bc2091d8.js similarity index 99% rename from assets/js/5890eb41.464e3bf2.js rename to assets/js/5890eb41.bc2091d8.js index 564c298cb..8b4962dd5 100644 --- a/assets/js/5890eb41.464e3bf2.js +++ b/assets/js/5890eb41.bc2091d8.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8603],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>g});var a=n(6540);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function o(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var
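Once the jar has been copied, the typical next steps are to submit it to the cluster and to check the Flink web UI; a minimal sketch, assuming the jobmanager service is named `flink-jobmanager` and the Flink binaries live at the usual `/opt/flink` location in the image (both depend on the template you deployed):

```bash
# Submit the copied jar from inside the jobmanager pod
# (pod id from the `oc get pod` command above; paths depend on the image)
oc exec <jobmanager_pod_id> -- /opt/flink/bin/flink run /mnt/workspace/resources/RMLStreamer.jar

# Forward the Flink web UI (port 8081 by default) to your machine
oc port-forward svc/flink-jobmanager 8081:8081
```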
r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var s=a.createContext({}),p=function(e){var t=a.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},c=function(e){var t=p(e.components);return a.createElement(s.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},y=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,r=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),y=p(n),g=i,m=y["".concat(s,".").concat(g)]||y[g]||u[g]||r;return n?a.createElement(m,o(o({ref:t},c),{},{components:n})):a.createElement(m,o({ref:t},c))}));function g(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var r=n.length,o=new Array(r);o[0]=y;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:i,o[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>g,frontMatter:()=>l,metadata:()=>p,toc:()=>u});var a=n(9668),i=n(1367),r=(n(6540),n(5680)),o=["components"],l={id:"guide-vpn",title:"Install UM VPN"},s=void 0,p={unversionedId:"guide-vpn",id:"guide-vpn",title:"Install UM VPN",description:"Request an account",source:"@site/docs/guide-vpn.md",sourceDirName:".",slug:"/guide-vpn",permalink:"/docs/guide-vpn",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-vpn.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-vpn",title:"Install UM VPN"},sidebar:"docs",previous:{title:"Publish a Docker image",permalink:"/docs/guide-publish-image"},next:{title:"Prepare a workshop",permalink:"/docs/guide-workshop"}},c={},u=[{value:"Request an account",id:"request-an-account",level:2},{value:"Connect to the UM network",id:"connect-to-the-um-network",level:2},{value:"Install the VPN (AnyConnect Secure Mobility Client) on Windows",id:"install-the-vpn-anyconnect-secure-mobility-client-on-windows",level:4},{value:"Log in to the VPN (AnyConnect Secure Mobility Client)",id:"log-in-to-the-vpn-anyconnect-secure-mobility-client",level:4},{value:"Install the VPN (AnyConnect Secure Mobility Client) on Linux",id:"install-the-vpn-anyconnect-secure-mobility-client-on-linux",level:4}],y={toc:u};function g(e){var t=e.components,n=(0,i.A)(e,o);return(0,r.yg)("wrapper",(0,a.A)({},y,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("h2",{id:"request-an-account"},"Request an account"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"You will need to have an account at Maastricht University with an email ending with ",(0,r.yg)("inlineCode",{parentName:"p"},"@maastrichtuniversity.nl")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"@student.maastrichtuniversity.nl"),".")),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Request access to the DSRI for your account: please fill this ",(0,r.yg)("a",{parentName:"p",href:"/register"},"form \ud83d\udcec"),"
to provide us with some information on what you plan to do with the DSRI."))),(0,r.yg)("h2",{id:"connect-to-the-um-network"},"Connect to the UM network"),(0,r.yg)("p",null,"You need to be connected to the UM network to access the DSRI."),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Connect to ",(0,r.yg)("strong",{parentName:"p"},"UMnet")," or ",(0,r.yg)("strong",{parentName:"p"},"eduroam")," WiFi at Maastricht University")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Use the ",(0,r.yg)("strong",{parentName:"p"},"Maastricht University VPN")," at ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("a",{parentName:"strong",href:"https://vpn.maastrichtuniversity.nl/"},"vpn.maastrichtuniversity.nl"))),(0,r.yg)("p",{parentName:"li"},"Log in to that using your UM username and password."))),(0,r.yg)("admonition",{title:"Students",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"By default the UM VPN is only available to employees. As a student you can access UM resources from any location via ",(0,r.yg)("a",{parentName:"p",href:"https://athenadesktop.maastrichtuniversity.nl"},"Student Desktop Anywhere"),".\u202fHowever, if VPN access is absolutely necessary you can request access via your course coordinator. ")),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"The ",(0,r.yg)("strong",{parentName:"p"},"prefix of your UM email address")," with the first letter capitalized, e.g. ",(0,r.yg)("inlineCode",{parentName:"p"},"Firstname.Lastname")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"F.Lastname"),", or your ",(0,r.yg)("strong",{parentName:"p"},"employee number")," at Maastricht University (a.k.a. P number), e.g. ",(0,r.yg)("inlineCode",{parentName:"p"},"P7000000")),(0,r.yg)("img",{src:"/img/vpn-login.png",alt:"VPN Log in View",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",{parentName:"li"},"Then you will see the page below to download the ",(0,r.yg)("strong",{parentName:"p"},"AnyConnect Secure Mobility Client")),(0,r.yg)("img",{src:"/img/VPN-anyconnect.png",alt:"Download AnyConnect Secure Mobility Client",style:{maxWidth:"100%",maxHeight:"100%"}}))),(0,r.yg)("h4",{id:"install-the-vpn-anyconnect-secure-mobility-client-on-windows"},"Install the VPN (AnyConnect Secure Mobility Client) on Windows"),(0,r.yg)("p",null,"Double-click on the ",(0,r.yg)("inlineCode",{parentName:"p"},".exe")," file to install the VPN."),(0,r.yg)("p",null,"You can follow the steps below, as shown in the pictures."),(0,r.yg)("img",{src:"/img/vpnsetup1.png",alt:"Install VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/vpnsetup2.png",alt:"Install VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/vpnsetup3.png",alt:"Install VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/vpnsetup4.png",alt:"Install VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("h4",{id:"log-in-to-the-vpn-anyconnect-secure-mobility-client"},"Log in to the VPN (AnyConnect Secure Mobility Client)"),(0,r.yg)("p",null,"Once you finish installing, you can run the Cisco AnyConnect Secure Mobility Client. "),(0,r.yg)("img",{src:"/img/vpnsetup7.png",alt:"Log in to the VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You will then see the wizard below; click Connect"),(0,r.yg)("img",{src:"/img/vpnsetup5.png",alt:"Log in to the VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"Provide your UM username and password
(",(0,r.yg)("strong",{parentName:"p"},"employee number")," at Maastricht University (a.k.a. P number), e.g. ",(0,r.yg)("inlineCode",{parentName:"p"},"P7000000"),")"),(0,r.yg)("img",{src:"/img/vpnsetup6.png",alt:"Log in to the VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("h4",{id:"install-the-vpn-anyconnect-secure-mobility-client-on-linux"},"Install the VPN (AnyConnect Secure Mobility Client) on Linux"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Connect to ",(0,r.yg)("strong",{parentName:"p"},"UMnet")," or ",(0,r.yg)("strong",{parentName:"p"},"eduroam")," WiFi at Maastricht University")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"For ",(0,r.yg)("strong",{parentName:"p"},"Linux"),", use ",(0,r.yg)("inlineCode",{parentName:"p"},"openconnect")," to connect to the UM VPN. You can easily install it on Ubuntu and Debian distributions with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt"),":"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"sudo apt install openconnect\nsudo openconnect -u YOUR.USER --authgroup 01-Employees --useragent=AnyConnect vpn.maastrichtuniversity.nl\n")),(0,r.yg)("blockquote",{parentName:"li"},(0,r.yg)("p",{parentName:"blockquote"},"Provide your UM password when prompted."))),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"For ",(0,r.yg)("strong",{parentName:"p"},"students"),":"),(0,r.yg)("ul",{parentName:"li"},(0,r.yg)("li",{parentName:"ul"},"By default the UM VPN is only available to employees. As a student you can access UM resources from any location via ",(0,r.yg)("a",{parentName:"li",href:"https://athenadesktop.maastrichtuniversity.nl"},"Student Desktop Anywhere"),".\u202fHowever, if VPN access is absolutely necessary you can request access via your course coordinator.")))))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8603],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>g});var a=n(6540);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function o(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var s=a.createContext({}),p=function(e){var t=a.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},c=function(e){var t=p(e.components);return a.createElement(s.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},y=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,r=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),y=p(n),g=i,m=y["".concat(s,".").concat(g)]||y[g]||u[g]||r;return n?a.createElement(m,o(o({ref:t},c),{},{components:n})):a.createElement(m,o({ref:t},c))}));function g(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var r=n.length,o=new Array(r);o[0]=y;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:i,o[1]=l;for(var 
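If you prefer not to keep a terminal occupied by the VPN session, `openconnect` can also run as a background daemon; a minimal sketch using its standard `--background` and `--pid-file` options (the pid file path is an arbitrary choice):

```bash
# Start the VPN in the background and remember its process id
sudo openconnect -u YOUR.USER --authgroup 01-Employees --useragent=AnyConnect \
  --background --pid-file=/var/run/um-vpn.pid vpn.maastrichtuniversity.nl

# Disconnect later by stopping that process
sudo kill "$(cat /var/run/um-vpn.pid)"
```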
p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>g,frontMatter:()=>l,metadata:()=>p,toc:()=>u});var a=n(9668),i=n(1367),r=(n(6540),n(5680)),o=["components"],l={id:"guide-vpn",title:"Install UM VPN"},s=void 0,p={unversionedId:"guide-vpn",id:"guide-vpn",title:"Install UM VPN",description:"Request an account",source:"@site/docs/guide-vpn.md",sourceDirName:".",slug:"/guide-vpn",permalink:"/docs/guide-vpn",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-vpn.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-vpn",title:"Install UM VPN"},sidebar:"docs",previous:{title:"Publish a Docker image",permalink:"/docs/guide-publish-image"},next:{title:"Prepare a workshop",permalink:"/docs/guide-workshop"}},c={},u=[{value:"Request an account",id:"request-an-account",level:2},{value:"Connect to the UM network",id:"connect-to-the-um-network",level:2},{value:"Install the VPN (AnyConnect Secure Mobility Client) on Windows",id:"install-the-vpn-anyconnect-secure-mobility-client-on-windows",level:4},{value:"Log in to the VPN (AnyConnect Secure Mobility Client)",id:"log-in-to-the-vpn-anyconnect-secure-mobility-client",level:4},{value:"Install the VPN (AnyConnect Secure Mobility Client) on Linux",id:"install-the-vpn-anyconnect-secure-mobility-client-on-linux",level:4}],y={toc:u};function g(e){var t=e.components,n=(0,i.A)(e,o);return(0,r.yg)("wrapper",(0,a.A)({},y,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("h2",{id:"request-an-account"},"Request an account"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"You will need to have an account at Maastricht University with an email ending with ",(0,r.yg)("inlineCode",{parentName:"p"},"@maastrichtuniversity.nl")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"@student.maastrichtuniversity.nl"),".")),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Request access to the DSRI for your account: please fill this ",(0,r.yg)("a",{parentName:"p",href:"/register"},"form \ud83d\udcec")," to provide us with some information on what you plan to do with the DSRI."))),(0,r.yg)("h2",{id:"connect-to-the-um-network"},"Connect to the UM network"),(0,r.yg)("p",null,"You need to be connected to the UM network to access the DSRI."),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Connect to ",(0,r.yg)("strong",{parentName:"p"},"UMnet")," or ",(0,r.yg)("strong",{parentName:"p"},"eduroam")," WiFi at Maastricht University")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Use the ",(0,r.yg)("strong",{parentName:"p"},"Maastricht University VPN")," at ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("a",{parentName:"strong",href:"https://vpn.maastrichtuniversity.nl/"},"vpn.maastrichtuniversity.nl"))),(0,r.yg)("p",{parentName:"li"},"Log in to that using your UM username and password."))),(0,r.yg)("admonition",{title:"Students",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"By default the UM VPN is only available to employees. As a student you can access UM resources from any location via ",(0,r.yg)("a",{parentName:"p",href:"https://athenadesktop.maastrichtuniversity.nl"},"Student Desktop Anywhere"),".\u202fHowever, if VPN access is absolutely necessary you can request access via your course coordinator.
")),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"The ",(0,r.yg)("strong",{parentName:"p"},"prefix of your UM email address")," with the first letter capitalized, e.g. ",(0,r.yg)("inlineCode",{parentName:"p"},"Firstname.Lastname")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"F.Lastname"),", or your ",(0,r.yg)("strong",{parentName:"p"},"employee number")," at Maastricht University (a.k.a. P number), e.g. ",(0,r.yg)("inlineCode",{parentName:"p"},"P7000000")),(0,r.yg)("img",{src:"/img/vpn-login.png",alt:"VPN Log in View",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",{parentName:"li"},"Then you will see the page below to download the ",(0,r.yg)("strong",{parentName:"p"},"AnyConnect Secure Mobility Client")),(0,r.yg)("img",{src:"/img/VPN-anyconnect.png",alt:"Download AnyConnect Secure Mobility Client",style:{maxWidth:"100%",maxHeight:"100%"}}))),(0,r.yg)("h4",{id:"install-the-vpn-anyconnect-secure-mobility-client-on-windows"},"Install the VPN (AnyConnect Secure Mobility Client) on Windows"),(0,r.yg)("p",null,"Double-click on the ",(0,r.yg)("inlineCode",{parentName:"p"},".exe")," file to install the VPN."),(0,r.yg)("p",null,"You can follow the steps below, as shown in the pictures."),(0,r.yg)("img",{src:"/img/vpnsetup1.png",alt:"Install VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/vpnsetup2.png",alt:"Install VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/vpnsetup3.png",alt:"Install VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/vpnsetup4.png",alt:"Install VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("h4",{id:"log-in-to-the-vpn-anyconnect-secure-mobility-client"},"Log in to the VPN (AnyConnect Secure Mobility Client)"),(0,r.yg)("p",null,"Once you finish installing, you can run the Cisco AnyConnect Secure Mobility Client. "),(0,r.yg)("img",{src:"/img/vpnsetup7.png",alt:"Log in to the VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You will then see the wizard below; click Connect"),(0,r.yg)("img",{src:"/img/vpnsetup5.png",alt:"Log in to the VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"Provide your UM username and password (",(0,r.yg)("strong",{parentName:"p"},"employee number")," at Maastricht University (a.k.a. P number), e.g. ",(0,r.yg)("inlineCode",{parentName:"p"},"P7000000"),")"),(0,r.yg)("img",{src:"/img/vpnsetup6.png",alt:"Log in to the VPN",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("h4",{id:"install-the-vpn-anyconnect-secure-mobility-client-on-linux"},"Install the VPN (AnyConnect Secure Mobility Client) on Linux"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Connect to ",(0,r.yg)("strong",{parentName:"p"},"UMnet")," or ",(0,r.yg)("strong",{parentName:"p"},"eduroam")," WiFi at Maastricht University")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"For ",(0,r.yg)("strong",{parentName:"p"},"Linux"),", use ",(0,r.yg)("inlineCode",{parentName:"p"},"openconnect")," to connect to the UM VPN.
You can easily install it on Ubuntu and Debian distributions with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt"),":"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"sudo apt install openconnect\nsudo openconnect -u YOUR.USER --authgroup 01-Employees --useragent=AnyConnect vpn.maastrichtuniversity.nl\n")),(0,r.yg)("blockquote",{parentName:"li"},(0,r.yg)("p",{parentName:"blockquote"},"Provide your UM password when prompted."))),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"For ",(0,r.yg)("strong",{parentName:"p"},"students"),":"),(0,r.yg)("ul",{parentName:"li"},(0,r.yg)("li",{parentName:"ul"},"By default the UM VPN is only available to employees. As a student you can access UM resources from any location via ",(0,r.yg)("a",{parentName:"li",href:"https://athenadesktop.maastrichtuniversity.nl"},"Student Desktop Anywhere"),".\u202fHowever, if VPN access is absolutely necessary you can request access via your course coordinator.")))))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/5db33872.14f5bb11.js b/assets/js/5db33872.75d53e63.js similarity index 99% rename from assets/js/5db33872.14f5bb11.js rename to assets/js/5db33872.75d53e63.js index 26340c9d1..3aa67c4c1 100644 --- a/assets/js/5db33872.14f5bb11.js +++ b/assets/js/5db33872.75d53e63.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2479],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>y});var a=n(6540);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var p=a.createContext({}),s=function(e){var t=a.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},c=function(e){var t=s(e.components);return a.createElement(p.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},g=a.forwardRef((function(e,t){var n=e.components,o=e.mdxType,i=e.originalType,p=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),g=s(n),y=o,d=g["".concat(p,".").concat(y)]||g[y]||m[y]||i;return n?a.createElement(d,r(r({ref:t},c),{},{components:n})):a.createElement(d,r({ref:t},c))}));function y(e,t){var n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var i=n.length,r=new Array(i);r[0]=g;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:o,r[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>p,default:()=>y,frontMatter:()=>l,metadata:()=>s,toc:()=>m});var a=n(9668),o=n(1367),i=(n(6540),n(5680)),r=["components"],l={id:"openshift-install",title:"Install the client"},p=void 0,s={unversionedId:"openshift-install",id:"openshift-install",title:"Install the client",description:"Install the OpenShift Command Line Interface (CLI): oc to access the DSRI from your computer's 
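Whichever platform you install the client on, it is worth verifying the installation, and, once you are logged in as described further below, your session; a minimal sketch (`oc whoami` and `oc project` only succeed after `oc login`):

```bash
# Confirm the client is on your PATH and check its version
oc version --client

# After logging in: print the user you are authenticated as
oc whoami

# Show which project your commands currently target
oc project
```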
terminal.",source:"@site/docs/openshift-install.md",sourceDirName:".",slug:"/openshift-install",permalink:"/docs/openshift-install",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-install.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-install",title:"Install the client"},sidebar:"docs",previous:{title:"Start your workspace",permalink:"/docs/start-workspace"},next:{title:"Prepare your project",permalink:"/docs/prepare-project-for-dsri"}},c={},m=[{value:"Install the oc client",id:"install-the-oc-client",level:2},{value:"On Linux",id:"on-linux",level:3},{value:"On Mac",id:"on-mac",level:3},{value:"On Windows",id:"on-windows",level:3},{value:"Login in the terminal with oc",id:"login-in-the-terminal-with-oc",level:2}],g={toc:m};function y(e){var t=e.components,n=(0,o.A)(e,r);return(0,i.yg)("wrapper",(0,a.A)({},g,n,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("p",null,"Install the ",(0,i.yg)("strong",{parentName:"p"},"OpenShift Command Line Interface (CLI)"),": ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," to access the DSRI from your computer's terminal. "),(0,i.yg)("p",null,"The ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," CLI enables you to perform operations on your applications deployed on the DSRI, such as:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"Copy large files to or from the DSRI using ",(0,i.yg)("inlineCode",{parentName:"li"},"oc cp")),(0,i.yg)("li",{parentName:"ul"},"Connect to an application terminal using ",(0,i.yg)("inlineCode",{parentName:"li"},"oc rsh")),(0,i.yg)("li",{parentName:"ul"},"Get the applications running in your project with ",(0,i.yg)("inlineCode",{parentName:"li"},"oc get pods"))),(0,i.yg)("h2",{id:"install-the-oc-client"},"Install the ",(0,i.yg)("inlineCode",{parentName:"h2"},"oc")," client"),(0,i.yg)("h3",{id:"on-linux"},"On Linux"),(0,i.yg)("p",null,"Download the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," and ",(0,i.yg)("inlineCode",{parentName:"p"},"kubectl")," Command Line Interface clients:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-shell"},"wget https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz && tar xvf oc.tar.gz\nsudo mv oc kubectl /usr/local/bin/\n")),(0,i.yg)("h3",{id:"on-mac"},"On Mac"),(0,i.yg)("p",null,"Use ",(0,i.yg)("a",{parentName:"p",href:"https://brew.sh"},(0,i.yg)("inlineCode",{parentName:"a"},"brew")),":"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"brew install openshift-cli\n")),(0,i.yg)("p",null,"Or manually download the program and add it to your path:"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Download ",(0,i.yg)("a",{parentName:"p",href:"https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/macosx/oc.tar.gz"},"https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/macosx/oc.tar.gz"))),(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Unzip the archive")),(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Move the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," binary to a directory on your PATH."),(0,i.yg)("p",{parentName:"li"},"To check your ",(0,i.yg)("inlineCode",{parentName:"p"},"PATH"),", open a terminal and execute the following command:"),(0,i.yg)("pre",{parentName:"li"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"echo
$PATH\n")))),(0,i.yg)("h3",{id:"on-windows"},"On Windows"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},"Create a folder for OpenShift in Program Files: ",(0,i.yg)("inlineCode",{parentName:"li"},"C:\\Program Files (x86)\\OpenShift")),(0,i.yg)("li",{parentName:"ol"},"Click ",(0,i.yg)("a",{parentName:"li",href:"https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/windows/oc.zip"},"here")," to download the ",(0,i.yg)("inlineCode",{parentName:"li"},"oc")," tool ",(0,i.yg)("inlineCode",{parentName:"li"},".zip")," file, and move it to ",(0,i.yg)("inlineCode",{parentName:"li"},"C:\\Program Files (x86)\\OpenShift"),"."),(0,i.yg)("li",{parentName:"ol"},"Extract the ",(0,i.yg)("inlineCode",{parentName:"li"},".zip")," file."),(0,i.yg)("li",{parentName:"ol"},"Next, set the system ",(0,i.yg)("strong",{parentName:"li"},"PATH")," environment variable for the directory containing the ",(0,i.yg)("inlineCode",{parentName:"li"},"oc.exe")," file, which now resides in your newly created ",(0,i.yg)("strong",{parentName:"li"},"OpenShift")," folder inside of ",(0,i.yg)("inlineCode",{parentName:"li"},"C:\\Program Files (x86)\\OpenShift"),(0,i.yg)("ol",{parentName:"li"},(0,i.yg)("li",{parentName:"ol"},"Open the Control Panel, and click on ",(0,i.yg)("strong",{parentName:"li"},"System")),(0,i.yg)("li",{parentName:"ol"},"Click on ",(0,i.yg)("strong",{parentName:"li"},"Advanced system settings")," on the left or open the ",(0,i.yg)("strong",{parentName:"li"},"Advanced")," tab of ",(0,i.yg)("em",{parentName:"li"},"System Properties.")," "),(0,i.yg)("li",{parentName:"ol"},"Click the button labeled ",(0,i.yg)("strong",{parentName:"li"},"Environment Variables...")," at the bottom. "),(0,i.yg)("li",{parentName:"ol"},"Look for the option ",(0,i.yg)("strong",{parentName:"li"},"Path")," in either the ",(0,i.yg)("strong",{parentName:"li"},"User variables")," section (for the current user) or the ",(0,i.yg)("strong",{parentName:"li"},"System variables")," section (for all users on the system).")))),(0,i.yg)("img",{class:"screenshot",src:"/img/OC_Path.png",alt:"Set OC Path",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,i.yg)("p",null,"This makes it easy to access the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," command line interface by simply opening up the ",(0,i.yg)("strong",{parentName:"p"},"PowerShell")," and typing in the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," command, e.g.:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-powershell"},"oc version\n")),(0,i.yg)("admonition",{title:"Official documentation",type:"note"},(0,i.yg)("p",{parentName:"admonition"},"See the ",(0,i.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/cli_reference/openshift_cli/getting-started-cli.html#installing-openshift-cli"},"official documentation to install the client")," if needed.")),(0,i.yg)("h2",{id:"login-in-the-terminal-with-oc"},"Login in the terminal with ",(0,i.yg)("inlineCode",{parentName:"h2"},"oc")),(0,i.yg)("p",null,"To use the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," Command Line Interface, you will need to authenticate to the ",(0,i.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console"},"DSRI")," in your terminal:"),(0,i.yg)("admonition",{title:"PASSWORD NOT SUPPORTED",type:"warning"},(0,i.yg)("p",{parentName:"admonition"},"Authentication to the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," Command Line Interface using your password is not supported.
")),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"oc login --token=<token>\n")),(0,i.yg)("p",null,"The token is provided by the Web UI:"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Go to the ",(0,i.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console"},"DSRI web UI"),".")),(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Click on the ",(0,i.yg)("strong",{parentName:"p"},"Copy Login Command")," button (in the top right of the page)."),(0,i.yg)("img",{src:"/img/screenshot_copy_login.png",alt:"Deploy VSCode",style:{maxWidth:"100%",maxHeight:"100%"}})),(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Paste the copied command in your terminal, and execute it to login with ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," \ud83d\udd11"))),(0,i.yg)("admonition",{title:"Login command",type:"info"},(0,i.yg)("p",{parentName:"admonition"},"The command should look like this:"),(0,i.yg)("pre",{parentName:"admonition"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"oc login https://api.dsri2.unimaas.nl:6443 --token=$GENERATED_TOKEN\n"))))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2479],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>y});var a=n(6540);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var p=a.createContext({}),s=function(e){var t=a.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},c=function(e){var t=s(e.components);return a.createElement(p.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},g=a.forwardRef((function(e,t){var n=e.components,o=e.mdxType,i=e.originalType,p=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),g=s(n),y=o,d=g["".concat(p,".").concat(y)]||g[y]||m[y]||i;return n?a.createElement(d,r(r({ref:t},c),{},{components:n})):a.createElement(d,r({ref:t},c))}));function y(e,t){var n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var i=n.length,r=new Array(i);r[0]=g;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:o,r[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>p,default:()=>y,frontMatter:()=>l,metadata:()=>s,toc:()=>m});var a=n(9668),o=n(1367),i=(n(6540),n(5680)),r=["components"],l={id:"openshift-install",title:"Install the client"},p=void 0,s={unversionedId:"openshift-install",id:"openshift-install",title:"Install the client",description:"Install the OpenShift Command Line Interface (CLI): oc to access the DSRI from your computer's terminal.",source:"@site/docs/openshift-install.md",sourceDirName:".",slug:"/openshift-install",permalink:"/docs/openshift-install",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-install.md",tags:[],version:"current",lastUpdatedBy:"Adekunle
Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-install",title:"Install the client"},sidebar:"docs",previous:{title:"Start your workspace",permalink:"/docs/start-workspace"},next:{title:"Prepare your project",permalink:"/docs/prepare-project-for-dsri"}},c={},m=[{value:"Install the oc client",id:"install-the-oc-client",level:2},{value:"On Linux",id:"on-linux",level:3},{value:"On Mac",id:"on-mac",level:3},{value:"On Windows",id:"on-windows",level:3},{value:"Login in the terminal with oc",id:"login-in-the-terminal-with-oc",level:2}],g={toc:m};function y(e){var t=e.components,n=(0,o.A)(e,r);return(0,i.yg)("wrapper",(0,a.A)({},g,n,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("p",null,"Install the ",(0,i.yg)("strong",{parentName:"p"},"OpenShift Command Line Interface (CLI)"),": ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," to access the DSRI from your computer's terminal. "),(0,i.yg)("p",null,"The ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," CLI enables to perform operations on your applications deployed on the DSRI, such as:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"Copy large files to or from the DSRI using ",(0,i.yg)("inlineCode",{parentName:"li"},"oc cp")),(0,i.yg)("li",{parentName:"ul"},"Connect to an application terminal using ",(0,i.yg)("inlineCode",{parentName:"li"},"oc rsh")),(0,i.yg)("li",{parentName:"ul"},"Get the applications running in your project with ",(0,i.yg)("inlineCode",{parentName:"li"},"oc get pods"))),(0,i.yg)("h2",{id:"install-the-oc-client"},"Install the ",(0,i.yg)("inlineCode",{parentName:"h2"},"oc")," client"),(0,i.yg)("h3",{id:"on-linux"},"On Linux"),(0,i.yg)("p",null,"Download the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," and ",(0,i.yg)("inlineCode",{parentName:"p"},"kubectl")," Command Line Interface clients:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-shell"},"wget https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz && tar xvf oc.tar.gz\nsudo mv oc kubectl /usr/local/bin/\n")),(0,i.yg)("h3",{id:"on-mac"},"On Mac"),(0,i.yg)("p",null,"Use ",(0,i.yg)("a",{parentName:"p",href:"https://brew.sh"},(0,i.yg)("inlineCode",{parentName:"a"},"brew")),":"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"brew install openshift-cli\n")),(0,i.yg)("p",null,"Or manually download the program and add it to your path:"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Download ",(0,i.yg)("a",{parentName:"p",href:"https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/macosx/oc.tar.gz"},"https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/macosx/oc.tar.gz"))),(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Unzip the archive")),(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Move the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," binary to a directory on your PATH."),(0,i.yg)("p",{parentName:"li"},"To check your ",(0,i.yg)("inlineCode",{parentName:"p"},"PATH"),", open a terminal and execute the following command:"),(0,i.yg)("pre",{parentName:"li"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"echo $PATH\n")))),(0,i.yg)("h3",{id:"on-windows"},"On Windows"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},"Create a folder for OpenShift in Program Files: ",(0,i.yg)("inlineCode",{parentName:"li"},"C:\\Program Files (x86)\\OpenShift")),(0,i.yg)("li",{parentName:"ol"},"Click 
",(0,i.yg)("a",{parentName:"li",href:"https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/windows/oc.zip"},"here")," to download the ",(0,i.yg)("inlineCode",{parentName:"li"},"oc")," tool ",(0,i.yg)("inlineCode",{parentName:"li"},".zip")," file, and move it to ",(0,i.yg)("inlineCode",{parentName:"li"},"C:\\Program Files (x86)\\OpenShift"),"."),(0,i.yg)("li",{parentName:"ol"},"Extract the ",(0,i.yg)("inlineCode",{parentName:"li"},".zip")," file."),(0,i.yg)("li",{parentName:"ol"},"Next set the system ",(0,i.yg)("strong",{parentName:"li"},"PATH")," environment variables for the directory containing the ",(0,i.yg)("inlineCode",{parentName:"li"},"oc.exe")," file, which now resides in your newly created ",(0,i.yg)("strong",{parentName:"li"},"OpenShift")," folder inside of ",(0,i.yg)("inlineCode",{parentName:"li"},"C:\\Program Files (x86)\\OpenShift"),(0,i.yg)("ol",{parentName:"li"},(0,i.yg)("li",{parentName:"ol"},"Open the Control Panel, and click on ",(0,i.yg)("strong",{parentName:"li"},"System")),(0,i.yg)("li",{parentName:"ol"},"Click on ",(0,i.yg)("strong",{parentName:"li"},"Advance system settings")," on the left or open the ",(0,i.yg)("strong",{parentName:"li"},"Advance")," tab of ",(0,i.yg)("em",{parentName:"li"},"System Properties.")," "),(0,i.yg)("li",{parentName:"ol"},"Click the button labeled ",(0,i.yg)("strong",{parentName:"li"},"Environment Variables...")," at the bottom. "),(0,i.yg)("li",{parentName:"ol"},"Look for the option ",(0,i.yg)("strong",{parentName:"li"},"Path")," in either the ",(0,i.yg)("strong",{parentName:"li"},"User variables")," section (for the current user) or the ",(0,i.yg)("strong",{parentName:"li"},"System variables")," section (for all users on the system).")))),(0,i.yg)("img",{class:"screenshot",src:"/img/OC_Path.png",alt:"Set OC Path",style:{zoom:"100%",maxHeight:"500px",maxWidth:"500px"}}),(0,i.yg)("p",null,"This makes it easy to access the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," command line interface by simply opening up the ",(0,i.yg)("strong",{parentName:"p"},"PowerShell")," and typing in the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," command, e.g.:"),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-powershell"},"oc version\n")),(0,i.yg)("admonition",{title:"Official documentation",type:"note"},(0,i.yg)("p",{parentName:"admonition"},"See the ",(0,i.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/cli_reference/openshift_cli/getting-started-cli.html#installing-openshift-cli"},"official documentation to install the client")," if needed.")),(0,i.yg)("h2",{id:"login-in-the-terminal-with-oc"},"Login in the terminal with ",(0,i.yg)("inlineCode",{parentName:"h2"},"oc")),(0,i.yg)("p",null,"To use the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," Command Line Interface, you will need to authenticate to the ",(0,i.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console"},"DSRI")," in your terminal:"),(0,i.yg)("admonition",{title:"PASSWORD NOT SUPPORTED",type:"warning"},(0,i.yg)("p",{parentName:"admonition"},"Authentication to the ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," Command Line Interface using your password is not supported. 
")),(0,i.yg)("pre",null,(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"oc login --token=\n")),(0,i.yg)("p",null,"The token is provided by the Web UI:"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Go to the ",(0,i.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console"},"DSRI web UI"),".")),(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Click on the ",(0,i.yg)("strong",{parentName:"p"},"Copy Login Command")," button (in the top right of the page)."),(0,i.yg)("img",{src:"/img/screenshot_copy_login.png",alt:"Deploy VSCode",style:{maxWidth:"100%",maxHeight:"100%"}})),(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("p",{parentName:"li"},"Paste the copied command in your terminal, and execute it to login with ",(0,i.yg)("inlineCode",{parentName:"p"},"oc")," \ud83d\udd11"))),(0,i.yg)("admonition",{title:"Login command",type:"info"},(0,i.yg)("p",{parentName:"admonition"},"The command should look like this:"),(0,i.yg)("pre",{parentName:"admonition"},(0,i.yg)("code",{parentName:"pre",className:"language-bash"},"oc login https://api.dsri2.unimaas.nl:6443 --token=$GENERATED_TOKEN\n"))))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/613f0a4f.a0904496.js b/assets/js/613f0a4f.af857580.js similarity index 99% rename from assets/js/613f0a4f.a0904496.js rename to assets/js/613f0a4f.af857580.js index 276394d7f..b47459d71 100644 --- a/assets/js/613f0a4f.a0904496.js +++ b/assets/js/613f0a4f.af857580.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6108],{5680:(e,a,t)=>{t.d(a,{xA:()=>c,yg:()=>m});var n=t(6540);function r(e,a,t){return a in e?Object.defineProperty(e,a,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[a]=t,e}function o(e,a){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);a&&(n=n.filter((function(a){return Object.getOwnPropertyDescriptor(e,a).enumerable}))),t.push.apply(t,n)}return t}function s(e){for(var a=1;a=0||(r[t]=e[t]);return r}(e,a);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var l=n.createContext({}),u=function(e){var a=n.useContext(l),t=a;return e&&(t="function"==typeof e?e(a):s(s({},a),e)),t},c=function(e){var a=u(e.components);return n.createElement(l.Provider,{value:a},e.children)},d={inlineCode:"code",wrapper:function(e){var a=e.children;return n.createElement(n.Fragment,{},a)}},p=n.forwardRef((function(e,a){var t=e.components,r=e.mdxType,o=e.originalType,l=e.parentName,c=i(e,["components","mdxType","originalType","parentName"]),p=u(t),m=r,g=p["".concat(l,".").concat(m)]||p[m]||d[m]||o;return t?n.createElement(g,s(s({ref:a},c),{},{components:t})):n.createElement(g,s({ref:a},c))}));function m(e,a){var t=arguments,r=a&&a.mdxType;if("string"==typeof e||r){var o=t.length,s=new Array(o);s[0]=p;var i={};for(var l in a)hasOwnProperty.call(a,l)&&(i[l]=a[l]);i.originalType=e,i.mdxType="string"==typeof e?e:r,s[1]=i;for(var u=2;u{t.r(a),t.d(a,{assets:()=>c,contentTitle:()=>l,default:()=>m,frontMatter:()=>i,metadata:()=>u,toc:()=>d});var n=t(9668),r=t(1367),o=(t(6540),t(5680)),s=["components"],i={id:"dask-tutorial",title:"Parallelization using Dask"},l=void 0,u={unversionedId:"dask-tutorial",id:"dask-tutorial",title:"Parallelization using Dask",description:"\ud83e\uddca 
Installation",source:"@site/docs/dask-tutorial.md",sourceDirName:".",slug:"/dask-tutorial",permalink:"/docs/dask-tutorial",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/dask-tutorial.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"dask-tutorial",title:"Parallelization using Dask"},sidebar:"docs",previous:{title:"Checkpointing Machine Learning Training",permalink:"/docs/checkpointing-ml-training"},next:{title:"Known Issues",permalink:"/docs/guide-known-issues"}},c={},d=[{value:"\ud83e\uddca Installation",id:"-installation",level:2},{value:"\ud83e\ude90 Basic Concepts of Dask",id:"-basic-concepts-of-dask",level:3},{value:"\u2728 Selecting columns and element-wise operations",id:"-selecting-columns-and-element-wise-operations",level:3},{value:"\u26a1\ufe0f Conditional filtering",id:"\ufe0f-conditional-filtering",level:3},{value:"\u2728 Common summary statistics",id:"-common-summary-statistics",level:3},{value:"\u2728 Groupby",id:"-groupby",level:3},{value:"\u26a1\ufe0f Lazy evaluation",id:"\ufe0f-lazy-evaluation",level:3},{value:"\ud83e\ude90 Dask Bags and Dask Delayed for Unstructured Data",id:"-dask-bags-and-dask-delayed-for-unstructured-data",level:4}],p={toc:d};function m(e){var a=e.components,t=(0,r.A)(e,s);return(0,o.yg)("wrapper",(0,n.A)({},p,t,{components:a,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"-installation"},"\ud83e\uddca Installation"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'!pip install "dask[complete]"\n')),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"import dask\n\ndask.__version__\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"'2023.5.0'\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"import dask.array as da\nimport dask.bag as db\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\n")),(0,o.yg)("h3",{id:"-basic-concepts-of-dask"},"\ud83e\ude90 Basic Concepts of Dask"),(0,o.yg)("p",null,"On a high-level, you can think of Dask as a wrapper that extends the capabilities of traditional tools like pandas, NumPy, and Spark to handle larger-than-memory datasets."),(0,o.yg)("p",null,"When faced with large objects like larger-than-memory arrays (vectors) or matrices (dataframes), Dask breaks them up into chunks, also called partitions."),(0,o.yg)("p",null,"For example, consider the array of 12 random numbers in both NumPy and Dask:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"narr = np.random.rand(12)\n\nnarr\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"array([0.44236558, 0.00504448, 0.87087911, 0.468925 , 0.37513511,\n 0.22607761, 0.83035297, 0.07772372, 0.61587933, 0.82861156,\n 0.66214299, 0.90979423])\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"darr = da.from_array(narr, chunks=3)\ndarr\n")),(0,o.yg)("img",{src:"/img/Screenshot-dask.png",alt:"dask table",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"The image above shows that the Dask array contains four chunks as we set chunks to 3. Under the hood, each chunk is a NumPy array in itself."),(0,o.yg)("p",null,"To fully appreciate the benefits of Dask, we need a large dataset, preferably over 1 GB in size. 
Consider the autogenerated data from the script below:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"import string\n\n# Set the desired number of rows and columns\nnum_rows = 5_000_000\nnum_cols = 10\nchunk_size = 100_000\n\n# Define an empty DataFrame to store the chunks\ndf_chunks = pd.DataFrame()\n\n# Generate and write the dataset in chunks\nfor i in range(0, num_rows, chunk_size):\n # Generate random numeric data\n numeric_data = np.random.rand(chunk_size, num_cols)\n\n # Generate random categorical data\n letters = list(string.ascii_uppercase)\n categorical_data = np.random.choice(letters, (chunk_size, num_cols))\n\n # Combine numeric and categorical data into a Pandas DataFrame\n df_chunk = pd.DataFrame(np.concatenate([numeric_data, categorical_data], axis=1))\n\n # Set column names for better understanding\n column_names = [f'Numeric_{i}' for i in range(num_cols)] + [f'Categorical_{i}' for i in range(num_cols)]\n df_chunk.columns = column_names\n\n # Append the current chunk to the DataFrame holding all chunks\n df_chunks = pd.concat([df_chunks, df_chunk], ignore_index=True)\n\n # Write the DataFrame chunk to a CSV file incrementally\n if (i + chunk_size) >= num_rows or (i // chunk_size) % 10 == 0:\n df_chunks.to_csv('large_dataset.csv', index=False, mode='a', header=(i == 0))\n df_chunks = pd.DataFrame()\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'dask_df = dd.read_csv("large_dataset.csv")\n\ndask_df.head()\n')),(0,o.yg)("p",null,"Even though the file is large, you will notice that the result is fetched almost instantaneously. For even larger files, you can specify the ",(0,o.yg)("inlineCode",{parentName:"p"},"blocksize")," parameter, which determines the number of bytes to break up the file into."),(0,o.yg)("p",null,"Similar to how Dask Arrays contain chunks of small NumPy arrays, Dask is designed to handle multiple small Pandas DataFrames arranged along the row index."),(0,o.yg)("h3",{id:"-selecting-columns-and-element-wise-operations"},"\u2728 Selecting columns and element-wise operations"),(0,o.yg)("p",null,"In this example, we're doing some pretty straightforward column operations on our Dask DataFrame, called dask_df. We're adding the values from the column Numeric_0 to the result of multiplying the values from Numeric_9 and Numeric_3. We store the outcome in a variable named result."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'result = (\n dask_df["Numeric_0"] + dask_df["Numeric_9"] * dask_df["Numeric_3"]\n)\n\nresult.compute().head()\n')),(0,o.yg)("p",null,"As we\u2019ve mentioned, Dask is a bit different from traditional computing tools in that it doesn't immediately execute these operations. Instead, it creates a kind of 'plan' called a task graph to carry out these operations later on. This approach allows Dask to optimize the computations and parallelize them when needed. The compute() function triggers Dask to finally perform these computations, and head() just shows us the first few rows of the result."),(0,o.yg)("h3",{id:"\ufe0f-conditional-filtering"},"\u26a1\ufe0f Conditional filtering"),(0,o.yg)("p",null,'Now, let\'s look at how Dask can filter data. We\'re selecting rows from our DataFrame where the value in the "Categorical_5" column is "A".'),(0,o.yg)("p",null,"This filtering process is similar to how you'd do it in pandas, but with a twist - Dask does this operation lazily. 
It prepares the task graph for this operation but waits to execute it until we call compute(). When we run head(), we get to see the first few rows of our filtered DataFrame."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'dask_df[dask_df["Categorical_5"] == "A"].compute().head()\n')),(0,o.yg)("h3",{id:"-common-summary-statistics"},"\u2728 Common summary statistics"),(0,o.yg)("p",null,"Next, we're going to generate some common summary statistics using Dask's describe() function."),(0,o.yg)("p",null,"It gives us a handful of descriptive statistics for our DataFrame, including the mean, standard deviation, minimum, maximum, and so on. As with our previous examples, Dask prepares the task graph for this operation when we call describe(), but it waits to execute it until we call compute()."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"dask_df.describe().compute()\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'dask_df["Categorical_3"].value_counts().compute().head()\n')),(0,o.yg)("p",null,'We also use value_counts() to count the number of occurrences of each unique value in the "Categorical_3" column. We trigger the operation with compute(), and head() shows us the most common values.'),(0,o.yg)("h3",{id:"-groupby"},"\u2728 Groupby"),(0,o.yg)("p",null,'Finally, let\'s use the groupby() function to group our data based on values in the "Categorical_8" column. Then we select the "Numeric_7" column and calculate the mean for each group.'),(0,o.yg)("p",null,'This is similar to how you might use \u2018groupby()\u2019 in pandas, but as you might have guessed, Dask does this lazily. We trigger the operation with compute(), and head() displays the average of the "Numeric_7" column for the first few groups.'),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'dask_df.groupby("Categorical_8")["Numeric_7"].mean().compute().head()\n')),(0,o.yg)("h3",{id:"\ufe0f-lazy-evaluation"},"\u26a1\ufe0f Lazy evaluation"),(0,o.yg)("p",null,"Now, let\u2019s explore the use of the compute function at the end of each code block."),(0,o.yg)("p",null,"Dask evaluates code blocks in lazy mode compared to Pandas\u2019 eager mode, which returns results immediately."),(0,o.yg)("p",null,"To draw a parallel in cooking, lazy evaluation is like preparing ingredients and chopping vegetables in advance but only combining them to cook when needed. The compute function serves that purpose."),(0,o.yg)("p",null,"In contrast, eager evaluation is like throwing ingredients into the fire to cook as soon as they are ready. 
This approach ensures everything is ready to serve at once."),(0,o.yg)("p",null,"Lazy evaluation is key to Dask\u2019s excellent performance as it provides:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("strong",{parentName:"li"},"Reduced computation.")," Expressions are evaluated only when needed (when compute is called), avoiding unnecessary intermediate results that may not be used in the final result."),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("strong",{parentName:"li"},"Optimal resource allocation.")," Lazy evaluation avoids allocating memory or processing power to intermediate results that may not be required."),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("strong",{parentName:"li"},"Support for large datasets.")," This method processes data elements on-the-fly or in smaller chunks, enabling efficient utilization of memory resources.")),(0,o.yg)("p",null,"When the results of compute are returned, they are given as Pandas Series/DataFrames or NumPy arrays instead of native Dask DataFrames."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"type(dask_df)\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"dask.dataframe.core.DataFrame\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'type(\n dask_df[["Numeric_5", "Numeric_6", "Numeric_7"]].mean().compute()\n)\n')),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"pandas.core.series.Series\n")),(0,o.yg)("p",null,"The reason for this is that most data manipulation operations return only a subset of the original dataframe, taking up much smaller space. So, there won\u2019t be any need to use parallelism of Dask, and you continue the rest of your workflow either in pandas or NumPy."),(0,o.yg)("h4",{id:"-dask-bags-and-dask-delayed-for-unstructured-data"},"\ud83e\ude90 Dask Bags and Dask Delayed for Unstructured Data"),(0,o.yg)("p",null,"Dask Bags and Dask Delayed are two components of the Dask library that provide powerful tools for working with unstructured or semi-structured data and enabling lazy evaluation."),(0,o.yg)("p",null,"While in the past, tabular data was the most common, today\u2019s datasets often involve unstructured files such as images, text files, videos, and audio. Dask Bags provides the functionality and API to handle such unstructured files in a parallel and scalable manner."),(0,o.yg)("p",null,"For example, let\u2019s consider a simple illustration:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'# Create a Dask Bag from a list of strings\nb = db.from_sequence(["apple", "banana", "orange", "grape", "kiwi"])\n\n# Filter the strings that start with the letter \'a\'\nfiltered_strings = b.filter(lambda x: x.startswith("a"))\n\n# Map a function to convert each string to uppercase\nuppercase_strings = filtered_strings.map(lambda x: x.upper())\n\n# Compute the result as a list\nresult = uppercase_strings.compute()\n\nprint(result)\n')),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"['APPLE']\n")),(0,o.yg)("p",null,"In this example, we create a Dask Bag b from a list of strings. We then apply operations on the Bag to filter the strings that start with the letter 'a' and convert them to uppercase using the filter() and map() functions, respectively. Finally, we compute the result as a list using the compute() method and print the output."),(0,o.yg)("p",null,"Now imagine that you can perform even more complex operations on billions of similar strings stored in a text file. 
Without the lazy evaluation and parallelism offered by Dask Bags, you would face significant challenges. "),(0,o.yg)("p",null,"As for Dask Delayed, it provides even more flexibility and introduces lazy evaluation and parallelism to various other scenarios. With Dask Delayed, you can convert any native Python function into a lazy object using the @dask.delayed decorator."),(0,o.yg)("p",null,"Here is a simple example:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"%%time\n\nimport time\n@dask.delayed\ndef process_data(x):\n # Simulate some computation\n time.sleep(1)\n return x**2\n\n\n# Generate a list of inputs\ninputs = range(1000)\n\n# Apply the delayed function to each input\nresults = [process_data(x) for x in inputs]\n\n# Compute the results in parallel\ncomputed_results = dask.compute(*results)\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"CPU times: user 260 ms, sys: 68.1 ms, total: 328 ms\nWall time: 32.2 s\n")),(0,o.yg)("p",null,"In this example, we define a function process_data decorated with @dask.delayed. The function simulates some computational work by sleeping for 1 second and then returning the square of the input value."),(0,o.yg)("p",null,"Without parallelism, performing this computation on 1000 inputs would have taken more than 1000 seconds. However, with Dask Delayed and parallel execution, the computation only took about 42.1 seconds."),(0,o.yg)("p",null,"This example demonstrates the power of parallelism in reducing computation time by efficiently distributing the workload across multiple cores or workers."),(0,o.yg)("p",null,"That\u2019s what parallelism is all about. for more information see ",(0,o.yg)("a",{parentName:"p",href:"https://docs.dask.org/en/stable/"},"https://docs.dask.org/en/stable/")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"")))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6108],{5680:(e,a,t)=>{t.d(a,{xA:()=>c,yg:()=>m});var n=t(6540);function r(e,a,t){return a in e?Object.defineProperty(e,a,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[a]=t,e}function o(e,a){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);a&&(n=n.filter((function(a){return Object.getOwnPropertyDescriptor(e,a).enumerable}))),t.push.apply(t,n)}return t}function s(e){for(var a=1;a=0||(r[t]=e[t]);return r}(e,a);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var l=n.createContext({}),u=function(e){var a=n.useContext(l),t=a;return e&&(t="function"==typeof e?e(a):s(s({},a),e)),t},c=function(e){var a=u(e.components);return n.createElement(l.Provider,{value:a},e.children)},d={inlineCode:"code",wrapper:function(e){var a=e.children;return n.createElement(n.Fragment,{},a)}},p=n.forwardRef((function(e,a){var t=e.components,r=e.mdxType,o=e.originalType,l=e.parentName,c=i(e,["components","mdxType","originalType","parentName"]),p=u(t),m=r,g=p["".concat(l,".").concat(m)]||p[m]||d[m]||o;return t?n.createElement(g,s(s({ref:a},c),{},{components:t})):n.createElement(g,s({ref:a},c))}));function m(e,a){var t=arguments,r=a&&a.mdxType;if("string"==typeof e||r){var o=t.length,s=new Array(o);s[0]=p;var i={};for(var l in a)hasOwnProperty.call(a,l)&&(i[l]=a[l]);i.originalType=e,i.mdxType="string"==typeof e?e:r,s[1]=i;for(var 
u=2;u{t.r(a),t.d(a,{assets:()=>c,contentTitle:()=>l,default:()=>m,frontMatter:()=>i,metadata:()=>u,toc:()=>d});var n=t(9668),r=t(1367),o=(t(6540),t(5680)),s=["components"],i={id:"dask-tutorial",title:"Parallelization using Dask"},l=void 0,u={unversionedId:"dask-tutorial",id:"dask-tutorial",title:"Parallelization using Dask",description:"\ud83e\uddca Installation",source:"@site/docs/dask-tutorial.md",sourceDirName:".",slug:"/dask-tutorial",permalink:"/docs/dask-tutorial",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/dask-tutorial.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"dask-tutorial",title:"Parallelization using Dask"},sidebar:"docs",previous:{title:"Checkpointing Machine Learning Training",permalink:"/docs/checkpointing-ml-training"},next:{title:"Known Issues",permalink:"/docs/guide-known-issues"}},c={},d=[{value:"\ud83e\uddca Installation",id:"-installation",level:2},{value:"\ud83e\ude90 Basic Concepts of Dask",id:"-basic-concepts-of-dask",level:3},{value:"\u2728 Selecting columns and element-wise operations",id:"-selecting-columns-and-element-wise-operations",level:3},{value:"\u26a1\ufe0f Conditional filtering",id:"\ufe0f-conditional-filtering",level:3},{value:"\u2728 Common summary statistics",id:"-common-summary-statistics",level:3},{value:"\u2728 Groupby",id:"-groupby",level:3},{value:"\u26a1\ufe0f Lazy evaluation",id:"\ufe0f-lazy-evaluation",level:3},{value:"\ud83e\ude90 Dask Bags and Dask Delayed for Unstructured Data",id:"-dask-bags-and-dask-delayed-for-unstructured-data",level:4}],p={toc:d};function m(e){var a=e.components,t=(0,r.A)(e,s);return(0,o.yg)("wrapper",(0,n.A)({},p,t,{components:a,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"-installation"},"\ud83e\uddca Installation"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'!pip install "dask[complete]"\n')),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"import dask\n\ndask.__version__\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"'2023.5.0'\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"import dask.array as da\nimport dask.bag as db\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\n")),(0,o.yg)("h3",{id:"-basic-concepts-of-dask"},"\ud83e\ude90 Basic Concepts of Dask"),(0,o.yg)("p",null,"On a high-level, you can think of Dask as a wrapper that extends the capabilities of traditional tools like pandas, NumPy, and Spark to handle larger-than-memory datasets."),(0,o.yg)("p",null,"When faced with large objects like larger-than-memory arrays (vectors) or matrices (dataframes), Dask breaks them up into chunks, also called partitions."),(0,o.yg)("p",null,"For example, consider the array of 12 random numbers in both NumPy and Dask:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"narr = np.random.rand(12)\n\nnarr\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"array([0.44236558, 0.00504448, 0.87087911, 0.468925 , 0.37513511,\n 0.22607761, 0.83035297, 0.07772372, 0.61587933, 0.82861156,\n 0.66214299, 0.90979423])\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"darr = da.from_array(narr, chunks=3)\ndarr\n")),(0,o.yg)("img",{src:"/img/Screenshot-dask.png",alt:"dask table",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"The image above shows 
that the Dask array contains four chunks as we set chunks to 3. Under the hood, each chunk is a NumPy array in itself."),(0,o.yg)("p",null,"To fully appreciate the benefits of Dask, we need a large dataset, preferably over 1 GB in size. Consider the autogenerated data from the script below:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"import string\n\n# Set the desired number of rows and columns\nnum_rows = 5_000_000\nnum_cols = 10\nchunk_size = 100_000\n\n# Define an empty DataFrame to store the chunks\ndf_chunks = pd.DataFrame()\n\n# Generate and write the dataset in chunks\nfor i in range(0, num_rows, chunk_size):\n # Generate random numeric data\n numeric_data = np.random.rand(chunk_size, num_cols)\n\n # Generate random categorical data\n letters = list(string.ascii_uppercase)\n categorical_data = np.random.choice(letters, (chunk_size, num_cols))\n\n # Combine numeric and categorical data into a Pandas DataFrame\n df_chunk = pd.DataFrame(np.concatenate([numeric_data, categorical_data], axis=1))\n\n # Set column names for better understanding\n column_names = [f'Numeric_{i}' for i in range(num_cols)] + [f'Categorical_{i}' for i in range(num_cols)]\n df_chunk.columns = column_names\n\n # Append the current chunk to the DataFrame holding all chunks\n df_chunks = pd.concat([df_chunks, df_chunk], ignore_index=True)\n\n # Write the DataFrame chunk to a CSV file incrementally\n if (i + chunk_size) >= num_rows or (i // chunk_size) % 10 == 0:\n df_chunks.to_csv('large_dataset.csv', index=False, mode='a', header=(i == 0))\n df_chunks = pd.DataFrame()\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'dask_df = dd.read_csv("large_dataset.csv")\n\ndask_df.head()\n')),(0,o.yg)("p",null,"Even though the file is large, you will notice that the result is fetched almost instantaneously. For even larger files, you can specify the ",(0,o.yg)("inlineCode",{parentName:"p"},"blocksize")," parameter, which determines the number of bytes to break up the file into."),(0,o.yg)("p",null,"Similar to how Dask Arrays contain chunks of small NumPy arrays, Dask is designed to handle multiple small Pandas DataFrames arranged along the row index."),(0,o.yg)("h3",{id:"-selecting-columns-and-element-wise-operations"},"\u2728 Selecting columns and element-wise operations"),(0,o.yg)("p",null,"In this example, we're doing some pretty straightforward column operations on our Dask DataFrame, called dask_df. We're adding the values from the column Numeric_0 to the result of multiplying the values from Numeric_9 and Numeric_3. We store the outcome in a variable named result."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'result = (\n dask_df["Numeric_0"] + dask_df["Numeric_9"] * dask_df["Numeric_3"]\n)\n\nresult.compute().head()\n')),(0,o.yg)("p",null,"As we\u2019ve mentioned, Dask is a bit different from traditional computing tools in that it doesn't immediately execute these operations. Instead, it creates a kind of 'plan' called a task graph to carry out these operations later on. This approach allows Dask to optimize the computations and parallelize them when needed. The compute() function triggers Dask to finally perform these computations, and head() just shows us the first few rows of the result."),(0,o.yg)("h3",{id:"\ufe0f-conditional-filtering"},"\u26a1\ufe0f Conditional filtering"),(0,o.yg)("p",null,'Now, let\'s look at how Dask can filter data. 
We\'re selecting rows from our DataFrame where the value in the "Categorical_5" column is "A".'),(0,o.yg)("p",null,"This filtering process is similar to how you'd do it in pandas, but with a twist - Dask does this operation lazily. It prepares the task graph for this operation but waits to execute it until we call compute(). When we run head(), we get to see the first few rows of our filtered DataFrame."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'dask_df[dask_df["Categorical_5"] == "A"].compute().head()\n')),(0,o.yg)("h3",{id:"-common-summary-statistics"},"\u2728 Common summary statistics"),(0,o.yg)("p",null,"Next, we're going to generate some common summary statistics using Dask's describe() function."),(0,o.yg)("p",null,"It gives us a handful of descriptive statistics for our DataFrame, including the mean, standard deviation, minimum, maximum, and so on. As with our previous examples, Dask prepares the task graph for this operation when we call describe(), but it waits to execute it until we call compute()."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"dask_df.describe().compute()\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'dask_df["Categorical_3"].value_counts().compute().head()\n')),(0,o.yg)("p",null,'We also use value_counts() to count the number of occurrences of each unique value in the "Categorical_3" column. We trigger the operation with compute(), and head() shows us the most common values.'),(0,o.yg)("h3",{id:"-groupby"},"\u2728 Groupby"),(0,o.yg)("p",null,'Finally, let\'s use the groupby() function to group our data based on values in the "Categorical_8" column. Then we select the "Numeric_7" column and calculate the mean for each group.'),(0,o.yg)("p",null,'This is similar to how you might use \u2018groupby()\u2019 in pandas, but as you might have guessed, Dask does this lazily. We trigger the operation with compute(), and head() displays the average of the "Numeric_7" column for the first few groups.'),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'dask_df.groupby("Categorical_8")["Numeric_7"].mean().compute().head()\n')),(0,o.yg)("h3",{id:"\ufe0f-lazy-evaluation"},"\u26a1\ufe0f Lazy evaluation"),(0,o.yg)("p",null,"Now, let\u2019s explore the use of the compute function at the end of each code block."),(0,o.yg)("p",null,"Dask evaluates code blocks in lazy mode compared to Pandas\u2019 eager mode, which returns results immediately."),(0,o.yg)("p",null,"To draw a parallel in cooking, lazy evaluation is like preparing ingredients and chopping vegetables in advance but only combining them to cook when needed. The compute function serves that purpose."),(0,o.yg)("p",null,"In contrast, eager evaluation is like throwing ingredients into the fire to cook as soon as they are ready. 
This approach ensures everything is ready to serve at once."),(0,o.yg)("p",null,"Lazy evaluation is key to Dask\u2019s excellent performance as it provides:"),(0,o.yg)("ol",null,(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("strong",{parentName:"li"},"Reduced computation.")," Expressions are evaluated only when needed (when compute is called), avoiding unnecessary intermediate results that may not be used in the final result."),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("strong",{parentName:"li"},"Optimal resource allocation.")," Lazy evaluation avoids allocating memory or processing power to intermediate results that may not be required."),(0,o.yg)("li",{parentName:"ol"},(0,o.yg)("strong",{parentName:"li"},"Support for large datasets.")," This method processes data elements on-the-fly or in smaller chunks, enabling efficient utilization of memory resources.")),(0,o.yg)("p",null,"When the results of compute are returned, they are given as Pandas Series/DataFrames or NumPy arrays instead of native Dask DataFrames."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"type(dask_df)\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"dask.dataframe.core.DataFrame\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'type(\n dask_df[["Numeric_5", "Numeric_6", "Numeric_7"]].mean().compute()\n)\n')),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"pandas.core.series.Series\n")),(0,o.yg)("p",null,"The reason for this is that most data manipulation operations return only a subset of the original dataframe, taking up much smaller space. So, there won\u2019t be any need to use parallelism of Dask, and you continue the rest of your workflow either in pandas or NumPy."),(0,o.yg)("h4",{id:"-dask-bags-and-dask-delayed-for-unstructured-data"},"\ud83e\ude90 Dask Bags and Dask Delayed for Unstructured Data"),(0,o.yg)("p",null,"Dask Bags and Dask Delayed are two components of the Dask library that provide powerful tools for working with unstructured or semi-structured data and enabling lazy evaluation."),(0,o.yg)("p",null,"While in the past, tabular data was the most common, today\u2019s datasets often involve unstructured files such as images, text files, videos, and audio. Dask Bags provides the functionality and API to handle such unstructured files in a parallel and scalable manner."),(0,o.yg)("p",null,"For example, let\u2019s consider a simple illustration:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},'# Create a Dask Bag from a list of strings\nb = db.from_sequence(["apple", "banana", "orange", "grape", "kiwi"])\n\n# Filter the strings that start with the letter \'a\'\nfiltered_strings = b.filter(lambda x: x.startswith("a"))\n\n# Map a function to convert each string to uppercase\nuppercase_strings = filtered_strings.map(lambda x: x.upper())\n\n# Compute the result as a list\nresult = uppercase_strings.compute()\n\nprint(result)\n')),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"['APPLE']\n")),(0,o.yg)("p",null,"In this example, we create a Dask Bag b from a list of strings. We then apply operations on the Bag to filter the strings that start with the letter 'a' and convert them to uppercase using the filter() and map() functions, respectively. Finally, we compute the result as a list using the compute() method and print the output."),(0,o.yg)("p",null,"Now imagine that you can perform even more complex operations on billions of similar strings stored in a text file. 
Without the lazy evaluation and parallelism offered by Dask Bags, you would face significant challenges. "),(0,o.yg)("p",null,"As for Dask Delayed, it provides even more flexibility and introduces lazy evaluation and parallelism to various other scenarios. With Dask Delayed, you can convert any native Python function into a lazy object using the @dask.delayed decorator."),(0,o.yg)("p",null,"Here is a simple example:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"%%time\n\nimport time\n@dask.delayed\ndef process_data(x):\n # Simulate some computation\n time.sleep(1)\n return x**2\n\n\n# Generate a list of inputs\ninputs = range(1000)\n\n# Apply the delayed function to each input\nresults = [process_data(x) for x in inputs]\n\n# Compute the results in parallel\ncomputed_results = dask.compute(*results)\n")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre"},"CPU times: user 260 ms, sys: 68.1 ms, total: 328 ms\nWall time: 32.2 s\n")),(0,o.yg)("p",null,"In this example, we define a function process_data decorated with @dask.delayed. The function simulates some computational work by sleeping for 1 second and then returning the square of the input value."),(0,o.yg)("p",null,"Without parallelism, performing this computation on 1000 inputs would have taken more than 1000 seconds. However, with Dask Delayed and parallel execution, the computation only took about 42.1 seconds."),(0,o.yg)("p",null,"This example demonstrates the power of parallelism in reducing computation time by efficiently distributing the workload across multiple cores or workers."),(0,o.yg)("p",null,"That\u2019s what parallelism is all about. for more information see ",(0,o.yg)("a",{parentName:"p",href:"https://docs.dask.org/en/stable/"},"https://docs.dask.org/en/stable/")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/63528e5f.07c1d667.js b/assets/js/63528e5f.1f846c29.js similarity index 99% rename from assets/js/63528e5f.07c1d667.js rename to assets/js/63528e5f.1f846c29.js index 71622e596..c69237a70 100644 --- a/assets/js/63528e5f.07c1d667.js +++ b/assets/js/63528e5f.1f846c29.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5803],{5680:(e,t,n)=>{n.d(t,{xA:()=>s,yg:()=>u});var a=n(6540);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function i(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var c=a.createContext({}),p=function(e){var t=a.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=p(e.components);return a.createElement(c.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},g=a.forwardRef((function(e,t){var n=e.components,o=e.mdxType,r=e.originalType,c=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),g=p(n),u=o,y=g["".concat(c,".").concat(u)]||g[u]||d[u]||r;return 
n?a.createElement(y,i(i({ref:t},s),{},{components:n})):a.createElement(y,i({ref:t},s))}));function u(e,t){var n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=n.length,i=new Array(r);i[0]=g;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l.mdxType="string"==typeof e?e:o,i[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>s,contentTitle:()=>c,default:()=>u,frontMatter:()=>l,metadata:()=>p,toc:()=>d});var a=n(9668),o=n(1367),r=(n(6540),n(5680)),i=["components"],l={id:"openshift-delete-objects",title:"Delete objects (advanced)"},c=void 0,p={unversionedId:"openshift-delete-objects",id:"openshift-delete-objects",title:"Delete objects (advanced)",description:"This documentation provide guidelines to delete various types of objects in the OpenShift DSRI. Be careful when you are deleting object in your project, as it could be an object required to run an application.",source:"@site/docs/openshift-delete-objects.md",sourceDirName:".",slug:"/openshift-delete-objects",permalink:"/docs/openshift-delete-objects",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-delete-objects.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-delete-objects",title:"Delete objects (advanced)"},sidebar:"docs",previous:{title:"Data storage",permalink:"/docs/openshift-storage"},next:{title:"Create a new Project",permalink:"/docs/project-management"}},s={},d=[{value:"Delete an application",id:"delete-an-application",level:2},{value:"Delete pod",id:"delete-pod",level:2},{value:"Delete a project",id:"delete-a-project",level:2},{value:"Delete persistent storage",id:"delete-persistent-storage",level:2},{value:"Fix stuck deletions",id:"fix-stuck-deletions",level:2},{value:"Stuck provisioned service",id:"stuck-provisioned-service",level:3},{value:"Delete stuck project",id:"delete-stuck-project",level:3}],g={toc:d};function u(e){var t=e.components,n=(0,o.A)(e,i);return(0,r.yg)("wrapper",(0,a.A)({},g,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("admonition",{title:"Be careful",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"This documentation provide guidelines to delete various types of objects in the OpenShift DSRI. Be careful when you are deleting object in your project, as it could be an object required to run an application.")),(0,r.yg)("p",null,"It is recommend to use the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," tool to delete OpenShift objects, as it will allow to properly delete all objects related to specific deployments."),(0,r.yg)("admonition",{title:"Project",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Make sure you are connected to the right project:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc project my-project\n"))),(0,r.yg)("h2",{id:"delete-an-application"},"Delete an application"),(0,r.yg)("p",null,"The best way to make sure all objects related to your application have been deleted is to use the command line providing your application name."),(0,r.yg)("p",null,"Different selectors can be used to easily delete all objects generated by an application deployment. 
2 selectors can easily be found in the template configuration:"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"app")," : the name you gave when creating your application"),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"template")," : the name of the template you used to create the application. Use it only if you want to delete all applications created by a specific template.")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"Delete storage if necessary from the OpenShift web UI.")),(0,r.yg)("admonition",{title:"Force deletion",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"You can force the deletion if the objects are not deleting properly:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application\n"))),(0,r.yg)("h2",{id:"delete-pod"},"Delete pod"),(0,r.yg)("p",null,"Get the ID of the specific pod you want to delete:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod\n")),(0,r.yg)("p",null,"Use the pod ID retrieved to delete the pod:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete pod \n")),(0,r.yg)("admonition",{title:"Force deletion",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"If the pod is not properly deleted, you can force its deletion:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete pod --force --grace-period=0 \n"))),(0,r.yg)("h2",{id:"delete-a-project"},"Delete a project"),(0,r.yg)("admonition",{title:"Be careful",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"All objects and persistent storages in this project will be deleted and cannot be retrieved.")),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"To properly delete a project you need to first delete all objects in this project:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --all -n \n")),(0,r.yg)("ol",{start:2},(0,r.yg)("li",{parentName:"ol"},"Then delete the project:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete project \n")),(0,r.yg)("h2",{id:"delete-persistent-storage"},"Delete persistent storage"),(0,r.yg)("admonition",{title:"Be careful",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"All data stored in this persistent storage will be lost and cannot be retrieved.")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete pvc storage-name\n")),(0,r.yg)("h2",{id:"fix-stuck-deletions"},"Fix stuck deletions"),(0,r.yg)("h3",{id:"stuck-provisioned-service"},"Stuck provisioned service"),(0,r.yg)("p",null,"If a provisioned service is stuck on ",(0,r.yg)("inlineCode",{parentName:"p"},"Marked for deletion")," you might need to set finalizers to null in the YAML."),(0,r.yg)("p",null,"This can be done using the OpenShift web UI:"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Go to the ",(0,r.yg)("strong",{parentName:"p"},"Provisionned Service")," in the OpenShift UI 
overview")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Click on ",(0,r.yg)("strong",{parentName:"p"},"Edit YAML"))),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Remove the ",(0,r.yg)("strong",{parentName:"p"},"finalizers"),":"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-yaml"}," finalizers:\n - kubernetes-incubator/service-catalog\n")))),(0,r.yg)("p",null,"You can also do it using the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," CLI:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},'oc get serviceinstance\n \n# Delete problematic line from serviceinstance to delete them\noc get serviceinstance -o yaml | grep Terminating | sed "/kubernetes-incubator/d"| oc apply -f - \n')),(0,r.yg)("admonition",{title:"No global catalog",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"The OpenShift Catalog does not handle deploying templates globally properly (on all projects). If a template is deployed globally, OpenShift will try to create unnecessary objects such as provisioned service (aka. ServiceInstance), or ClusterClasses. Those services are not used, and some of them cannot be deleted easily. ")),(0,r.yg)("admonition",{title:"Catalog per project ",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"At the moment it is more reliable to create the template in directly in your project if you need to use it multiple time.")),(0,r.yg)("h3",{id:"delete-stuck-project"},"Delete stuck project"),(0,r.yg)("p",null,"Project can get stuck as marked for deletion. Usually due to Objects still present in the project that are not terminated or ",(0,r.yg)("inlineCode",{parentName:"p"},"finalizers")," left in the some objects YAML file."),(0,r.yg)("p",null,"The following commands will allow you to clean up all the projects stuck in terminating state you have access to ."),(0,r.yg)("p",null,"Force deletion of terminating projects:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc delete project --force --grace-period=0 $i ; done\n")),(0,r.yg)("p",null,"Delete all objects in terminating projects:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --force --grace-period=0 --all -n $i ; done\n")),(0,r.yg)("p",null,"Remove Kubernetes finalizers from terminating projects:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'for i in $(oc get projects | grep Terminating| awk \'{print $1}\'); do echo $i; oc get project $i -o yaml | sed "/kubernetes/d" | sed "/finalizers:/d" | oc apply -f - ; done\n')),(0,r.yg)("admonition",{title:"Fix deletion",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"If ",(0,r.yg)("inlineCode",{parentName:"p"},"ServiceInstances")," refuses to get deleted, try to remove kubernetes finalizers:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc get serviceinstance -n $i -o yaml | sed \"/kubernetes-incubator/d\"| oc apply -f - ; done\n"))),(0,r.yg)("admonition",{title:"Check deletion",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Check if there are still objects in a 
project:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get all,configmap,pvc,serviceaccount,secret,rolebinding,serviceinstance\n"))))}u.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5803],{5680:(e,t,n)=>{n.d(t,{xA:()=>s,yg:()=>u});var a=n(6540);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function i(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var c=a.createContext({}),p=function(e){var t=a.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=p(e.components);return a.createElement(c.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},g=a.forwardRef((function(e,t){var n=e.components,o=e.mdxType,r=e.originalType,c=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),g=p(n),u=o,y=g["".concat(c,".").concat(u)]||g[u]||d[u]||r;return n?a.createElement(y,i(i({ref:t},s),{},{components:n})):a.createElement(y,i({ref:t},s))}));function u(e,t){var n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=n.length,i=new Array(r);i[0]=g;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l.mdxType="string"==typeof e?e:o,i[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>s,contentTitle:()=>c,default:()=>u,frontMatter:()=>l,metadata:()=>p,toc:()=>d});var a=n(9668),o=n(1367),r=(n(6540),n(5680)),i=["components"],l={id:"openshift-delete-objects",title:"Delete objects (advanced)"},c=void 0,p={unversionedId:"openshift-delete-objects",id:"openshift-delete-objects",title:"Delete objects (advanced)",description:"This documentation provide guidelines to delete various types of objects in the OpenShift DSRI. 
Be careful when you are deleting object in your project, as it could be an object required to run an application.",source:"@site/docs/openshift-delete-objects.md",sourceDirName:".",slug:"/openshift-delete-objects",permalink:"/docs/openshift-delete-objects",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-delete-objects.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-delete-objects",title:"Delete objects (advanced)"},sidebar:"docs",previous:{title:"Data storage",permalink:"/docs/openshift-storage"},next:{title:"Create a new Project",permalink:"/docs/project-management"}},s={},d=[{value:"Delete an application",id:"delete-an-application",level:2},{value:"Delete pod",id:"delete-pod",level:2},{value:"Delete a project",id:"delete-a-project",level:2},{value:"Delete persistent storage",id:"delete-persistent-storage",level:2},{value:"Fix stuck deletions",id:"fix-stuck-deletions",level:2},{value:"Stuck provisioned service",id:"stuck-provisioned-service",level:3},{value:"Delete stuck project",id:"delete-stuck-project",level:3}],g={toc:d};function u(e){var t=e.components,n=(0,o.A)(e,i);return(0,r.yg)("wrapper",(0,a.A)({},g,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("admonition",{title:"Be careful",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"This documentation provide guidelines to delete various types of objects in the OpenShift DSRI. Be careful when you are deleting object in your project, as it could be an object required to run an application.")),(0,r.yg)("p",null,"It is recommend to use the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," tool to delete OpenShift objects, as it will allow to properly delete all objects related to specific deployments."),(0,r.yg)("admonition",{title:"Project",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Make sure you are connected to the right project:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc project my-project\n"))),(0,r.yg)("h2",{id:"delete-an-application"},"Delete an application"),(0,r.yg)("p",null,"The best way to make sure all objects related to your application have been deleted is to use the command line providing your application name."),(0,r.yg)("p",null,"Different selectors can be used to easily delete all objects generated by an application deployment. 2 selectors can easily be found in the template configuration:"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"app")," : the name you gave when creating your application"),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"template")," : the name of the template you used to create the application. 
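For example, a minimal sketch of deleting by the `template` selector, mirroring the `app` selector command shown below (the template name `my-template` is a placeholder, not taken from this documentation):

```shell
# Hypothetical: delete every object created from the template named my-template
oc delete all,secret,configmaps,serviceaccount,rolebinding --selector template=my-template
```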
Use it only if you want to delete all applications created by a specific template.")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application\n")),(0,r.yg)("blockquote",null,(0,r.yg)("p",{parentName:"blockquote"},"Delete storage if necessary from the OpenShift web UI.")),(0,r.yg)("admonition",{title:"Force deletion",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"You can force the deletion if the objects are not deleting properly:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application\n"))),(0,r.yg)("h2",{id:"delete-pod"},"Delete pod"),(0,r.yg)("p",null,"Get the ID of the specific pod you want to delete:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod\n")),(0,r.yg)("p",null,"Use the pod ID retrieved to delete the pod:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete pod <POD_ID>\n")),(0,r.yg)("admonition",{title:"Force deletion",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"If the pod is not properly deleted, you can force its deletion:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete pod --force --grace-period=0 <POD_ID>\n"))),(0,r.yg)("h2",{id:"delete-a-project"},"Delete a project"),(0,r.yg)("admonition",{title:"Be careful",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"All objects and persistent storage in this project will be deleted and cannot be retrieved.")),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"To properly delete a project, you first need to delete all objects in this project:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --all -n <PROJECT_NAME>\n")),(0,r.yg)("ol",{start:2},(0,r.yg)("li",{parentName:"ol"},"Then delete the project:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete project <PROJECT_NAME>\n")),(0,r.yg)("h2",{id:"delete-persistent-storage"},"Delete persistent storage"),(0,r.yg)("admonition",{title:"Be careful",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},"All data stored in this persistent storage will be lost and cannot be retrieved.")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc delete pvc storage-name\n")),(0,r.yg)("h2",{id:"fix-stuck-deletions"},"Fix stuck deletions"),(0,r.yg)("h3",{id:"stuck-provisioned-service"},"Stuck provisioned service"),(0,r.yg)("p",null,"If a provisioned service is stuck on ",(0,r.yg)("inlineCode",{parentName:"p"},"Marked for deletion"),", you might need to set finalizers to null in the YAML."),(0,r.yg)("p",null,"This can be done using the OpenShift web UI:"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Go to the ",(0,r.yg)("strong",{parentName:"p"},"Provisioned Service")," in the OpenShift UI overview")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Click on ",(0,r.yg)("strong",{parentName:"p"},"Edit YAML"))),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Remove the ",(0,r.yg)("strong",{parentName:"p"},"finalizers"),":"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-yaml"}," finalizers:\n
- kubernetes-incubator/service-catalog\n")))),(0,r.yg)("p",null,"You can also do it using the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," CLI:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-shell"},'oc get serviceinstance\n \n# Delete problematic line from serviceinstance to delete them\noc get serviceinstance -o yaml | grep Terminating | sed "/kubernetes-incubator/d"| oc apply -f - \n')),(0,r.yg)("admonition",{title:"No global catalog",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"The OpenShift Catalog does not properly handle deploying templates globally (on all projects). If a template is deployed globally, OpenShift will try to create unnecessary objects such as provisioned services (a.k.a. ServiceInstance), or ClusterClasses. Those services are not used, and some of them cannot be deleted easily. ")),(0,r.yg)("admonition",{title:"Catalog per project ",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"At the moment it is more reliable to create the template directly in your project if you need to use it multiple times.")),(0,r.yg)("h3",{id:"delete-stuck-project"},"Delete stuck project"),(0,r.yg)("p",null,"A project can get stuck as marked for deletion, usually due to objects still present in the project that are not terminated, or ",(0,r.yg)("inlineCode",{parentName:"p"},"finalizers")," left in some objects' YAML files."),(0,r.yg)("p",null,"The following commands will allow you to clean up all the projects stuck in a terminating state that you have access to."),(0,r.yg)("p",null,"Force deletion of terminating projects:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc delete project --force --grace-period=0 $i ; done\n")),(0,r.yg)("p",null,"Delete all objects in terminating projects:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --force --grace-period=0 --all -n $i ; done\n")),(0,r.yg)("p",null,"Remove Kubernetes finalizers from terminating projects:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'for i in $(oc get projects | grep Terminating| awk \'{print $1}\'); do echo $i; oc get project $i -o yaml | sed "/kubernetes/d" | sed "/finalizers:/d" | oc apply -f - ; done\n')),(0,r.yg)("admonition",{title:"Fix deletion",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"If ",(0,r.yg)("inlineCode",{parentName:"p"},"ServiceInstances")," refuse to be deleted, try removing the Kubernetes finalizers:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-shell"},"for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc get serviceinstance -n $i -o yaml | sed \"/kubernetes-incubator/d\"| oc apply -f - ; done\n"))),(0,r.yg)("admonition",{title:"Check deletion",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Check if there are still objects in a project:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get all,configmap,pvc,serviceaccount,secret,rolebinding,serviceinstance\n"))))}u.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/682467b2.681b6d9e.js b/assets/js/682467b2.94ea0f7b.js similarity index 99% rename from assets/js/682467b2.681b6d9e.js rename to
assets/js/682467b2.94ea0f7b.js index 68414192e..8deb3fad7 100644 --- a/assets/js/682467b2.681b6d9e.js +++ b/assets/js/682467b2.94ea0f7b.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6746],{5680:(e,r,t)=>{t.d(r,{xA:()=>p,yg:()=>f});var o=t(6540);function n(e,r,t){return r in e?Object.defineProperty(e,r,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[r]=t,e}function i(e,r){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);r&&(o=o.filter((function(r){return Object.getOwnPropertyDescriptor(e,r).enumerable}))),t.push.apply(t,o)}return t}function a(e){for(var r=1;r=0||(n[t]=e[t]);return n}(e,r);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(n[t]=e[t])}return n}var s=o.createContext({}),c=function(e){var r=o.useContext(s),t=r;return e&&(t="function"==typeof e?e(r):a(a({},r),e)),t},p=function(e){var r=c(e.components);return o.createElement(s.Provider,{value:r},e.children)},u={inlineCode:"code",wrapper:function(e){var r=e.children;return o.createElement(o.Fragment,{},r)}},d=o.forwardRef((function(e,r){var t=e.components,n=e.mdxType,i=e.originalType,s=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),d=c(t),f=n,y=d["".concat(s,".").concat(f)]||d[f]||u[f]||i;return t?o.createElement(y,a(a({ref:r},p),{},{components:t})):o.createElement(y,a({ref:r},p))}));function f(e,r){var t=arguments,n=r&&r.mdxType;if("string"==typeof e||n){var i=t.length,a=new Array(i);a[0]=d;var l={};for(var s in r)hasOwnProperty.call(r,s)&&(l[s]=r[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,a[1]=l;for(var c=2;c{t.r(r),t.d(r,{assets:()=>p,contentTitle:()=>s,default:()=>f,frontMatter:()=>l,metadata:()=>c,toc:()=>u});var o=t(9668),n=t(1367),i=(t(6540),t(5680)),a=["components"],l={id:"profile-pytorch-code",title:"PyTorch Profiling"},s=void 0,c={unversionedId:"profile-pytorch-code",id:"profile-pytorch-code",title:"PyTorch Profiling",description:"What is profiling?",source:"@site/docs/profile-pytorch-code.md",sourceDirName:".",slug:"/profile-pytorch-code",permalink:"/docs/profile-pytorch-code",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/profile-pytorch-code.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"profile-pytorch-code",title:"PyTorch Profiling"},sidebar:"docs",previous:{title:"Increase your processes speed",permalink:"/docs/increase-process-speed"},next:{title:"Tensorflow Optimization",permalink:"/docs/speeding-tensorflow-dl"}},p={},u=[{value:"What is profiling?",id:"what-is-profiling",level:2},{value:"Why should I care about profiling?",id:"why-should-i-care-about-profiling",level:2},{value:"When should I care about profiling?",id:"when-should-i-care-about-profiling",level:2},{value:"How DSRI team can help you?",id:"how-dsri-team-can-help-you",level:2},{value:"External Resources and references",id:"external-resources-and-references",level:2}],d={toc:u};function f(e){var r=e.components,t=(0,n.A)(e,a);return(0,i.yg)("wrapper",(0,o.A)({},d,t,{components:r,mdxType:"MDXLayout"}),(0,i.yg)("h2",{id:"what-is-profiling"},"What is profiling?"),(0,i.yg)("p",null,"According to wikipedia:"),(0,i.yg)("p",null,'"Profiling is a form of dynamic program analysis that measures, for example, the space (memory) or time complexity of a program, the usage of particular instructions, or the frequency and 
duration of function calls. Most commonly, profiling information serves to aid program optimization, and more specifically, performance engineering."'),(0,i.yg)("h2",{id:"why-should-i-care-about-profiling"},"Why should I care about profiling?"),(0,i.yg)("p",null,"You may know that training large models like GPT-3 takes several million dollars source and a few hundred MWh source. If the engineers that trained these models did not spend time on optimization, it might have been several million dollars and hunderds of MWh more."),(0,i.yg)("p",null,"Sure, the model you'd like to train is probably not quite as big. But maybe you want to train it 10000 times, because you want to do hyperparameter optimization. And even if you only train it once, it may take quite a bit of compute resources, i.e. money and energy."),(0,i.yg)("h2",{id:"when-should-i-care-about-profiling"},"When should I care about profiling?"),(0,i.yg)("p",null,"Well, you should always care if your code runs efficiently, but there's different levels of caring."),(0,i.yg)("p",null,"From personal experience: if I know I'm going to run a code only once, for a few days, on a single GPU, I'll probably not create a full profile. What I would do is inspect my GPU and CPU utilization during my runs, just to see if it is somewhat efficient, and if I didn't make any obvious mistakes (e.g. accidentally not using the GPU, even if I have one available)."),(0,i.yg)("p",null,"If I know that I'll run my code on multiple GPUs, for multiple days, (potentially) on multiple nodes, and/or I need to run it multiple times, I know that my resource footprint is going to be large, and it's worth spending some time and effort to optimize the code. That's when I'll create a profile. The good part is: the more often you do it, the quicker and more adapt you become at it."),(0,i.yg)("h2",{id:"how-dsri-team-can-help-you"},"How DSRI team can help you?"),(0,i.yg)("p",null,"We can assist you with analyzing the bottleneck/s in your deep learning pipeline and recommend the improvments to speed up your pipeline."),(0,i.yg)("h2",{id:"external-resources-and-references"},"External Resources and references"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"This documentation is taken from the Surf's PyTorch profiling wiki (",(0,i.yg)("a",{parentName:"li",href:"https://servicedesk.surf.nl/wiki/display/WIKI/PyTorch+Profiling"},"https://servicedesk.surf.nl/wiki/display/WIKI/PyTorch+Profiling"),")"),(0,i.yg)("li",{parentName:"ul"},"Tutorial on PyTorch profiling can be found here: (",(0,i.yg)("a",{parentName:"li",href:"https://github.com/sara-nl/PraceHPML2022/blob/master/notebooks/PyTorch_profiling/PyTorch_profiling.ipynb"},"https://github.com/sara-nl/PraceHPML2022/blob/master/notebooks/PyTorch_profiling/PyTorch_profiling.ipynb"),")")))}f.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6746],{5680:(e,r,t)=>{t.d(r,{xA:()=>p,yg:()=>f});var o=t(6540);function n(e,r,t){return r in e?Object.defineProperty(e,r,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[r]=t,e}function i(e,r){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);r&&(o=o.filter((function(r){return Object.getOwnPropertyDescriptor(e,r).enumerable}))),t.push.apply(t,o)}return t}function a(e){for(var r=1;r=0||(n[t]=e[t]);return n}(e,r);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(n[t]=e[t])}return n}var 
s=o.createContext({}),c=function(e){var r=o.useContext(s),t=r;return e&&(t="function"==typeof e?e(r):a(a({},r),e)),t},p=function(e){var r=c(e.components);return o.createElement(s.Provider,{value:r},e.children)},u={inlineCode:"code",wrapper:function(e){var r=e.children;return o.createElement(o.Fragment,{},r)}},d=o.forwardRef((function(e,r){var t=e.components,n=e.mdxType,i=e.originalType,s=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),d=c(t),f=n,y=d["".concat(s,".").concat(f)]||d[f]||u[f]||i;return t?o.createElement(y,a(a({ref:r},p),{},{components:t})):o.createElement(y,a({ref:r},p))}));function f(e,r){var t=arguments,n=r&&r.mdxType;if("string"==typeof e||n){var i=t.length,a=new Array(i);a[0]=d;var l={};for(var s in r)hasOwnProperty.call(r,s)&&(l[s]=r[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,a[1]=l;for(var c=2;c{t.r(r),t.d(r,{assets:()=>p,contentTitle:()=>s,default:()=>f,frontMatter:()=>l,metadata:()=>c,toc:()=>u});var o=t(9668),n=t(1367),i=(t(6540),t(5680)),a=["components"],l={id:"profile-pytorch-code",title:"PyTorch Profiling"},s=void 0,c={unversionedId:"profile-pytorch-code",id:"profile-pytorch-code",title:"PyTorch Profiling",description:"What is profiling?",source:"@site/docs/profile-pytorch-code.md",sourceDirName:".",slug:"/profile-pytorch-code",permalink:"/docs/profile-pytorch-code",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/profile-pytorch-code.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"profile-pytorch-code",title:"PyTorch Profiling"},sidebar:"docs",previous:{title:"Increase your processes speed",permalink:"/docs/increase-process-speed"},next:{title:"Tensorflow Optimization",permalink:"/docs/speeding-tensorflow-dl"}},p={},u=[{value:"What is profiling?",id:"what-is-profiling",level:2},{value:"Why should I care about profiling?",id:"why-should-i-care-about-profiling",level:2},{value:"When should I care about profiling?",id:"when-should-i-care-about-profiling",level:2},{value:"How can the DSRI team help you?",id:"how-dsri-team-can-help-you",level:2},{value:"External Resources and references",id:"external-resources-and-references",level:2}],d={toc:u};function f(e){var r=e.components,t=(0,n.A)(e,a);return(0,i.yg)("wrapper",(0,o.A)({},d,t,{components:r,mdxType:"MDXLayout"}),(0,i.yg)("h2",{id:"what-is-profiling"},"What is profiling?"),(0,i.yg)("p",null,"According to Wikipedia:"),(0,i.yg)("p",null,'"Profiling is a form of dynamic program analysis that measures, for example, the space (memory) or time complexity of a program, the usage of particular instructions, or the frequency and duration of function calls. Most commonly, profiling information serves to aid program optimization, and more specifically, performance engineering."'),(0,i.yg)("h2",{id:"why-should-i-care-about-profiling"},"Why should I care about profiling?"),(0,i.yg)("p",null,"You may know that training large models like GPT-3 takes several million dollars source and a few hundred MWh source. If the engineers who trained these models did not spend time on optimization, it might have been several million dollars and hundreds of MWh more."),(0,i.yg)("p",null,"Sure, the model you'd like to train is probably not quite as big. But maybe you want to train it 10000 times, because you want to do hyperparameter optimization. And even if you only train it once, it may take quite a bit of compute resources, i.e.
money and energy."),(0,i.yg)("h2",{id:"when-should-i-care-about-profiling"},"When should I care about profiling?"),(0,i.yg)("p",null,"Well, you should always care if your code runs efficiently, but there are different levels of caring."),(0,i.yg)("p",null,"From personal experience: if I know I'm going to run my code only once, for a few days, on a single GPU, I'll probably not create a full profile. What I would do is inspect my GPU and CPU utilization during my runs, just to see if it is somewhat efficient, and if I didn't make any obvious mistakes (e.g. accidentally not using the GPU, even if I have one available)."),(0,i.yg)("p",null,"If I know that I'll run my code on multiple GPUs, for multiple days, (potentially) on multiple nodes, and/or I need to run it multiple times, I know that my resource footprint is going to be large, and it's worth spending some time and effort to optimize the code. That's when I'll create a profile. The good part is: the more often you do it, the quicker and more adept you become at it."),(0,i.yg)("h2",{id:"how-dsri-team-can-help-you"},"How can the DSRI team help you?"),(0,i.yg)("p",null,"We can assist you with analyzing the bottleneck(s) in your deep learning pipeline and recommend improvements to speed up your pipeline."),(0,i.yg)("h2",{id:"external-resources-and-references"},"External Resources and references"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"This documentation is taken from SURF's PyTorch profiling wiki (",(0,i.yg)("a",{parentName:"li",href:"https://servicedesk.surf.nl/wiki/display/WIKI/PyTorch+Profiling"},"https://servicedesk.surf.nl/wiki/display/WIKI/PyTorch+Profiling"),")"),(0,i.yg)("li",{parentName:"ul"},"A tutorial on PyTorch profiling can be found here: (",(0,i.yg)("a",{parentName:"li",href:"https://github.com/sara-nl/PraceHPML2022/blob/master/notebooks/PyTorch_profiling/PyTorch_profiling.ipynb"},"https://github.com/sara-nl/PraceHPML2022/blob/master/notebooks/PyTorch_profiling/PyTorch_profiling.ipynb"),")")))}f.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/6b741ffd.a3e8248b.js b/assets/js/6b741ffd.2028c3a5.js similarity index 98% rename from assets/js/6b741ffd.a3e8248b.js rename to assets/js/6b741ffd.2028c3a5.js index 4e4f64be9..2bf96dc18 100644 --- a/assets/js/6b741ffd.a3e8248b.js +++ b/assets/js/6b741ffd.2028c3a5.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[93],{5680:(e,t,r)=>{r.d(t,{xA:()=>p,yg:()=>m});var o=r(6540);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function l(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var s=o.createContext({}),c=function(e){var t=o.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):l(l({},t),e)),r},p=function(e){var t=c(e.components);return o.createElement(s.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},u=o.forwardRef((function(e,t){var
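As a minimal sketch of that quick utilization check (assuming a terminal in one of the Nvidia-based workspace images where `nvidia-smi` is available; `watch` may not be installed in every image):

```shell
# Refresh GPU utilization and memory figures every 2 seconds
watch -n 2 nvidia-smi
# Inspect CPU and memory usage per process
top
```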
r=e.components,n=e.mdxType,a=e.originalType,s=e.parentName,p=i(e,["components","mdxType","originalType","parentName"]),u=c(r),m=n,f=u["".concat(s,".").concat(m)]||u[m]||d[m]||a;return r?o.createElement(f,l(l({ref:t},p),{},{components:r})):o.createElement(f,l({ref:t},p))}));function m(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var a=r.length,l=new Array(a);l[0]=u;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:n,l[1]=i;for(var c=2;c{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>s,default:()=>m,frontMatter:()=>i,metadata:()=>c,toc:()=>d});var o=r(9668),n=r(1367),a=(r(6540),r(5680)),l=["components"],i={id:"workflows-cwl",title:"Run CWL workflows"},s=void 0,c={unversionedId:"workflows-cwl",id:"workflows-cwl",title:"Run CWL workflows",description:"The Common Workflow Language (CWL) is an open standard for describing analysis workflows and tools in a way that makes them portable and scalable across a variety of software and hardware environments.",source:"@site/docs/workflows-cwl.md",sourceDirName:".",slug:"/workflows-cwl",permalink:"/docs/workflows-cwl",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-cwl.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-cwl",title:"Run CWL workflows"},sidebar:"docs",previous:{title:"Run Nextflow workflows",permalink:"/docs/workflows-nextflow"},next:{title:"Contribute",permalink:"/docs/contribute"}},p={},d=[{value:"Clone the repository",id:"clone-the-repository",level:2},{value:"Start pod",id:"start-pod",level:2},{value:"Delete created pod",id:"delete-created-pod",level:2}],u={toc:d};function m(e){var t=e.components,r=(0,n.A)(e,l);return(0,a.yg)("wrapper",(0,o.A)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"The ",(0,a.yg)("a",{parentName:"p",href:"https://www.commonwl.org/"},"Common Workflow Language")," (CWL) is an open standard for describing analysis workflows and tools in a way that makes them portable and scalable across a variety of software and hardware environments."),(0,a.yg)("p",null,"We use the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/Duke-GCB/calrissian"},"CWL Calrissian")," implementation, note that this project is young and still in development, feel free to report issues and contribute to its documentation."),(0,a.yg)("h2",{id:"clone-the-repository"},"Clone the repository"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Git clone in ",(0,a.yg)("inlineCode",{parentName:"li"},"/calrissian")," on a ",(0,a.yg)("a",{parentName:"li",href:"/docs/openshift-storage"},"persistent volume")," on the cluster from a terminal. 
")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"cd /data/calrissian\ngit clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git\ncd d2s-project-template\n")),(0,a.yg)("ol",{start:2},(0,a.yg)("li",{parentName:"ol"},"You will need to create the folder for the workflow output data, in our example it is ",(0,a.yg)("inlineCode",{parentName:"li"},"output-data"),":")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"mkdir /data/calrissian/output-data\n")),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"You might need to give permissions (CWL execution will fail due to permissions issues otherwise).")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"chmod -R 777 /data/calrissian\n")),(0,a.yg)("h2",{id:"start-pod"},"Start pod"),(0,a.yg)("p",null,"Start the CWL execution from your computer using the ",(0,a.yg)("inlineCode",{parentName:"p"},"oc")," client. Define the CWL command arguments to run in ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/support/run-workflows-cwl.yaml"},"run-workflows-cwl.yaml")," (be careful to properly define the paths to the CWL files in the pod storage)."),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc create -f d2s-core/support/run-workflows-cwl.yaml\n")),(0,a.yg)("admonition",{title:"Delete the pod",type:"caution"},(0,a.yg)("p",{parentName:"admonition"},"You will need to delete the pod if you want to re-create it.")),(0,a.yg)("h2",{id:"delete-created-pod"},"Delete created pod"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete -f d2s-core/support/run-workflows-cwl.yaml\n")))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[93],{5680:(e,t,r)=>{r.d(t,{xA:()=>p,yg:()=>m});var o=r(6540);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function l(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var s=o.createContext({}),c=function(e){var t=o.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):l(l({},t),e)),r},p=function(e){var t=c(e.components);return o.createElement(s.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},u=o.forwardRef((function(e,t){var r=e.components,n=e.mdxType,a=e.originalType,s=e.parentName,p=i(e,["components","mdxType","originalType","parentName"]),u=c(r),m=n,f=u["".concat(s,".").concat(m)]||u[m]||d[m]||a;return r?o.createElement(f,l(l({ref:t},p),{},{components:r})):o.createElement(f,l({ref:t},p))}));function m(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var a=r.length,l=new Array(a);l[0]=u;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:n,l[1]=i;for(var c=2;c{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>s,default:()=>m,frontMatter:()=>i,metadata:()=>c,toc:()=>d});var 
o=r(9668),n=r(1367),a=(r(6540),r(5680)),l=["components"],i={id:"workflows-cwl",title:"Run CWL workflows"},s=void 0,c={unversionedId:"workflows-cwl",id:"workflows-cwl",title:"Run CWL workflows",description:"The Common Workflow Language (CWL) is an open standard for describing analysis workflows and tools in a way that makes them portable and scalable across a variety of software and hardware environments.",source:"@site/docs/workflows-cwl.md",sourceDirName:".",slug:"/workflows-cwl",permalink:"/docs/workflows-cwl",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-cwl.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-cwl",title:"Run CWL workflows"},sidebar:"docs",previous:{title:"Run Nextflow workflows",permalink:"/docs/workflows-nextflow"},next:{title:"Contribute",permalink:"/docs/contribute"}},p={},d=[{value:"Clone the repository",id:"clone-the-repository",level:2},{value:"Start pod",id:"start-pod",level:2},{value:"Delete created pod",id:"delete-created-pod",level:2}],u={toc:d};function m(e){var t=e.components,r=(0,n.A)(e,l);return(0,a.yg)("wrapper",(0,o.A)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"The ",(0,a.yg)("a",{parentName:"p",href:"https://www.commonwl.org/"},"Common Workflow Language")," (CWL) is an open standard for describing analysis workflows and tools in a way that makes them portable and scalable across a variety of software and hardware environments."),(0,a.yg)("p",null,"We use the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/Duke-GCB/calrissian"},"CWL Calrissian")," implementation; note that this project is young and still in development, so feel free to report issues and contribute to its documentation."),(0,a.yg)("h2",{id:"clone-the-repository"},"Clone the repository"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Git clone into ",(0,a.yg)("inlineCode",{parentName:"li"},"/calrissian")," on a ",(0,a.yg)("a",{parentName:"li",href:"/docs/openshift-storage"},"persistent volume")," on the cluster from a terminal. ")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"cd /data/calrissian\ngit clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git\ncd d2s-project-template\n")),(0,a.yg)("ol",{start:2},(0,a.yg)("li",{parentName:"ol"},"You will need to create the folder for the workflow output data; in our example it is ",(0,a.yg)("inlineCode",{parentName:"li"},"output-data"),":")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"mkdir /data/calrissian/output-data\n")),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"You might need to give permissions (CWL execution will fail due to permissions issues otherwise).")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"chmod -R 777 /data/calrissian\n")),(0,a.yg)("h2",{id:"start-pod"},"Start pod"),(0,a.yg)("p",null,"Start the CWL execution from your computer using the ",(0,a.yg)("inlineCode",{parentName:"p"},"oc")," client. 
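Before creating the pod, a quick sanity check with standard `oc` commands can confirm you are pointed at the right project (a sketch; nothing here is specific to Calrissian):

```shell
oc project   # print the project you are currently connected to
oc status    # overview of what is deployed in that project
```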
Define the CWL command arguments to run in ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/support/run-workflows-cwl.yaml"},"run-workflows-cwl.yaml")," (be careful to properly define the paths to the CWL files in the pod storage)."),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc create -f d2s-core/support/run-workflows-cwl.yaml\n")),(0,a.yg)("admonition",{title:"Delete the pod",type:"caution"},(0,a.yg)("p",{parentName:"admonition"},"You will need to delete the pod if you want to re-create it.")),(0,a.yg)("h2",{id:"delete-created-pod"},"Delete created pod"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete -f d2s-core/support/run-workflows-cwl.yaml\n")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/707d3f57.95128426.js b/assets/js/707d3f57.6bb3e3d6.js similarity index 99% rename from assets/js/707d3f57.95128426.js rename to assets/js/707d3f57.6bb3e3d6.js index 1b3479469..c5d643ec9 100644 --- a/assets/js/707d3f57.95128426.js +++ b/assets/js/707d3f57.6bb3e3d6.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[248],{5680:(e,t,n)=>{n.d(t,{xA:()=>u,yg:()=>y});var a=n(6540);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function i(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var p=a.createContext({}),s=function(e){var t=a.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},u=function(e){var t=s(e.components);return a.createElement(p.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},c=a.forwardRef((function(e,t){var n=e.components,o=e.mdxType,r=e.originalType,p=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),c=s(n),y=o,g=c["".concat(p,".").concat(y)]||c[y]||d[y]||r;return n?a.createElement(g,i(i({ref:t},u),{},{components:n})):a.createElement(g,i({ref:t},u))}));function y(e,t){var n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=n.length,i=new Array(r);i[0]=c;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:o,i[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>u,contentTitle:()=>p,default:()=>y,frontMatter:()=>l,metadata:()=>s,toc:()=>d});var a=n(9668),o=n(1367),r=(n(6540),n(5680)),i=["components"],l={id:"deploy-on-gpu",title:"GPU applications"},p=void 0,s={unversionedId:"deploy-on-gpu",id:"deploy-on-gpu",title:"GPU applications",description:"GPUs on the DSRI can only be used by one workspace at a time, and there is a limited number of GPUs (8).",source:"@site/docs/deploy-on-gpu.md",sourceDirName:".",slug:"/deploy-on-gpu",permalink:"/docs/deploy-on-gpu",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-on-gpu.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-on-gpu",title:"GPU applications"},sidebar:"docs",previous:{title:"Monitor 
your applications",permalink:"/docs/guide-monitoring"},next:{title:"Deploy from a Dockerfile",permalink:"/docs/guide-dockerfile-to-openshift"}},u={},d=[{value:"Prepare your GPU workspace",id:"prepare-your-gpu-workspace",level:2},{value:"About the docker images",id:"about-the-docker-images",level:3},{value:"Deploy the workspace",id:"deploy-the-workspace",level:3},{value:"Prepare the workspace",id:"prepare-the-workspace",level:3},{value:"Enable the GPU",id:"enable-the-gpu",level:2},{value:"Disable the GPU",id:"disable-the-gpu",level:2},{value:"Increase the number of GPUs",id:"increase-the-number-of-gpus",level:2},{value:"Install GPU drivers in any image",id:"install-gpu-drivers-in-any-image",level:2}],c={toc:d};function y(e){var t=e.components,n=(0,o.A)(e,i);return(0,r.yg)("wrapper",(0,a.A)({},c,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("p",null,"GPUs on the DSRI can only be used by one workspace at a time, and there is a limited number of GPUs (8)."),(0,r.yg)("p",null,"\u26a0\ufe0f We currently provide a free access to those GPUs, but with the growing demands for GPUs it might get more restricted. As consideration for others, and to help keep this system open, it is important to make a maximum use of your GPUs when you get access to them. "),(0,r.yg)("p",null,"Unfortunately job scheduling is currently not mature enough on Kubernetes, you can look into ",(0,r.yg)("a",{parentName:"p",href:"https://volcano.sh/en/"},"volcano.sh")," if you are interested, but it is still quite experimental."),(0,r.yg)("p",null,"To use the GPU on the DSRI you will go through this process:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Deploy, prepare and debug your GPU workspace"),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("a",{parentName:"li",href:"/gpu-booking"},"Book a GPU")),(0,r.yg)("li",{parentName:"ol"},"Once the booking is done you will receive an email about your reservation, and more emails when it starts and before it ends"),(0,r.yg)("li",{parentName:"ol"},"Enable the GPU in workspace when your booking starts, and make the best use of it!")),(0,r.yg)("admonition",{title:"Book a GPU",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},(0,r.yg)("strong",{parentName:"p"},"By default you do not have the permission to run applications on GPU"),", you need to make a reservation."),(0,r.yg)("p",{parentName:"admonition"},"You can check the availability of our GPUs, and reserve GPU slots in the ",(0,r.yg)("a",{parentName:"p",href:"/gpu-booking"},"GPU booking calendar \ud83d\udcc5"))),(0,r.yg)("h2",{id:"prepare-your-gpu-workspace"},"Prepare your GPU workspace"),(0,r.yg)("p",null,"You will first need to start your workspace without the GPU enabled, you can then prepare your experiments: clone the code, download the data, prepare scripts to install all requirements (the workspace will be restarted when you enable the GPU). "),(0,r.yg)("h3",{id:"about-the-docker-images"},"About the docker images"),(0,r.yg)("p",null,"We are mainly using images provided by Nvidia, with all required drivers and optimizations for GPU pre-installed. 
You can access the workspace with JupyterLab and VisualStudio Code in your browser, and install dependencies with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt-get"),", ",(0,r.yg)("inlineCode",{parentName:"p"},"conda")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"pip")," in the workspace."),(0,r.yg)("p",null,"We currently mainly use Tensorflow, PyTorch and CUDA, but any image available in the ",(0,r.yg)("a",{parentName:"p",href:"https://ngc.nvidia.com/catalog/containers"},"Nvidia catalog")," should be easy to deploy. Checkout ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/jupyterlab#jupyterlab-on-gpu"},"this documentation")," for more details on how we build the optimized docker images for the DSRI GPUs. And feel free to ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/jupyterlab#extend-an-image"},"extend the images")," to install any software you need."),(0,r.yg)("h3",{id:"deploy-the-workspace"},"Deploy the workspace"),(0,r.yg)("p",null,"You can easily deploy your GPU workspace from the DSRI catalog:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Go to the ",(0,r.yg)("a",{parentName:"li",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"DSRI Catalog web UI"),": Click on ",(0,r.yg)("strong",{parentName:"li"},"Add to Project"),", then ",(0,r.yg)("strong",{parentName:"li"},"Browse Catalog")),(0,r.yg)("li",{parentName:"ol"},'Search the catalog for "GPU", and make sure the Template checkbox is enabled'),(0,r.yg)("li",{parentName:"ol"},"Choose the template: ",(0,r.yg)("strong",{parentName:"li"},"JupyterLab on GPU")),(0,r.yg)("li",{parentName:"ol"},"Follow the instructions to create the template in the DSRI web UI, all information about the images you can use are provided there. The most notable is the base image you want to use for your workspace (",(0,r.yg)("inlineCode",{parentName:"li"},"cuda"),", ",(0,r.yg)("inlineCode",{parentName:"li"},"tensorflow")," or ",(0,r.yg)("inlineCode",{parentName:"li"},"pytorch"),")")),(0,r.yg)("p",null,"Access the workspace from the route created (the small arrow at the top right of your application bubble in the Topology page)."),(0,r.yg)("h3",{id:"prepare-the-workspace"},"Prepare the workspace"),(0,r.yg)("p",null,"You can now add your code and data in the persistent folder to be fully prepared when you will get access to the GPUs."),(0,r.yg)("p",null,"You can install dependencies with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt-get"),", ",(0,r.yg)("inlineCode",{parentName:"p"},"conda")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"pip"),". We recommend your to use scripts stored in the persistent folder to easily install all your requirements, so you can reinstall them when we enable the GPU, as it restarts the workspace."),(0,r.yg)("p",null,"For more information on how to use ",(0,r.yg)("inlineCode",{parentName:"p"},"conda"),"/",(0,r.yg)("inlineCode",{parentName:"p"},"mamba")," to install new dependencies or complete environment (useful if you need to use a different version of python than the one installed by default) checkout ",(0,r.yg)("a",{parentName:"p",href:"/docs/deploy-jupyter#%EF%B8%8F-manage-dependencies-with-conda"},"this page"),". 
"),(0,r.yg)("p",null,"\u26a0\ufe0f We recommend you to also try and debug your code on small sample using the CPU before getting the GPU, this way you will be able to directly start long running task when you get the GPU, instead of losing time debugging your code (it's probably not going to work on the first try, you know it)."),(0,r.yg)("p",null,"You can find more details on the images we use and how to extend them ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/jupyterlab#jupyterlab-on-gpu"},"in this repository"),"."),(0,r.yg)("admonition",{title:"Storage",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Use the ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("inlineCode",{parentName:"strong"},"/workspace/persistent")," folder"),", which is the JupyterLab workspace, to store your code and data persistently. Note that loading data from the persistent storage will be slowly that what you might expected, this is due to the nature of the distributed storage. So try to optimize this part and avoid reloading multiple time your data, and let us know if it is too much of a problem, we have some solution to improve this")),(0,r.yg)("h2",{id:"enable-the-gpu"},"Enable the GPU"),(0,r.yg)("p",null,"You will receive an email when the GPU has been enabled in your project. You can then update your deployment to use the GPUs using either the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," command-line tool, or by editing the deployment configuration from the web UI"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("strong",{parentName:"li"},"With the Command Line Interface"),", run the following command from the terminal of your laptop after having installed the ",(0,r.yg)("inlineCode",{parentName:"li"},"oc")," command-line tool.")),(0,r.yg)("p",null,"We use ",(0,r.yg)("inlineCode",{parentName:"p"},"jupyterlab-gpu")," as deployment name is in the example, change it to yours if it is different."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch dc/jupyterlab-gpu --type=json -p=\'[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {"requests": {"nvidia.com/gpu": 1}, "limits": {"nvidia.com/gpu": 1}}}]\'\n')),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},"Or ",(0,r.yg)("strong",{parentName:"li"},"through the web UI"))),(0,r.yg)("p",null,"In the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view click on the circle representing your GPU application, then click on the ",(0,r.yg)("strong",{parentName:"p"},"Actions")," button in the top right of the screen, and click on ",(0,r.yg)("strong",{parentName:"p"},"Edit Deployment Config")," at the bottom of the list"),(0,r.yg)("p",null,"In the Deployment Config text editor, hit ",(0,r.yg)("inlineCode",{parentName:"p"},"ctrl + f"),' to search for "',(0,r.yg)("strong",{parentName:"p"},"resources"),'". You should see a line ',(0,r.yg)("inlineCode",{parentName:"p"},"- resources: {}")," under ",(0,r.yg)("inlineCode",{parentName:"p"},"containers:"),". 
You need to change this line to the following to enable GPU in your application (and make sure the indentation match the rest of the file):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-yaml"}," - resources:\n requests:\n nvidia.com/gpu: 1\n limits:\n nvidia.com/gpu: 1\n")),(0,r.yg)("p",null,"Then wait for the pod to restart, or start it if it was stopped."),(0,r.yg)("p",null,"You can use the following command in the terminal of your container on the DSRI to see the current GPU usage:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"nvidia-smi\n")),(0,r.yg)("admonition",{title:"Windows",type:"info"},(0,r.yg)("p",{parentName:"admonition"},'When using above command with the oc client on windows you might receive an error like:\nerror: unable to parse "\'[{op:": yaml: found unexpected end of stream'),(0,r.yg)("p",{parentName:"admonition"},"This is because the single quotation mark on windows is handled differently. Try replacing the single quotation marks in the command with double quotation marks and the command should work.")),(0,r.yg)("h2",{id:"disable-the-gpu"},"Disable the GPU"),(0,r.yg)("p",null,"The GPU allocated to your workspace will be automatically disabled the after your booking ends at 9:00."),(0,r.yg)("p",null,"You can also manually disable the GPU from your app, the pod will be restarted automatically on a CPU node:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch dc/jupyterlab-gpu --type=json -p=\'[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {}}]\'\n')),(0,r.yg)("h2",{id:"increase-the-number-of-gpus"},"Increase the number of GPUs"),(0,r.yg)("p",null,"If you have been granted a 2nd GPU to speed up your experiment you can easily upgrade the number of GPU used by your workspace:"),(0,r.yg)("p",null,"From the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view click on your application:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Stop the application, by decreasing the number of pod to 0 (in the ",(0,r.yg)("strong",{parentName:"li"},"Details")," tab)"),(0,r.yg)("li",{parentName:"ol"},"Click on ",(0,r.yg)("strong",{parentName:"li"},"Options")," > ",(0,r.yg)("strong",{parentName:"li"},"Edit Deployment")," > in the YAML of the deployment search for ",(0,r.yg)("inlineCode",{parentName:"li"},"limits")," and change the number of GPU assigned to your deployment to 2:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-yaml"}," resources:\n limits:\n nvidia.com/gpu: '2'\n requests:\n nvidia.com/gpu: '2'\n")),(0,r.yg)("p",null,"You can also do it using the command line, make sure to stop the pod first, and replace ",(0,r.yg)("inlineCode",{parentName:"p"},"jupyterlab-gpu")," by your app name in this command:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch dc/jupyterlab-gpu --type=json -p=\'[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {"requests": {"nvidia.com/gpu": 2}, "limits": {"nvidia.com/gpu": 2}}}]\'\n')),(0,r.yg)("ol",{start:3},(0,r.yg)("li",{parentName:"ol"},"Restart the pod for your application (the same way you stopped it)")),(0,r.yg)("h2",{id:"install-gpu-drivers-in-any-image"},"Install GPU drivers in any image"),(0,r.yg)("p",null,"You can also install the GPU drivers in any image and use this image directly."),(0,r.yg)("p",null,"See the latest official 
",(0,r.yg)("a",{parentName:"p",href:"https://nvidia.github.io/nvidia-container-runtime"},"Nvidia docs")," to install the ",(0,r.yg)("inlineCode",{parentName:"p"},"nvidia-container-runtime"),", which should contain all packages and drivers required to access the GPU from your application."),(0,r.yg)("p",null,"Here is an example of commands to add to a debian based ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," to install the GPU drivers (note that this is not complete, you will need to check the latest instructions and do some research & development to get it to work):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-dockerfile"},"RUN curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | \\\n apt-key add - \\ &&\n distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \\ &&\n curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \nRUN apt-get update \\ &&\n apt-get install -y nvidia-container-runtime\n")),(0,r.yg)("p",null,"Then, build your image in your DSRI project using ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," from the folder where your put the ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," (replace ",(0,r.yg)("inlineCode",{parentName:"p"},"custom-app-gpu")," by your app name):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc new-build --name custom-app-gpu --binary\noc start-build custom-app-gpu --from-dir=. --follow --wait\noc new-app custom-app-gpu\n")),(0,r.yg)("p",null,"You will then need to edit the deployment to the ",(0,r.yg)("inlineCode",{parentName:"p"},"serviceAccountName: anyuid")," and add a persistent storage"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc edit custom-app-gpu\n")),(0,r.yg)("p",null,"Finally for when your reservation start, checkout the section above about how to enable the GPU in workspace "),(0,r.yg)("p",null,"See also: official ",(0,r.yg)("a",{parentName:"p",href:"https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian-installation"},"Nvidia docs for CUDA")))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[248],{5680:(e,t,n)=>{n.d(t,{xA:()=>u,yg:()=>y});var a=n(6540);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function i(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var p=a.createContext({}),s=function(e){var t=a.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},u=function(e){var t=s(e.components);return a.createElement(p.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},c=a.forwardRef((function(e,t){var n=e.components,o=e.mdxType,r=e.originalType,p=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),c=s(n),y=o,g=c["".concat(p,".").concat(y)]||c[y]||d[y]||r;return n?a.createElement(g,i(i({ref:t},u),{},{components:n})):a.createElement(g,i({ref:t},u))}));function y(e,t){var 
n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=n.length,i=new Array(r);i[0]=c;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:o,i[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>u,contentTitle:()=>p,default:()=>y,frontMatter:()=>l,metadata:()=>s,toc:()=>d});var a=n(9668),o=n(1367),r=(n(6540),n(5680)),i=["components"],l={id:"deploy-on-gpu",title:"GPU applications"},p=void 0,s={unversionedId:"deploy-on-gpu",id:"deploy-on-gpu",title:"GPU applications",description:"GPUs on the DSRI can only be used by one workspace at a time, and there is a limited number of GPUs (8).",source:"@site/docs/deploy-on-gpu.md",sourceDirName:".",slug:"/deploy-on-gpu",permalink:"/docs/deploy-on-gpu",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-on-gpu.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-on-gpu",title:"GPU applications"},sidebar:"docs",previous:{title:"Monitor your applications",permalink:"/docs/guide-monitoring"},next:{title:"Deploy from a Dockerfile",permalink:"/docs/guide-dockerfile-to-openshift"}},u={},d=[{value:"Prepare your GPU workspace",id:"prepare-your-gpu-workspace",level:2},{value:"About the docker images",id:"about-the-docker-images",level:3},{value:"Deploy the workspace",id:"deploy-the-workspace",level:3},{value:"Prepare the workspace",id:"prepare-the-workspace",level:3},{value:"Enable the GPU",id:"enable-the-gpu",level:2},{value:"Disable the GPU",id:"disable-the-gpu",level:2},{value:"Increase the number of GPUs",id:"increase-the-number-of-gpus",level:2},{value:"Install GPU drivers in any image",id:"install-gpu-drivers-in-any-image",level:2}],c={toc:d};function y(e){var t=e.components,n=(0,o.A)(e,i);return(0,r.yg)("wrapper",(0,a.A)({},c,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("p",null,"GPUs on the DSRI can only be used by one workspace at a time, and there is a limited number of GPUs (8)."),(0,r.yg)("p",null,"\u26a0\ufe0f We currently provide a free access to those GPUs, but with the growing demands for GPUs it might get more restricted. As consideration for others, and to help keep this system open, it is important to make a maximum use of your GPUs when you get access to them. 
"),(0,r.yg)("p",null,"Unfortunately job scheduling is currently not mature enough on Kubernetes, you can look into ",(0,r.yg)("a",{parentName:"p",href:"https://volcano.sh/en/"},"volcano.sh")," if you are interested, but it is still quite experimental."),(0,r.yg)("p",null,"To use the GPU on the DSRI you will go through this process:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Deploy, prepare and debug your GPU workspace"),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("a",{parentName:"li",href:"/gpu-booking"},"Book a GPU")),(0,r.yg)("li",{parentName:"ol"},"Once the booking is done you will receive an email about your reservation, and more emails when it starts and before it ends"),(0,r.yg)("li",{parentName:"ol"},"Enable the GPU in workspace when your booking starts, and make the best use of it!")),(0,r.yg)("admonition",{title:"Book a GPU",type:"warning"},(0,r.yg)("p",{parentName:"admonition"},(0,r.yg)("strong",{parentName:"p"},"By default you do not have the permission to run applications on GPU"),", you need to make a reservation."),(0,r.yg)("p",{parentName:"admonition"},"You can check the availability of our GPUs, and reserve GPU slots in the ",(0,r.yg)("a",{parentName:"p",href:"/gpu-booking"},"GPU booking calendar \ud83d\udcc5"))),(0,r.yg)("h2",{id:"prepare-your-gpu-workspace"},"Prepare your GPU workspace"),(0,r.yg)("p",null,"You will first need to start your workspace without the GPU enabled, you can then prepare your experiments: clone the code, download the data, prepare scripts to install all requirements (the workspace will be restarted when you enable the GPU). "),(0,r.yg)("h3",{id:"about-the-docker-images"},"About the docker images"),(0,r.yg)("p",null,"We are mainly using images provided by Nvidia, with all required drivers and optimizations for GPU pre-installed. You can access the workspace with JupyterLab and VisualStudio Code in your browser, and install dependencies with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt-get"),", ",(0,r.yg)("inlineCode",{parentName:"p"},"conda")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"pip")," in the workspace."),(0,r.yg)("p",null,"We currently mainly use Tensorflow, PyTorch and CUDA, but any image available in the ",(0,r.yg)("a",{parentName:"p",href:"https://ngc.nvidia.com/catalog/containers"},"Nvidia catalog")," should be easy to deploy. Checkout ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/jupyterlab#jupyterlab-on-gpu"},"this documentation")," for more details on how we build the optimized docker images for the DSRI GPUs. 
(0,r.yg)("h3",{id:"deploy-the-workspace"},"Deploy the workspace"),(0,r.yg)("p",null,"You can easily deploy your GPU workspace from the DSRI catalog:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Go to the ",(0,r.yg)("a",{parentName:"li",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"DSRI Catalog web UI"),": Click on ",(0,r.yg)("strong",{parentName:"li"},"Add to Project"),", then ",(0,r.yg)("strong",{parentName:"li"},"Browse Catalog")),(0,r.yg)("li",{parentName:"ol"},'Search the catalog for "GPU", and make sure the Template checkbox is enabled'),(0,r.yg)("li",{parentName:"ol"},"Choose the template: ",(0,r.yg)("strong",{parentName:"li"},"JupyterLab on GPU")),(0,r.yg)("li",{parentName:"ol"},"Follow the instructions to create the template in the DSRI web UI, all information about the images you can use is provided there. The most notable is the base image you want to use for your workspace (",(0,r.yg)("inlineCode",{parentName:"li"},"cuda"),", ",(0,r.yg)("inlineCode",{parentName:"li"},"tensorflow")," or ",(0,r.yg)("inlineCode",{parentName:"li"},"pytorch"),")")),(0,r.yg)("p",null,"Access the workspace from the route created (the small arrow at the top right of your application bubble in the Topology page)."),(0,r.yg)("h3",{id:"prepare-the-workspace"},"Prepare the workspace"),(0,r.yg)("p",null,"You can now add your code and data in the persistent folder, to be fully prepared when you get access to the GPUs."),(0,r.yg)("p",null,"You can install dependencies with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt-get"),", ",(0,r.yg)("inlineCode",{parentName:"p"},"conda")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"pip"),". We recommend using scripts stored in the persistent folder to easily install all your requirements, so you can reinstall them when we enable the GPU, as this restarts the workspace."),
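(0,r.yg)("p",null,"For example, a minimal install script you could keep in the persistent folder and re-run after each restart (a sketch; the file names are hypothetical):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"#!/bin/bash\n# Hypothetical example: reinstall your requirements after the workspace restarts\npip install -r /workspace/persistent/requirements.txt\n")),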
(0,r.yg)("p",null,"For more information on how to use ",(0,r.yg)("inlineCode",{parentName:"p"},"conda"),"/",(0,r.yg)("inlineCode",{parentName:"p"},"mamba")," to install new dependencies or complete environments (useful if you need to use a different version of python than the one installed by default), check out ",(0,r.yg)("a",{parentName:"p",href:"/docs/deploy-jupyter#%EF%B8%8F-manage-dependencies-with-conda"},"this page"),"."),(0,r.yg)("p",null,"\u26a0\ufe0f We also recommend you try and debug your code on a small sample using the CPU before getting the GPU. This way you will be able to directly start long-running tasks when you get the GPU, instead of losing time debugging your code (it's probably not going to work on the first try, you know it)."),(0,r.yg)("p",null,"You can find more details on the images we use and how to extend them ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/jupyterlab#jupyterlab-on-gpu"},"in this repository"),"."),(0,r.yg)("admonition",{title:"Storage",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Use the ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("inlineCode",{parentName:"strong"},"/workspace/persistent")," folder"),", which is the JupyterLab workspace, to store your code and data persistently. Note that loading data from the persistent storage will be slower than you might expect, due to the nature of the distributed storage. So try to optimize this part and avoid reloading your data multiple times, and let us know if it is too much of a problem: we have some solutions to improve this.")),(0,r.yg)("h2",{id:"enable-the-gpu"},"Enable the GPU"),(0,r.yg)("p",null,"You will receive an email when the GPU has been enabled in your project. You can then update your deployment to use the GPUs, either with the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," command-line tool, or by editing the deployment configuration from the web UI."),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("strong",{parentName:"li"},"With the Command Line Interface"),", run the following command from the terminal of your laptop after having installed the ",(0,r.yg)("inlineCode",{parentName:"li"},"oc")," command-line tool.")),(0,r.yg)("p",null,"We use ",(0,r.yg)("inlineCode",{parentName:"p"},"jupyterlab-gpu")," as the deployment name in this example, change it to yours if it is different."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch dc/jupyterlab-gpu --type=json -p=\'[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {"requests": {"nvidia.com/gpu": 1}, "limits": {"nvidia.com/gpu": 1}}}]\'\n')),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},"Or ",(0,r.yg)("strong",{parentName:"li"},"through the web UI"))),(0,r.yg)("p",null,"In the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view click on the circle representing your GPU application, then click on the ",(0,r.yg)("strong",{parentName:"p"},"Actions")," button in the top right of the screen, and click on ",(0,r.yg)("strong",{parentName:"p"},"Edit Deployment Config")," at the bottom of the list."),(0,r.yg)("p",null,"In the Deployment Config text editor, hit ",(0,r.yg)("inlineCode",{parentName:"p"},"ctrl + f"),' to search for "',(0,r.yg)("strong",{parentName:"p"},"resources"),'". You should see a line ',(0,r.yg)("inlineCode",{parentName:"p"},"- resources: {}")," under ",(0,r.yg)("inlineCode",{parentName:"p"},"containers:"),". You need to change this line to the following to enable the GPU in your application (and make sure the indentation matches the rest of the file):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-yaml"}," - resources:\n     requests:\n       nvidia.com/gpu: 1\n     limits:\n       nvidia.com/gpu: 1\n")),(0,r.yg)("p",null,"Then wait for the pod to restart, or start it if it was stopped."),(0,r.yg)("p",null,"You can use the following command in the terminal of your container on the DSRI to see the current GPU usage:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"nvidia-smi\n")),(0,r.yg)("admonition",{title:"Windows",type:"info"},(0,r.yg)("p",{parentName:"admonition"},'When using the above command with the oc client on Windows you might receive an error like:\nerror: unable to parse "\'[{op:": yaml: found unexpected end of stream'),(0,r.yg)("p",{parentName:"admonition"},"This is because single quotation marks are handled differently on Windows. Try replacing the single quotation marks in the command with double quotation marks and the command should work.")),
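(0,r.yg)("p",null,"Once the pod has restarted with the GPU enabled, you can also run a quick sanity check from Python in the workspace terminal. This is a hypothetical example that assumes you use one of the PyTorch-based images mentioned above:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"# Should print True when the GPU is visible (assumes PyTorch is installed in your image)\npython -c \"import torch; print(torch.cuda.is_available())\"\n")),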
(0,r.yg)("h2",{id:"disable-the-gpu"},"Disable the GPU"),(0,r.yg)("p",null,"The GPU allocated to your workspace will be automatically disabled after your booking ends, at 9:00."),(0,r.yg)("p",null,"You can also manually disable the GPU for your app; the pod will be restarted automatically on a CPU node:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch dc/jupyterlab-gpu --type=json -p=\'[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {}}]\'\n')),(0,r.yg)("h2",{id:"increase-the-number-of-gpus"},"Increase the number of GPUs"),(0,r.yg)("p",null,"If you have been granted a 2nd GPU to speed up your experiment, you can easily increase the number of GPUs used by your workspace:"),(0,r.yg)("p",null,"From the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view click on your application:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Stop the application by decreasing the number of pods to 0 (in the ",(0,r.yg)("strong",{parentName:"li"},"Details")," tab)"),(0,r.yg)("li",{parentName:"ol"},"Click on ",(0,r.yg)("strong",{parentName:"li"},"Options")," > ",(0,r.yg)("strong",{parentName:"li"},"Edit Deployment")," > in the YAML of the deployment search for ",(0,r.yg)("inlineCode",{parentName:"li"},"limits")," and change the number of GPUs assigned to your deployment to 2:")),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-yaml"}," resources:\n   limits:\n     nvidia.com/gpu: '2'\n   requests:\n     nvidia.com/gpu: '2'\n")),(0,r.yg)("p",null,"You can also do it using the command line: make sure to stop the pod first, and replace ",(0,r.yg)("inlineCode",{parentName:"p"},"jupyterlab-gpu")," with your app name in this command:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch dc/jupyterlab-gpu --type=json -p=\'[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {"requests": {"nvidia.com/gpu": 2}, "limits": {"nvidia.com/gpu": 2}}}]\'\n')),(0,r.yg)("ol",{start:3},(0,r.yg)("li",{parentName:"ol"},"Restart the pod for your application (the same way you stopped it)")),(0,r.yg)("h2",{id:"install-gpu-drivers-in-any-image"},"Install GPU drivers in any image"),(0,r.yg)("p",null,"You can also install the GPU drivers in any image and use this image directly."),(0,r.yg)("p",null,"See the latest official ",(0,r.yg)("a",{parentName:"p",href:"https://nvidia.github.io/nvidia-container-runtime"},"Nvidia docs")," to install the ",(0,r.yg)("inlineCode",{parentName:"p"},"nvidia-container-runtime"),", which should contain all packages and drivers required to access the GPU from your application."),(0,r.yg)("p",null,"Here is an example of commands to add to a Debian-based ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," to install the GPU drivers (note that this is not complete, you will need to check the latest instructions and do some research & development to get it to work):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-dockerfile"},"RUN curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | \\\n    apt-key add - && \\\n    distribution=$(. /etc/os-release; echo $ID$VERSION_ID) && \\\n    curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \\\n    tee /etc/apt/sources.list.d/nvidia-container-runtime.list\nRUN apt-get update && \\\n    apt-get install -y nvidia-container-runtime\n")),(0,r.yg)("p",null,"Then, build your image in your DSRI project using ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," from the folder where you put the ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," (replace ",(0,r.yg)("inlineCode",{parentName:"p"},"custom-app-gpu")," with your app name):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc new-build --name custom-app-gpu --binary\noc start-build custom-app-gpu --from-dir=. --follow --wait\noc new-app custom-app-gpu\n")),(0,r.yg)("p",null,"You will then need to edit the deployment to set ",(0,r.yg)("inlineCode",{parentName:"p"},"serviceAccountName: anyuid")," and add a persistent storage:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc edit custom-app-gpu\n")),
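(0,r.yg)("p",null,"If you prefer not to edit the YAML by hand, the service account change can also be applied with a patch command, similar to the patch commands used elsewhere in this documentation (a sketch, assuming the ",(0,r.yg)("inlineCode",{parentName:"p"},"custom-app-gpu")," deployment created above):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},'# Set the anyuid service account on the example deployment created above\noc patch dc/custom-app-gpu --patch \'{"spec":{"template":{"spec": {"serviceAccountName": "anyuid"}}}}\'\n')),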
(0,r.yg)("p",null,"Finally, when your reservation starts, check out the section above on how to enable the GPU in your workspace."),(0,r.yg)("p",null,"See also: official ",(0,r.yg)("a",{parentName:"p",href:"https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian-installation"},"Nvidia docs for CUDA")))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/7a61fedb.8ef3b0cc.js b/assets/js/7a61fedb.b40e5606.js similarity index 99% rename from assets/js/7a61fedb.8ef3b0cc.js rename to assets/js/7a61fedb.b40e5606.js index 40d53775b..3458f6d69 100644 --- a/assets/js/7a61fedb.8ef3b0cc.js +++ b/assets/js/7a61fedb.b40e5606.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5217],{5680:(e,o,t)=>{t.d(o,{xA:()=>p,yg:()=>d});var r=t(6540);function a(e,o,t){return o in e?Object.defineProperty(e,o,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[o]=t,e}function n(e,o){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);o&&(r=r.filter((function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable}))),t.push.apply(t,r)}return t}function l(e){for(var o=1;o=0||(a[t]=e[t]);return a}(e,o);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var i=r.createContext({}),g=function(e){var o=r.useContext(i),t=o;return e&&(t="function"==typeof e?e(o):l(l({},o),e)),t},p=function(e){var o=g(e.components);return r.createElement(i.Provider,{value:o},e.children)},u={inlineCode:"code",wrapper:function(e){var o=e.children;return r.createElement(r.Fragment,{},o)}},c=r.forwardRef((function(e,o){var t=e.components,a=e.mdxType,n=e.originalType,i=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),c=g(t),d=a,m=c["".concat(i,".").concat(d)]||c[d]||u[d]||n;return t?r.createElement(m,l(l({ref:o},p),{},{components:t})):r.createElement(m,l({ref:o},p))}));function d(e,o){var t=arguments,a=o&&o.mdxType;if("string"==typeof e||a){var n=t.length,l=new Array(n);l[0]=c;var s={};for(var i in o)hasOwnProperty.call(o,i)&&(s[i]=o[i]);s.originalType=e,s.mdxType="string"==typeof e?e:a,l[1]=s;for(var g=2;g{t.r(o),t.d(o,{assets:()=>p,contentTitle:()=>i,default:()=>d,frontMatter:()=>s,metadata:()=>g,toc:()=>u});var r=t(9668),a=t(1367),n=(t(6540),t(5680)),l=["components"],s={id:"workflows-argo",title:"Run Argo workflows"},i=void 0,g={unversionedId:"workflows-argo",id:"workflows-argo",title:"Run Argo 
workflows",description:"Argo needs to be installed in your project, contact the DSRI team to request it.",source:"@site/docs/workflows-argo.md",sourceDirName:".",slug:"/workflows-argo",permalink:"/docs/workflows-argo",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-argo.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-argo",title:"Run Argo workflows"},sidebar:"docs",previous:{title:"Deploy Airflow",permalink:"/docs/workflows-airflow"},next:{title:"Run Nextflow workflows",permalink:"/docs/workflows-nextflow"}},p={},u=[{value:"Install the argo client",id:"install-the-argo-client",level:2},{value:"On Ubuntu",id:"on-ubuntu",level:3},{value:"On MacOS",id:"on-macos",level:3},{value:"On Windows",id:"on-windows",level:3},{value:"Test Argo",id:"test-argo",level:3},{value:"Install Argo in your project",id:"install-argo-in-your-project",level:2},{value:"Argo workflows with Helm",id:"argo-workflows-with-helm",level:3},{value:"ArgoCD Operator",id:"argocd-operator",level:3},{value:"Uninstall argo",id:"uninstall-argo",level:3},{value:"On Ubuntu",id:"on-ubuntu-1",level:4},{value:"Run workflows to convert structured data to RDF",id:"run-workflows-to-convert-structured-data-to-rdf",level:2},{value:"Clone the repository",id:"clone-the-repository",level:3},{value:"Workflow to convert XML files to RDF",id:"workflow-to-convert-xml-files-to-rdf",level:3},{value:"Workflow to convert CSV files to RDF",id:"workflow-to-convert-csv-files-to-rdf",level:3},{value:"Argo commands",id:"argo-commands",level:2},{value:"List running Argo workflows",id:"list-running-argo-workflows",level:3},{value:"Stop a workflow",id:"stop-a-workflow",level:3},{value:"Delete a workflow",id:"delete-a-workflow",level:3},{value:"Debug a workflow",id:"debug-a-workflow",level:2}],c={toc:u};function d(e){var o=e.components,t=(0,a.A)(e,l);return(0,n.yg)("wrapper",(0,r.A)({},c,t,{components:o,mdxType:"MDXLayout"}),(0,n.yg)("admonition",{title:"Install in your project",type:"warning"},(0,n.yg)("p",{parentName:"admonition"},"Argo needs to be installed in your project, ",(0,n.yg)("a",{parentName:"p",href:"mailto:dsri-support-l@maastrichtuniversity.nl"},"contact the DSRI team")," to request it.")),(0,n.yg)("h2",{id:"install-the-argo-client"},"Install the ",(0,n.yg)("inlineCode",{parentName:"h2"},"argo")," client"),(0,n.yg)("p",null,(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/argo/"},"Argo \ud83e\udd91")," is a container native workflow engine for ",(0,n.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes")," supporting both ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html#dag"},"DAG")," and ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html#steps"},"step based")," workflows."),(0,n.yg)("p",null,"Download and install the ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/argoproj/argo/blob/master/demo.md#1-download-argo"},"Argo client")," on your computer to ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html"},"start workflows")," on the DSRI."),(0,n.yg)("h3",{id:"on-ubuntu"},"On Ubuntu"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"sudo curl -L -o /usr/local/bin/argo https://github.com/argoproj/argo/releases/download/v2.4.2/argo-linux-amd64\nsudo chmod +x 
/usr/local/bin/argo\n")),(0,n.yg)("h3",{id:"on-macos"},"On MacOS"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"brew install argoproj/tap/argo\n")),(0,n.yg)("h3",{id:"on-windows"},"On Windows"),(0,n.yg)("p",null,"Get ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/argoproj/argo/releases/download/v2.4.2/argo-windows-amd64"},"Argo executable version 2.4.2")," from ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/argoproj/argo/releases"},"Argo Releases")," on GitHub."),(0,n.yg)("blockquote",null,(0,n.yg)("p",{parentName:"blockquote"},"See ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/demo.html#1-download-argo"},"official Argo documentation"),".")),(0,n.yg)("h3",{id:"test-argo"},"Test Argo"),(0,n.yg)("p",null,"Run Hello world workflow to test if Argo has been properly installed. And take a look at the ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html"},"examples provided in Argo documentation")," to discover how to use the different features available."),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml\n")),(0,n.yg)("admonition",{title:"Logged in",type:"caution"},(0,n.yg)("p",{parentName:"admonition"},"You will need to have the ",(0,n.yg)("inlineCode",{parentName:"p"},"oc")," client installed and be logged in with ",(0,n.yg)("inlineCode",{parentName:"p"},"oc login"),", see the ",(0,n.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"install documentation page"),".")),(0,n.yg)("h2",{id:"install-argo-in-your-project"},"Install Argo in your project"),(0,n.yg)("h3",{id:"argo-workflows-with-helm"},"Argo workflows with Helm"),(0,n.yg)("p",null,"Deploy the ",(0,n.yg)("a",{parentName:"p",href:"https://artifacthub.io/packages/helm/argo/argo"},"Argo Helm chart"),"."),(0,n.yg)("ol",null,(0,n.yg)("li",{parentName:"ol"},"Install and use ",(0,n.yg)("a",{parentName:"li",href:"/docs/helm"},(0,n.yg)("inlineCode",{parentName:"a"},"helm"))),(0,n.yg)("li",{parentName:"ol"},"Add the Helm charts repository:")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add argo https://argoproj.github.io/argo-helm\n")),(0,n.yg)("ol",{start:3},(0,n.yg)("li",{parentName:"ol"},"Install chart:")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm install my-argo argo/argo --version 0.15.2\n")),(0,n.yg)("h3",{id:"argocd-operator"},"ArgoCD Operator"),(0,n.yg)("p",null,"Ask on the DSRI Slack ",(0,n.yg)("strong",{parentName:"p"},"#helpdesk")," channel to have the ",(0,n.yg)("a",{parentName:"p",href:"https://artifacthub.io/packages/olm/community-operators/argocd-operator"},"ArgoCD Operator")," installed in your project."),(0,n.yg)("h3",{id:"uninstall-argo"},"Uninstall ",(0,n.yg)("inlineCode",{parentName:"h3"},"argo")),(0,n.yg)("h4",{id:"on-ubuntu-1"},"On Ubuntu"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"sudo rm /usr/local/bin/argo\n")),(0,n.yg)("blockquote",null,(0,n.yg)("p",{parentName:"blockquote"},"You can now reinstall a newer version of Argo.")),(0,n.yg)("hr",null),(0,n.yg)("h2",{id:"run-workflows-to-convert-structured-data-to-rdf"},"Run workflows to convert structured data to RDF"),(0,n.yg)("p",null,"We will use examples from the ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/d2s-core"},"MaastrichtU-IDS/d2s-core")," 
project."),(0,n.yg)("h3",{id:"clone-the-repository"},"Clone the repository"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"git clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git\ncd d2s-project-template\n")),(0,n.yg)("p",null,(0,n.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"Authenticate to the OpenShift cluster")," using ",(0,n.yg)("inlineCode",{parentName:"p"},"oc login")," ."),(0,n.yg)("h3",{id:"workflow-to-convert-xml-files-to-rdf"},"Workflow to convert XML files to RDF"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"Steps-based workflow for XML files, see the example workflow ",(0,n.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/d2s-workflow-xml.yaml"},"YAML file on GitHub"),".")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml.yaml \\\n -f support/config/config-transform-xml-drugbank.yml\n")),(0,n.yg)("admonition",{title:"Provide config files",type:"info"},(0,n.yg)("p",{parentName:"admonition"},"Config files can be provided using the ",(0,n.yg)("inlineCode",{parentName:"p"},"-f")," arguments, but are not necessary.")),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"DAG workflow for XML files, see the ",(0,n.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/d2s-workflow-xml-dag.yaml"},"YAML file on GitHub"),".")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml-dag.yaml \\\n -f support/config/config-transform-xml-drugbank.yml\n")),(0,n.yg)("h3",{id:"workflow-to-convert-csv-files-to-rdf"},"Workflow to convert CSV files to RDF"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"Steps-based workflow for CSV files")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv.yaml \\\n -f support/config/config-transform-csv-stitch.yml\n")),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"DAG workflow for CSV files")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv-dag.yaml \\\n -f support/config/config-transform-csv-stitch.yml\n")),(0,n.yg)("admonition",{title:"Solve issue",type:"caution"},(0,n.yg)("p",{parentName:"admonition"},"Try this to solve issue related to steps services IP: ",(0,n.yg)("inlineCode",{parentName:"p"},"{{steps.nginx-server.pod-ip}}"))),(0,n.yg)("hr",null),(0,n.yg)("h2",{id:"argo-commands"},"Argo commands"),(0,n.yg)("h3",{id:"list-running-argo-workflows"},"List running Argo workflows"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo list\n")),(0,n.yg)("h3",{id:"stop-a-workflow"},"Stop a workflow"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo terminate my-workflow\n")),(0,n.yg)("admonition",{title:"Workflow",type:"caution"},(0,n.yg)("p",{parentName:"admonition"},"This might not stop the workflow, in this case use:"),(0,n.yg)("pre",{parentName:"admonition"},(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"argo delete my-workflow\n"))),(0,n.yg)("h3",{id:"delete-a-workflow"},"Delete a workflow"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo delete 
my-workflow\n")),(0,n.yg)("hr",null),(0,n.yg)("h2",{id:"debug-a-workflow"},"Debug a workflow"),(0,n.yg)("p",null,"Get into a container, to understand why it bugs, by creating a YAML with the command ",(0,n.yg)("inlineCode",{parentName:"p"},"tail -f /dev/null")," to keep it hanging."),(0,n.yg)("p",null,"See the ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/tests/test-devnull-argo.yaml"},"example in the d2s-argo-workflow repository"),":"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-yaml"},'apiVersion: argoproj.io/v1alpha1\nkind: Workflow\nmetadata:\n generateName: test-devnull-argo-\nspec:\n entrypoint: execute-workflow\n\n # Use existing volume\n volumes:\n - name: workdir\n persistentVolumeClaim:\n claimName: pvc-mapr-projects-test-vincent \n\n templates:\n - name: execute-workflow\n steps:\n - - name: run-rdfunit\n template: rdfunit\n \n - name: rdfunit\n container:\n image: umids/rdfunit:latest\n command: [tail]\n args: ["-f", "/dev/null"]\n volumeMounts:\n - name: workdir\n mountPath: /data\n subPath: dqa-workspace\n')),(0,n.yg)("p",null,"Then start the workflow:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit --serviceaccount argo tests/test-devnull-argo.yaml\n")),(0,n.yg)("p",null,"And connect with the Shell (change the pod ID to your pod ID):"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"oc rsh test-devnull-argo-pod\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5217],{5680:(e,o,t)=>{t.d(o,{xA:()=>p,yg:()=>d});var r=t(6540);function a(e,o,t){return o in e?Object.defineProperty(e,o,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[o]=t,e}function n(e,o){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);o&&(r=r.filter((function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable}))),t.push.apply(t,r)}return t}function l(e){for(var o=1;o=0||(a[t]=e[t]);return a}(e,o);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var i=r.createContext({}),g=function(e){var o=r.useContext(i),t=o;return e&&(t="function"==typeof e?e(o):l(l({},o),e)),t},p=function(e){var o=g(e.components);return r.createElement(i.Provider,{value:o},e.children)},u={inlineCode:"code",wrapper:function(e){var o=e.children;return r.createElement(r.Fragment,{},o)}},c=r.forwardRef((function(e,o){var t=e.components,a=e.mdxType,n=e.originalType,i=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),c=g(t),d=a,m=c["".concat(i,".").concat(d)]||c[d]||u[d]||n;return t?r.createElement(m,l(l({ref:o},p),{},{components:t})):r.createElement(m,l({ref:o},p))}));function d(e,o){var t=arguments,a=o&&o.mdxType;if("string"==typeof e||a){var n=t.length,l=new Array(n);l[0]=c;var s={};for(var i in o)hasOwnProperty.call(o,i)&&(s[i]=o[i]);s.originalType=e,s.mdxType="string"==typeof e?e:a,l[1]=s;for(var g=2;g{t.r(o),t.d(o,{assets:()=>p,contentTitle:()=>i,default:()=>d,frontMatter:()=>s,metadata:()=>g,toc:()=>u});var r=t(9668),a=t(1367),n=(t(6540),t(5680)),l=["components"],s={id:"workflows-argo",title:"Run Argo workflows"},i=void 0,g={unversionedId:"workflows-argo",id:"workflows-argo",title:"Run Argo workflows",description:"Argo needs to be installed in your project, contact the DSRI team to request 
it.",source:"@site/docs/workflows-argo.md",sourceDirName:".",slug:"/workflows-argo",permalink:"/docs/workflows-argo",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-argo.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-argo",title:"Run Argo workflows"},sidebar:"docs",previous:{title:"Deploy Airflow",permalink:"/docs/workflows-airflow"},next:{title:"Run Nextflow workflows",permalink:"/docs/workflows-nextflow"}},p={},u=[{value:"Install the argo client",id:"install-the-argo-client",level:2},{value:"On Ubuntu",id:"on-ubuntu",level:3},{value:"On MacOS",id:"on-macos",level:3},{value:"On Windows",id:"on-windows",level:3},{value:"Test Argo",id:"test-argo",level:3},{value:"Install Argo in your project",id:"install-argo-in-your-project",level:2},{value:"Argo workflows with Helm",id:"argo-workflows-with-helm",level:3},{value:"ArgoCD Operator",id:"argocd-operator",level:3},{value:"Uninstall argo",id:"uninstall-argo",level:3},{value:"On Ubuntu",id:"on-ubuntu-1",level:4},{value:"Run workflows to convert structured data to RDF",id:"run-workflows-to-convert-structured-data-to-rdf",level:2},{value:"Clone the repository",id:"clone-the-repository",level:3},{value:"Workflow to convert XML files to RDF",id:"workflow-to-convert-xml-files-to-rdf",level:3},{value:"Workflow to convert CSV files to RDF",id:"workflow-to-convert-csv-files-to-rdf",level:3},{value:"Argo commands",id:"argo-commands",level:2},{value:"List running Argo workflows",id:"list-running-argo-workflows",level:3},{value:"Stop a workflow",id:"stop-a-workflow",level:3},{value:"Delete a workflow",id:"delete-a-workflow",level:3},{value:"Debug a workflow",id:"debug-a-workflow",level:2}],c={toc:u};function d(e){var o=e.components,t=(0,a.A)(e,l);return(0,n.yg)("wrapper",(0,r.A)({},c,t,{components:o,mdxType:"MDXLayout"}),(0,n.yg)("admonition",{title:"Install in your project",type:"warning"},(0,n.yg)("p",{parentName:"admonition"},"Argo needs to be installed in your project, ",(0,n.yg)("a",{parentName:"p",href:"mailto:dsri-support-l@maastrichtuniversity.nl"},"contact the DSRI team")," to request it.")),(0,n.yg)("h2",{id:"install-the-argo-client"},"Install the ",(0,n.yg)("inlineCode",{parentName:"h2"},"argo")," client"),(0,n.yg)("p",null,(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/argo/"},"Argo \ud83e\udd91")," is a container native workflow engine for ",(0,n.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes")," supporting both ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html#dag"},"DAG")," and ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html#steps"},"step based")," workflows."),(0,n.yg)("p",null,"Download and install the ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/argoproj/argo/blob/master/demo.md#1-download-argo"},"Argo client")," on your computer to ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html"},"start workflows")," on the DSRI."),(0,n.yg)("h3",{id:"on-ubuntu"},"On Ubuntu"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"sudo curl -L -o /usr/local/bin/argo https://github.com/argoproj/argo/releases/download/v2.4.2/argo-linux-amd64\nsudo chmod +x /usr/local/bin/argo\n")),(0,n.yg)("h3",{id:"on-macos"},"On 
MacOS"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"brew install argoproj/tap/argo\n")),(0,n.yg)("h3",{id:"on-windows"},"On Windows"),(0,n.yg)("p",null,"Get ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/argoproj/argo/releases/download/v2.4.2/argo-windows-amd64"},"Argo executable version 2.4.2")," from ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/argoproj/argo/releases"},"Argo Releases")," on GitHub."),(0,n.yg)("blockquote",null,(0,n.yg)("p",{parentName:"blockquote"},"See ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/demo.html#1-download-argo"},"official Argo documentation"),".")),(0,n.yg)("h3",{id:"test-argo"},"Test Argo"),(0,n.yg)("p",null,"Run Hello world workflow to test if Argo has been properly installed. And take a look at the ",(0,n.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html"},"examples provided in Argo documentation")," to discover how to use the different features available."),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml\n")),(0,n.yg)("admonition",{title:"Logged in",type:"caution"},(0,n.yg)("p",{parentName:"admonition"},"You will need to have the ",(0,n.yg)("inlineCode",{parentName:"p"},"oc")," client installed and be logged in with ",(0,n.yg)("inlineCode",{parentName:"p"},"oc login"),", see the ",(0,n.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"install documentation page"),".")),(0,n.yg)("h2",{id:"install-argo-in-your-project"},"Install Argo in your project"),(0,n.yg)("h3",{id:"argo-workflows-with-helm"},"Argo workflows with Helm"),(0,n.yg)("p",null,"Deploy the ",(0,n.yg)("a",{parentName:"p",href:"https://artifacthub.io/packages/helm/argo/argo"},"Argo Helm chart"),"."),(0,n.yg)("ol",null,(0,n.yg)("li",{parentName:"ol"},"Install and use ",(0,n.yg)("a",{parentName:"li",href:"/docs/helm"},(0,n.yg)("inlineCode",{parentName:"a"},"helm"))),(0,n.yg)("li",{parentName:"ol"},"Add the Helm charts repository:")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add argo https://argoproj.github.io/argo-helm\n")),(0,n.yg)("ol",{start:3},(0,n.yg)("li",{parentName:"ol"},"Install chart:")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm install my-argo argo/argo --version 0.15.2\n")),(0,n.yg)("h3",{id:"argocd-operator"},"ArgoCD Operator"),(0,n.yg)("p",null,"Ask on the DSRI Slack ",(0,n.yg)("strong",{parentName:"p"},"#helpdesk")," channel to have the ",(0,n.yg)("a",{parentName:"p",href:"https://artifacthub.io/packages/olm/community-operators/argocd-operator"},"ArgoCD Operator")," installed in your project."),(0,n.yg)("h3",{id:"uninstall-argo"},"Uninstall ",(0,n.yg)("inlineCode",{parentName:"h3"},"argo")),(0,n.yg)("h4",{id:"on-ubuntu-1"},"On Ubuntu"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"sudo rm /usr/local/bin/argo\n")),(0,n.yg)("blockquote",null,(0,n.yg)("p",{parentName:"blockquote"},"You can now reinstall a newer version of Argo.")),(0,n.yg)("hr",null),(0,n.yg)("h2",{id:"run-workflows-to-convert-structured-data-to-rdf"},"Run workflows to convert structured data to RDF"),(0,n.yg)("p",null,"We will use examples from the ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/d2s-core"},"MaastrichtU-IDS/d2s-core")," project."),(0,n.yg)("h3",{id:"clone-the-repository"},"Clone the 
repository"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"git clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git\ncd d2s-project-template\n")),(0,n.yg)("p",null,(0,n.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"Authenticate to the OpenShift cluster")," using ",(0,n.yg)("inlineCode",{parentName:"p"},"oc login")," ."),(0,n.yg)("h3",{id:"workflow-to-convert-xml-files-to-rdf"},"Workflow to convert XML files to RDF"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"Steps-based workflow for XML files, see the example workflow ",(0,n.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/d2s-workflow-xml.yaml"},"YAML file on GitHub"),".")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml.yaml \\\n -f support/config/config-transform-xml-drugbank.yml\n")),(0,n.yg)("admonition",{title:"Provide config files",type:"info"},(0,n.yg)("p",{parentName:"admonition"},"Config files can be provided using the ",(0,n.yg)("inlineCode",{parentName:"p"},"-f")," arguments, but are not necessary.")),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"DAG workflow for XML files, see the ",(0,n.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/d2s-workflow-xml-dag.yaml"},"YAML file on GitHub"),".")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml-dag.yaml \\\n -f support/config/config-transform-xml-drugbank.yml\n")),(0,n.yg)("h3",{id:"workflow-to-convert-csv-files-to-rdf"},"Workflow to convert CSV files to RDF"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"Steps-based workflow for CSV files")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv.yaml \\\n -f support/config/config-transform-csv-stitch.yml\n")),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"DAG workflow for CSV files")),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv-dag.yaml \\\n -f support/config/config-transform-csv-stitch.yml\n")),(0,n.yg)("admonition",{title:"Solve issue",type:"caution"},(0,n.yg)("p",{parentName:"admonition"},"Try this to solve issue related to steps services IP: ",(0,n.yg)("inlineCode",{parentName:"p"},"{{steps.nginx-server.pod-ip}}"))),(0,n.yg)("hr",null),(0,n.yg)("h2",{id:"argo-commands"},"Argo commands"),(0,n.yg)("h3",{id:"list-running-argo-workflows"},"List running Argo workflows"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo list\n")),(0,n.yg)("h3",{id:"stop-a-workflow"},"Stop a workflow"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo terminate my-workflow\n")),(0,n.yg)("admonition",{title:"Workflow",type:"caution"},(0,n.yg)("p",{parentName:"admonition"},"This might not stop the workflow, in this case use:"),(0,n.yg)("pre",{parentName:"admonition"},(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"argo delete my-workflow\n"))),(0,n.yg)("h3",{id:"delete-a-workflow"},"Delete a workflow"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo delete my-workflow\n")),(0,n.yg)("hr",null),(0,n.yg)("h2",{id:"debug-a-workflow"},"Debug a 
workflow"),(0,n.yg)("p",null,"Get into a container, to understand why it bugs, by creating a YAML with the command ",(0,n.yg)("inlineCode",{parentName:"p"},"tail -f /dev/null")," to keep it hanging."),(0,n.yg)("p",null,"See the ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/tests/test-devnull-argo.yaml"},"example in the d2s-argo-workflow repository"),":"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-yaml"},'apiVersion: argoproj.io/v1alpha1\nkind: Workflow\nmetadata:\n generateName: test-devnull-argo-\nspec:\n entrypoint: execute-workflow\n\n # Use existing volume\n volumes:\n - name: workdir\n persistentVolumeClaim:\n claimName: pvc-mapr-projects-test-vincent \n\n templates:\n - name: execute-workflow\n steps:\n - - name: run-rdfunit\n template: rdfunit\n \n - name: rdfunit\n container:\n image: umids/rdfunit:latest\n command: [tail]\n args: ["-f", "/dev/null"]\n volumeMounts:\n - name: workdir\n mountPath: /data\n subPath: dqa-workspace\n')),(0,n.yg)("p",null,"Then start the workflow:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"argo submit --serviceaccount argo tests/test-devnull-argo.yaml\n")),(0,n.yg)("p",null,"And connect with the Shell (change the pod ID to your pod ID):"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-shell"},"oc rsh test-devnull-argo-pod\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/8cf96c0d.7f6201a0.js b/assets/js/8cf96c0d.4e15a67b.js similarity index 99% rename from assets/js/8cf96c0d.7f6201a0.js rename to assets/js/8cf96c0d.4e15a67b.js index 1aa232870..ad4a14363 100644 --- a/assets/js/8cf96c0d.7f6201a0.js +++ b/assets/js/8cf96c0d.4e15a67b.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4062],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>d});var r=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function n(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function l(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var s=r.createContext({}),p=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):l(l({},t),e)),a},c=function(e){var t=p(e.components);return r.createElement(s.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var a=e.components,o=e.mdxType,n=e.originalType,s=e.parentName,c=i(e,["components","mdxType","originalType","parentName"]),m=p(a),d=o,f=m["".concat(s,".").concat(d)]||m[d]||u[d]||n;return a?r.createElement(f,l(l({ref:t},c),{},{components:a})):r.createElement(f,l({ref:t},c))}));function d(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var n=a.length,l=new Array(n);l[0]=m;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:o,l[1]=i;for(var p=2;p{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>d,frontMatter:()=>i,metadata:()=>p,toc:()=>u});var r=a(9668),o=a(1367),n=(a(6540),a(5680)),l=["components"],i={id:"workflows-airflow",title:"Deploy 
Airflow"},s=void 0,p={unversionedId:"workflows-airflow",id:"workflows-airflow",title:"Deploy Airflow",description:"Deploy Apache Airflow to run workflows (aka. DAGs), hosted in a Git repository, on the DSRI.",source:"@site/docs/workflows-airflow.md",sourceDirName:".",slug:"/workflows-airflow",permalink:"/docs/workflows-airflow",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-airflow.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-airflow",title:"Deploy Airflow"},sidebar:"docs",previous:{title:"Deploy GitHub Runners",permalink:"/docs/workflows-github-actions"},next:{title:"Run Argo workflows",permalink:"/docs/workflows-argo"}},c={},u=[{value:"Install the chart",id:"install-the-chart",level:2},{value:"Deploy Airflow",id:"deploy-airflow",level:2},{value:"Example workflows",id:"example-workflows",level:2},{value:"Delete the chart",id:"delete-the-chart",level:2},{value:"See also",id:"see-also",level:2}],m={toc:u};function d(e){var t=e.components,a=(0,o.A)(e,l);return(0,n.yg)("wrapper",(0,r.A)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("p",null,"Deploy ",(0,n.yg)("a",{parentName:"p",href:"https://airflow.apache.org"},"Apache Airflow")," to run workflows (aka. DAGs), hosted in a Git repository, on the DSRI. "),(0,n.yg)("h2",{id:"install-the-chart"},"Install the chart"),(0,n.yg)("p",null,"You will need to have Helm installed on your computer to deploy a Helm chart, see the ",(0,n.yg)("a",{parentName:"p",href:"/docs/helm"},"Helm docs")," for more details."),(0,n.yg)("p",null,"Install the Helm chart to be able to deploy Airflow on the DSRI:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add apache-airflow https://airflow.apache.org\nhelm repo update\n")),(0,n.yg)("h2",{id:"deploy-airflow"},"Deploy Airflow"),(0,n.yg)("p",null,"You can quickly deploy Airflow on the DSRI, with DAGs automatically synchronized with your Git repository. 
"),(0,n.yg)("p",null,"We use a ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/blob/master/applications/airflow/values.yml"},(0,n.yg)("inlineCode",{parentName:"a"},"values.yml")," file")," with all default parameters pre-defined for the DSRI, so you just need to edit the password and git repository configuration in this command, and run it:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm install airflow apache-airflow/airflow \\\n -f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/airflow/values.yml \\\n --set webserver.defaultUser.password=yourpassword \\\n --set dags.gitSync.repo=https://github.com/bio2kg/bio2kg-etl.git \\\n --set dags.gitSync.branch=main \\\n --set dags.gitSync.subPath=workflows/dags\n")),(0,n.yg)("admonition",{type:"info"},(0,n.yg)("p",{parentName:"admonition"},"If you need to do more configuration you can download the a ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/blob/master/applications/airflow/values.yml"},(0,n.yg)("inlineCode",{parentName:"a"},"values.yml")," file"),", edit it directly to your settings and use this file locally with ",(0,n.yg)("inlineCode",{parentName:"p"},"-f values.yml"))),(0,n.yg)("p",null,"A few seconds after Airflow started to install, you will need to fix the postgresql deployment in a different terminal window (unfortunately setting the ",(0,n.yg)("inlineCode",{parentName:"p"},"serviceAccount.name")," of the sub chart ",(0,n.yg)("inlineCode",{parentName:"p"},"postgresql")," don't work, even if it should be possible according to the ",(0,n.yg)("a",{parentName:"p",href:"https://helm.sh/docs/chart_template_guide/subcharts_and_globals/"},"official helm docs"),"). 
Run this command to fix postgresql:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch statefulset/airflow-postgresql --patch \'{"spec":{"template":{"spec": {"serviceAccountName": "anyuid"}}}}\'\n')),(0,n.yg)("p",null,"Once Airflow finished to deploy, you can access its web interface temporarily by forwarding the webserver on your machine at http://localhost:8080"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"oc port-forward svc/airflow-webserver 8080:8080\n")),(0,n.yg)("p",null,"Or permanently expose the interface on a URL accessible when logged to the UM VPN, with HTTPS enabled:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},'oc expose svc/airflow-webserver\noc patch route/airflow-webserver --patch \'{"spec":{"tls": {"termination": "edge", "insecureEdgeTerminationPolicy": "Redirect"}}}\'\n')),(0,n.yg)("p",null,"Finally, get the route to the Airflow web interface, or access it via the DSRI web UI:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"oc get routes\n")),(0,n.yg)("h2",{id:"example-workflows"},"Example workflows"),(0,n.yg)("p",null,"You can find example DAGs for bash operator, python operator and Kubernetes pod operator ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/tree/master/applications/airflow/dags"},"here"),"."),(0,n.yg)("p",null,"Here an example of a DAG using the Kubernetes pod operator to run tasks as pods, you will need to change the ",(0,n.yg)("inlineCode",{parentName:"p"},"namespace")," parameter to your DSRI project where Airflow is deployed:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-python"},"from airflow import DAG\nfrom datetime import datetime, timedelta\nfrom airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator\nfrom airflow.operators.dummy_operator import DummyOperator\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime.utcnow(),\n 'email': ['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5)\n}\ndag = DAG(\n 'kubernetes_pod_operator',\n default_args=default_args, \n schedule_interval=None\n # schedule_interval=timedelta(minutes=10)\n)\n\nstart = DummyOperator(task_id='run_this_first', dag=dag)\n\npassing = KubernetesPodOperator(\n namespace='CHANGEME',\n image=\"python:3.6\",\n cmds=[\"python\",\"-c\"],\n arguments=[\"print('hello world')\"],\n labels={\"app\": \"airflow\"},\n name=\"passing-test\",\n task_id=\"passing-task\",\n get_logs=True,\n dag=dag\n)\n\npassing.set_upstream(start)\n")),(0,n.yg)("h2",{id:"delete-the-chart"},"Delete the chart"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm uninstall airflow\n")),(0,n.yg)("h2",{id:"see-also"},"See also"),(0,n.yg)("p",null,"Here are a few links for more details on the official Airflow Helm chart:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://airflow.apache.org/docs/helm-chart/stable/index.html"},"Helm chart docs")),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://github.com/apache/airflow/tree/main/chart"},"Helm chart source code")),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://airflow.apache.org/docs/helm-chart/stable/parameters-ref.html"},"Helm chart 
parameters"))),(0,n.yg)("p",null,"Other ways to deploy Airflow on OpenShift:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://github.com/airflow-helm/charts/tree/main/charts/airflow"},"Community Helm chart GitHub repo")),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://github.com/CSCfi/airflow-openshift"},"Airflow template for OpenShift"))))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4062],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>d});var r=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function n(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function l(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var s=r.createContext({}),p=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):l(l({},t),e)),a},c=function(e){var t=p(e.components);return r.createElement(s.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var a=e.components,o=e.mdxType,n=e.originalType,s=e.parentName,c=i(e,["components","mdxType","originalType","parentName"]),m=p(a),d=o,f=m["".concat(s,".").concat(d)]||m[d]||u[d]||n;return a?r.createElement(f,l(l({ref:t},c),{},{components:a})):r.createElement(f,l({ref:t},c))}));function d(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var n=a.length,l=new Array(n);l[0]=m;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:o,l[1]=i;for(var p=2;p{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>d,frontMatter:()=>i,metadata:()=>p,toc:()=>u});var r=a(9668),o=a(1367),n=(a(6540),a(5680)),l=["components"],i={id:"workflows-airflow",title:"Deploy Airflow"},s=void 0,p={unversionedId:"workflows-airflow",id:"workflows-airflow",title:"Deploy Airflow",description:"Deploy Apache Airflow to run workflows (aka. 
DAGs), hosted in a Git repository, on the DSRI.",source:"@site/docs/workflows-airflow.md",sourceDirName:".",slug:"/workflows-airflow",permalink:"/docs/workflows-airflow",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-airflow.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-airflow",title:"Deploy Airflow"},sidebar:"docs",previous:{title:"Deploy GitHub Runners",permalink:"/docs/workflows-github-actions"},next:{title:"Run Argo workflows",permalink:"/docs/workflows-argo"}},c={},u=[{value:"Install the chart",id:"install-the-chart",level:2},{value:"Deploy Airflow",id:"deploy-airflow",level:2},{value:"Example workflows",id:"example-workflows",level:2},{value:"Delete the chart",id:"delete-the-chart",level:2},{value:"See also",id:"see-also",level:2}],m={toc:u};function d(e){var t=e.components,a=(0,o.A)(e,l);return(0,n.yg)("wrapper",(0,r.A)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("p",null,"Deploy ",(0,n.yg)("a",{parentName:"p",href:"https://airflow.apache.org"},"Apache Airflow")," to run workflows (aka. DAGs), hosted in a Git repository, on the DSRI. "),(0,n.yg)("h2",{id:"install-the-chart"},"Install the chart"),(0,n.yg)("p",null,"You will need to have Helm installed on your computer to deploy a Helm chart, see the ",(0,n.yg)("a",{parentName:"p",href:"/docs/helm"},"Helm docs")," for more details."),(0,n.yg)("p",null,"Install the Helm chart to be able to deploy Airflow on the DSRI:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add apache-airflow https://airflow.apache.org\nhelm repo update\n")),(0,n.yg)("h2",{id:"deploy-airflow"},"Deploy Airflow"),(0,n.yg)("p",null,"You can quickly deploy Airflow on the DSRI, with DAGs automatically synchronized with your Git repository. 
"),(0,n.yg)("p",null,"We use a ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/blob/master/applications/airflow/values.yml"},(0,n.yg)("inlineCode",{parentName:"a"},"values.yml")," file")," with all default parameters pre-defined for the DSRI, so you just need to edit the password and git repository configuration in this command, and run it:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm install airflow apache-airflow/airflow \\\n -f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/airflow/values.yml \\\n --set webserver.defaultUser.password=yourpassword \\\n --set dags.gitSync.repo=https://github.com/bio2kg/bio2kg-etl.git \\\n --set dags.gitSync.branch=main \\\n --set dags.gitSync.subPath=workflows/dags\n")),(0,n.yg)("admonition",{type:"info"},(0,n.yg)("p",{parentName:"admonition"},"If you need to do more configuration you can download the a ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/blob/master/applications/airflow/values.yml"},(0,n.yg)("inlineCode",{parentName:"a"},"values.yml")," file"),", edit it directly to your settings and use this file locally with ",(0,n.yg)("inlineCode",{parentName:"p"},"-f values.yml"))),(0,n.yg)("p",null,"A few seconds after Airflow started to install, you will need to fix the postgresql deployment in a different terminal window (unfortunately setting the ",(0,n.yg)("inlineCode",{parentName:"p"},"serviceAccount.name")," of the sub chart ",(0,n.yg)("inlineCode",{parentName:"p"},"postgresql")," don't work, even if it should be possible according to the ",(0,n.yg)("a",{parentName:"p",href:"https://helm.sh/docs/chart_template_guide/subcharts_and_globals/"},"official helm docs"),"). 
Run this command to fix postgresql:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch statefulset/airflow-postgresql --patch \'{"spec":{"template":{"spec": {"serviceAccountName": "anyuid"}}}}\'\n')),(0,n.yg)("p",null,"Once Airflow has finished deploying, you can access its web interface temporarily by forwarding the webserver to your machine at http://localhost:8080:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"oc port-forward svc/airflow-webserver 8080:8080\n")),(0,n.yg)("p",null,"Or permanently expose the interface on a URL accessible when logged in to the UM VPN, with HTTPS enabled:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},'oc expose svc/airflow-webserver\noc patch route/airflow-webserver --patch \'{"spec":{"tls": {"termination": "edge", "insecureEdgeTerminationPolicy": "Redirect"}}}\'\n')),(0,n.yg)("p",null,"Finally, get the route to the Airflow web interface, or access it via the DSRI web UI:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"oc get routes\n")),(0,n.yg)("h2",{id:"example-workflows"},"Example workflows"),(0,n.yg)("p",null,"You can find example DAGs for the bash operator, python operator and Kubernetes pod operator ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/tree/master/applications/airflow/dags"},"here"),"."),(0,n.yg)("p",null,"Here is an example of a DAG using the Kubernetes pod operator to run tasks as pods. You will need to change the ",(0,n.yg)("inlineCode",{parentName:"p"},"namespace")," parameter to the DSRI project where Airflow is deployed:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-python"},"from airflow import DAG\nfrom datetime import datetime, timedelta\nfrom airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator\nfrom airflow.operators.dummy_operator import DummyOperator\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime.utcnow(),\n 'email': ['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5)\n}\ndag = DAG(\n 'kubernetes_pod_operator',\n default_args=default_args, \n schedule_interval=None\n # schedule_interval=timedelta(minutes=10)\n)\n\nstart = DummyOperator(task_id='run_this_first', dag=dag)\n\npassing = KubernetesPodOperator(\n namespace='CHANGEME',\n image=\"python:3.6\",\n cmds=[\"python\",\"-c\"],\n arguments=[\"print('hello world')\"],\n labels={\"app\": \"airflow\"},\n name=\"passing-test\",\n task_id=\"passing-task\",\n get_logs=True,\n dag=dag\n)\n\npassing.set_upstream(start)\n")),(0,n.yg)("h2",{id:"delete-the-chart"},"Delete the chart"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre",className:"language-bash"},"helm uninstall airflow\n")),(0,n.yg)("h2",{id:"see-also"},"See also"),(0,n.yg)("p",null,"Here are a few links for more details on the official Airflow Helm chart:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://airflow.apache.org/docs/helm-chart/stable/index.html"},"Helm chart docs")),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://github.com/apache/airflow/tree/main/chart"},"Helm chart source code")),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://airflow.apache.org/docs/helm-chart/stable/parameters-ref.html"},"Helm chart 
parameters"))),(0,n.yg)("p",null,"Other ways to deploy Airflow on OpenShift:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://github.com/airflow-helm/charts/tree/main/charts/airflow"},"Community Helm chart GitHub repo")),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://github.com/CSCfi/airflow-openshift"},"Airflow template for OpenShift"))))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/94db8302.cce39f08.js b/assets/js/94db8302.6b3b63a5.js similarity index 99% rename from assets/js/94db8302.cce39f08.js rename to assets/js/94db8302.6b3b63a5.js index 8bd371ff4..7d19453c2 100644 --- a/assets/js/94db8302.cce39f08.js +++ b/assets/js/94db8302.6b3b63a5.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[695],{5680:(e,t,r)=>{r.d(t,{xA:()=>u,yg:()=>g});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),p=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},u=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,l=e.parentName,u=s(e,["components","mdxType","originalType","parentName"]),d=p(r),g=a,m=d["".concat(l,".").concat(g)]||d[g]||c[g]||o;return r?n.createElement(m,i(i({ref:t},u),{},{components:r})):n.createElement(m,i({ref:t},u))}));function g(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>u,contentTitle:()=>l,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>c});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],s={id:"neuroscience",title:"Neuroscience research"},l=void 0,p={unversionedId:"neuroscience",id:"neuroscience",title:"Neuroscience research",description:"We are not expert in Neuroscience ourselves, please contact us if you see any improvements that could be made to this page, or if you need any help to get it working.",source:"@site/docs/deploy-neurodocker.md",sourceDirName:".",slug:"/neuroscience",permalink:"/docs/neuroscience",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-neurodocker.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"neuroscience",title:"Neuroscience research"},sidebar:"docs",previous:{title:"Run MPI jobs",permalink:"/docs/mpi-jobs"},next:{title:"Genomics",permalink:"/docs/catalog-genomics"}},u={},c=[{value:"JupyterLab with FreeSurfer",id:"jupyterlab-with-freesurfer",level:2},{value:"FreeSurfer and FSL",id:"freesurfer-and-fsl",level:2},{value:"FreeSurfer and 
AFNI",id:"freesurfer-and-afni",level:2},{value:"Deploy the generated Dockerfile",id:"deploy-the-generated-dockerfile",level:2},{value:"Use the GPUs",id:"use-the-gpus",level:2}],d={toc:c};function g(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("admonition",{title:"Feedbacks welcome",type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"We are not expert in Neuroscience ourselves, please contact us if you see any improvements that could be made to this page, or if you need any help to get it working.")),(0,o.yg)("p",null,"The Neurodocker project helps you to create a Docker image with the Neuroscience softwares you need, such as FSL, FreeSurfer, AFNI or ANTs. "),(0,o.yg)("p",null,"Checkout the Neurodocker documentation for more details: ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/ReproNim/neurodocker"},"https://github.com/ReproNim/neurodocker")),(0,o.yg)("admonition",{title:"Examples",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"In this page we will show you how to generate a Docker image with popular Neuroscience research softwares installed such as FreeSurfer and FSL. Feel free to check the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/ReproNim/neurodocker"},"Neurodocker documentation"),", and adapt the installation process to your needs.")),(0,o.yg)("h2",{id:"jupyterlab-with-freesurfer"},"JupyterLab with FreeSurfer"),(0,o.yg)("p",null,"Start a JupyterLab container with Freesurfer pre-installed providing admin (",(0,o.yg)("inlineCode",{parentName:"p"},"sudo"),") privileges to install anything you need from the terminal (e.g. pip or apt packages)"),(0,o.yg)("p",null,"When instantiating the template you can provide a few parameters similar to the standard JupyterLab, such as:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Password")," to access the notebook"),(0,o.yg)("li",{parentName:"ul"},"Optionally you can provide a ",(0,o.yg)("strong",{parentName:"li"},"git repository")," to be automatically cloned in the JupyterLab (if there is a ",(0,o.yg)("inlineCode",{parentName:"li"},"requirements.txt")," packages will be automatically installed with ",(0,o.yg)("inlineCode",{parentName:"li"},"pip"),")"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Docker image")," to use for the notebook (see below for more details on customizing the docker image) "),(0,o.yg)("li",{parentName:"ul"},"Your ",(0,o.yg)("strong",{parentName:"li"},"git username and email")," to automatically configure git")),(0,o.yg)("p",null,"The DSRI will automatically create a persistent volume to store data you will put in the ",(0,o.yg)("inlineCode",{parentName:"p"},"/home/jovyan/work")," folder (the folder used by the notebook interface). 
You can find the persistent volumes in the DSRI web UI, go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims")),(0,o.yg)("img",{src:"/img/screenshot-freesurfer.png",alt:"Deploy Freesurfer",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"You can also link your git repository to the project for automatic deployment see ",(0,o.yg)("a",{parentName:"p",href:"https://dsri.maastrichtuniversity.nl/docs/deploy-jupyter#-use-git-in-jupyterlab"},"using git in JupyterLab")),(0,o.yg)("p",null,"This can also be deployed using Helm from the terminal, the steps are:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add dsri https://maastrichtu-ids.github.io/dsri-helm-charts/\nhelm repo update\nhelm install freesurfer dsri/jupyterlab \\\n --set serviceAccount.name=anyuid \\\n --set openshiftRoute.enabled=true \\\n --set image.repository=ghcr.io/maastrichtu-ids/jupyterlab \\\n --set image.tag=freesurfer \\\n --set storage.mountPath=/root \\\n --set password=changeme\noc get route --selector app.kubernetes.io/instance=freesurfer --no-headers -o=custom-columns=HOST:.spec.host\n")),(0,o.yg)("p",null,"Log in to the corresponding jupyter notebook and start the terminal, then enter ",(0,o.yg)("inlineCode",{parentName:"p"},"freesurfer")," as a command"),(0,o.yg)("h2",{id:"freesurfer-and-fsl"},"FreeSurfer and FSL"),(0,o.yg)("p",null,"Generate a ",(0,o.yg)("inlineCode",{parentName:"p"},"Dockerfile")," with:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"FreeSurfer 6.0.1"),(0,o.yg)("li",{parentName:"ul"},"FSL 6.0.3")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker run --rm repronim/neurodocker:0.7.0 generate docker \\\n --base debian:stretch --pkg-manager apt \\\n --freesurfer version=6.0.1 --fsl version=6.0.3 > Dockerfile\n")),(0,o.yg)("h2",{id:"freesurfer-and-afni"},"FreeSurfer and AFNI"),(0,o.yg)("p",null,"Generate a ",(0,o.yg)("inlineCode",{parentName:"p"},"Dockerfile")," with:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"FreeSurfer 6.0.1"),(0,o.yg)("li",{parentName:"ul"},"AFNI, R and Python3")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker run --rm repronim/neurodocker:0.7.0 generate docker \\\n --base debian:stretch --pkg-manager apt \\\n --afni version=latest install_r=true install_r_pkgs=true install_python3=true \\\n --freesurfer version=6.0.1 > Dockerfile\n")),(0,o.yg)("h2",{id:"deploy-the-generated-dockerfile"},"Deploy the generated Dockerfile"),(0,o.yg)("p",null,"Before deploying the ",(0,o.yg)("inlineCode",{parentName:"p"},"Dockerfile")," to the DSRI you can open it, and add commands to install additional package you are interested in, such as nighres or nipype."),(0,o.yg)("p",null,"Checkout the documentation to ",(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/guide-dockerfile-to-openshift"},"deploy the ",(0,o.yg)("inlineCode",{parentName:"a"},"Dockerfile")," on DSRI"),". "),(0,o.yg)("admonition",{title:"UI with VNC",type:"note"},(0,o.yg)("p",{parentName:"admonition"},"Running a UI with VNC (e.g. FSLeyes) is still a work in progress. 
See ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/ReproNim/neurodocker/issues/343"},"this issue for more details"),".")),(0,o.yg)("h2",{id:"use-the-gpus"},"Use the GPUs"),(0,o.yg)("p",null,"More details about using GPU with FSL: ",(0,o.yg)("a",{parentName:"p",href:"https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/GPU"},"https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/GPU")))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[695],{5680:(e,t,r)=>{r.d(t,{xA:()=>u,yg:()=>g});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),p=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},u=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,l=e.parentName,u=s(e,["components","mdxType","originalType","parentName"]),d=p(r),g=a,m=d["".concat(l,".").concat(g)]||d[g]||c[g]||o;return r?n.createElement(m,i(i({ref:t},u),{},{components:r})):n.createElement(m,i({ref:t},u))}));function g(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>u,contentTitle:()=>l,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>c});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],s={id:"neuroscience",title:"Neuroscience research"},l=void 0,p={unversionedId:"neuroscience",id:"neuroscience",title:"Neuroscience research",description:"We are not expert in Neuroscience ourselves, please contact us if you see any improvements that could be made to this page, or if you need any help to get it working.",source:"@site/docs/deploy-neurodocker.md",sourceDirName:".",slug:"/neuroscience",permalink:"/docs/neuroscience",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-neurodocker.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"neuroscience",title:"Neuroscience research"},sidebar:"docs",previous:{title:"Run MPI jobs",permalink:"/docs/mpi-jobs"},next:{title:"Genomics",permalink:"/docs/catalog-genomics"}},u={},c=[{value:"JupyterLab with FreeSurfer",id:"jupyterlab-with-freesurfer",level:2},{value:"FreeSurfer and FSL",id:"freesurfer-and-fsl",level:2},{value:"FreeSurfer and AFNI",id:"freesurfer-and-afni",level:2},{value:"Deploy the generated Dockerfile",id:"deploy-the-generated-dockerfile",level:2},{value:"Use the GPUs",id:"use-the-gpus",level:2}],d={toc:c};function g(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("admonition",{title:"Feedbacks 
welcome",type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"We are not expert in Neuroscience ourselves, please contact us if you see any improvements that could be made to this page, or if you need any help to get it working.")),(0,o.yg)("p",null,"The Neurodocker project helps you to create a Docker image with the Neuroscience softwares you need, such as FSL, FreeSurfer, AFNI or ANTs. "),(0,o.yg)("p",null,"Checkout the Neurodocker documentation for more details: ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/ReproNim/neurodocker"},"https://github.com/ReproNim/neurodocker")),(0,o.yg)("admonition",{title:"Examples",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"In this page we will show you how to generate a Docker image with popular Neuroscience research softwares installed such as FreeSurfer and FSL. Feel free to check the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/ReproNim/neurodocker"},"Neurodocker documentation"),", and adapt the installation process to your needs.")),(0,o.yg)("h2",{id:"jupyterlab-with-freesurfer"},"JupyterLab with FreeSurfer"),(0,o.yg)("p",null,"Start a JupyterLab container with Freesurfer pre-installed providing admin (",(0,o.yg)("inlineCode",{parentName:"p"},"sudo"),") privileges to install anything you need from the terminal (e.g. pip or apt packages)"),(0,o.yg)("p",null,"When instantiating the template you can provide a few parameters similar to the standard JupyterLab, such as:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Password")," to access the notebook"),(0,o.yg)("li",{parentName:"ul"},"Optionally you can provide a ",(0,o.yg)("strong",{parentName:"li"},"git repository")," to be automatically cloned in the JupyterLab (if there is a ",(0,o.yg)("inlineCode",{parentName:"li"},"requirements.txt")," packages will be automatically installed with ",(0,o.yg)("inlineCode",{parentName:"li"},"pip"),")"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Docker image")," to use for the notebook (see below for more details on customizing the docker image) "),(0,o.yg)("li",{parentName:"ul"},"Your ",(0,o.yg)("strong",{parentName:"li"},"git username and email")," to automatically configure git")),(0,o.yg)("p",null,"The DSRI will automatically create a persistent volume to store data you will put in the ",(0,o.yg)("inlineCode",{parentName:"p"},"/home/jovyan/work")," folder (the folder used by the notebook interface). 
You can also find the persistent volumes in the DSRI web UI: go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims")),(0,o.yg)("img",{src:"/img/screenshot-freesurfer.png",alt:"Deploy Freesurfer",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"You can also link your git repository to the project for automatic deployment, see ",(0,o.yg)("a",{parentName:"p",href:"https://dsri.maastrichtuniversity.nl/docs/deploy-jupyter#-use-git-in-jupyterlab"},"using git in JupyterLab")),(0,o.yg)("p",null,"This can also be deployed using Helm from the terminal; the steps are:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"helm repo add dsri https://maastrichtu-ids.github.io/dsri-helm-charts/\nhelm repo update\nhelm install freesurfer dsri/jupyterlab \\\n --set serviceAccount.name=anyuid \\\n --set openshiftRoute.enabled=true \\\n --set image.repository=ghcr.io/maastrichtu-ids/jupyterlab \\\n --set image.tag=freesurfer \\\n --set storage.mountPath=/root \\\n --set password=changeme\noc get route --selector app.kubernetes.io/instance=freesurfer --no-headers -o=custom-columns=HOST:.spec.host\n")),(0,o.yg)("p",null,"Log in to the corresponding Jupyter notebook and start the terminal, then enter ",(0,o.yg)("inlineCode",{parentName:"p"},"freesurfer")," as a command."),(0,o.yg)("h2",{id:"freesurfer-and-fsl"},"FreeSurfer and FSL"),(0,o.yg)("p",null,"Generate a ",(0,o.yg)("inlineCode",{parentName:"p"},"Dockerfile")," with:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"FreeSurfer 6.0.1"),(0,o.yg)("li",{parentName:"ul"},"FSL 6.0.3")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker run --rm repronim/neurodocker:0.7.0 generate docker \\\n --base debian:stretch --pkg-manager apt \\\n --freesurfer version=6.0.1 --fsl version=6.0.3 > Dockerfile\n")),(0,o.yg)("h2",{id:"freesurfer-and-afni"},"FreeSurfer and AFNI"),(0,o.yg)("p",null,"Generate a ",(0,o.yg)("inlineCode",{parentName:"p"},"Dockerfile")," with:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"FreeSurfer 6.0.1"),(0,o.yg)("li",{parentName:"ul"},"AFNI, R and Python3")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"docker run --rm repronim/neurodocker:0.7.0 generate docker \\\n --base debian:stretch --pkg-manager apt \\\n --afni version=latest install_r=true install_r_pkgs=true install_python3=true \\\n --freesurfer version=6.0.1 > Dockerfile\n")),(0,o.yg)("h2",{id:"deploy-the-generated-dockerfile"},"Deploy the generated Dockerfile"),(0,o.yg)("p",null,"Before deploying the ",(0,o.yg)("inlineCode",{parentName:"p"},"Dockerfile")," to the DSRI you can open it, and add commands to install additional packages you are interested in, such as nighres or nipype."),(0,o.yg)("p",null,"Check out the documentation to ",(0,o.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/guide-dockerfile-to-openshift"},"deploy the ",(0,o.yg)("inlineCode",{parentName:"a"},"Dockerfile")," on DSRI"),". "),(0,o.yg)("admonition",{title:"UI with VNC",type:"note"},(0,o.yg)("p",{parentName:"admonition"},"Running a UI with VNC (e.g. FSLeyes) is still a work in progress. 
See ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/ReproNim/neurodocker/issues/343"},"this issue for more details"),".")),(0,o.yg)("h2",{id:"use-the-gpus"},"Use the GPUs"),(0,o.yg)("p",null,"More details about using GPU with FSL: ",(0,o.yg)("a",{parentName:"p",href:"https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/GPU"},"https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/GPU")))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/97d64f0a.285b90ae.js b/assets/js/97d64f0a.9ca2404d.js similarity index 99% rename from assets/js/97d64f0a.285b90ae.js rename to assets/js/97d64f0a.9ca2404d.js index 13af3d6bb..f9a8a2f6d 100644 --- a/assets/js/97d64f0a.285b90ae.js +++ b/assets/js/97d64f0a.9ca2404d.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6970],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>d});var n=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var l=n.createContext({}),p=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},c=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},y=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,r=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),y=p(a),d=o,h=y["".concat(l,".").concat(d)]||y[d]||u[d]||r;return a?n.createElement(h,i(i({ref:t},c),{},{components:a})):n.createElement(h,i({ref:t},c))}));function d(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=a.length,i=new Array(r);i[0]=y;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:o,i[1]=s;for(var p=2;p{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>l,default:()=>d,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var n=a(9668),o=a(1367),r=(a(6540),a(5680)),i=["components"],s={id:"start-workspace",title:"Start your workspace"},l=void 0,p={unversionedId:"start-workspace",id:"start-workspace",title:"Start your workspace",description:"This page will help you to start a workspace to run your code and experiments on the DSRI in a container.",source:"@site/docs/start-workspace.md",sourceDirName:".",slug:"/start-workspace",permalink:"/docs/start-workspace",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/start-workspace.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"start-workspace",title:"Start your workspace"},sidebar:"docs",previous:{title:"Access the DSRI",permalink:"/docs/access-dsri"},next:{title:"Install the client",permalink:"/docs/openshift-install"}},c={},u=[{value:"Introduction to containers",id:"introduction-to-containers",level:2},{value:"Choose your interface",id:"choose-your-interface",level:2},{value:"Start your workspace",id:"start-your-workspace",level:2},{value:"Upload your code and 
data",id:"upload-your-code-and-data",level:2},{value:"Install your dependencies",id:"install-your-dependencies",level:2},{value:"Run your code",id:"run-your-code",level:2},{value:"Stop your application",id:"stop-your-application",level:2},{value:"Start your application",id:"start-your-application",level:2},{value:"Optional: define a docker image",id:"optional-define-a-docker-image",level:2}],y={toc:u};function d(e){var t=e.components,a=(0,o.A)(e,i);return(0,r.yg)("wrapper",(0,n.A)({},y,a,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("p",null,"This page will help you to start a workspace to run your code and experiments on the DSRI in a container. "),(0,r.yg)("h2",{id:"introduction-to-containers"},"Introduction to containers"),(0,r.yg)("p",null,"Anything running in DSRI needs to be running in a docker container. Docker containers are namespaces that share the kernel on a linux system, you can see them as a clean minimalist Linux computers with only what you need to run your programs installed. This allows you to completely control the environment where your code runs, and avoid conflicts."),(0,r.yg)("p",null,"When running experiments we can start from existing images that have been already published for popular data science applications with a web interface. You can use, for example, JupyterLab when running python, RStudio when running R, or VisualStudio Code if you prefer."),(0,r.yg)("p",null,"Once you access a running container, you can install anything you need like if it was a linux/ubuntu computer (most of them runs with admin privileges), and run anything via the notebook/RStudio/VSCode interface, or the terminal."),(0,r.yg)("h2",{id:"choose-your-interface"},"Choose your interface"),(0,r.yg)("p",null,"First step to get your code running on the DSRI is to pick the base interface you want to use to access your workspace on the DSRI."),(0,r.yg)("p",null,"We prepared generic Docker images for data science workspaces with your favorite web UI pre-installed to easily deploy your workspace. So you just need to choose your favorite workspace, start the container, access it, add your code, and install your dependencies."),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Login to the DSRI dashboard"),(0,r.yg)("li",{parentName:"ol"},"Select your project, or create one with a meaningful short name representing your project, e.g. ",(0,r.yg)("inlineCode",{parentName:"li"},"workspace-yourname")),(0,r.yg)("li",{parentName:"ol"},"Go to the ",(0,r.yg)("strong",{parentName:"li"},"+Add")," page, and select to add ",(0,r.yg)("strong",{parentName:"li"},"From Developer Catalog => All services"))),(0,r.yg)("img",{src:"/img/screenshot_access_catalog_manu17112024.png",alt:"Access catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("ol",{start:4},(0,r.yg)("li",{parentName:"ol"},"Search for templates corresponding to the application you want to deploy among the one described below (make sure the filter for templates is properly checked). ")),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"JupyterLab"),": Access and run your code using the popular Jupyter notebooks, with kernel for python, java, R, julia. It also provides a good web interface to access the terminal, upload and browse the files."),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"VisualStudio Code"),": Your daily IDE, but in your browser, running on the DSRI. 
"),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"RStudio"),": R users favorite's."),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"The terminal"),": For people who are used to the terminal and just want to run scripts, it provides smaller and more stable images, which makes installation and deployment easier. You can use the ",(0,r.yg)("strong",{parentName:"p"},"Ubuntu")," template to start a basic ubuntu image and access it from the terminal."),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"Any web interface"),": You can easily run and access most programs with a web interface on the DSRI. You can use the template ",(0,r.yg)("strong",{parentName:"p"},"Custom workspace")," if your application is exposed on port 8888. Otherwise visit the page ",(0,r.yg)("a",{parentName:"p",href:"/docs/anatomy-of-an-application"},"Anatomy of a DSRI application")," for more details. "),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"Desktop interface"),": there is the possibility to start a container as a Linux operating system with a graphical desktop interface. It can be useful to deploy software like Matlab, but the setup can be a bit more complex. You will get an Ubuntu computer with a basic Desktop interface, running on the DSRI, that you can access directly in your web browser. The desktop interface is accessed through a web application by using noVNC, which exposes the VNC connection without needing a VNC client."),(0,r.yg)("admonition",{title:"More applications",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"You can also find more documentation on the different applications that can be deployed from the DSRI under ",(0,r.yg)("strong",{parentName:"p"},"Deploy applications")," in the menu on the left.")),(0,r.yg)("h2",{id:"start-your-workspace"},"Start your workspace"),(0,r.yg)("p",null,"Once you chose your favorite way to run your experiments, you can click on the application you want to use for your workspace. Checkout the description to learn more details about the application that will be deployed. "),(0,r.yg)("p",null,"Then click on ",(0,r.yg)("strong",{parentName:"p"},"Instantiate Template"),", and fill the parameters, such as the password to access the web UI. Note that the application name needs to be unique in the project. Finally click on the ",(0,r.yg)("strong",{parentName:"p"},"Create")," button."),(0,r.yg)("img",{src:"/img/screenshot_template_configuration.png",alt:"Filter templates catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You should see your application in your project dashboard, it can take a few seconds to a few minutes to pull the docker image and start the application. "),(0,r.yg)("p",null,"Once the application has started you will be able to access it by clicking on its circle, then click the ",(0,r.yg)("strong",{parentName:"p"},"Route"),", that has been automatically generated for the web interface, in the Resources tab."),(0,r.yg)("admonition",{title:"Check the workshop",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"For a more detailed tutorial, you can follow the ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-workshop-start-app/"},"workshop to start Data Science applications on the DSRI"))),(0,r.yg)("h2",{id:"upload-your-code-and-data"},"Upload your code and data"),(0,r.yg)("p",null,"We recommend you to use ",(0,r.yg)("inlineCode",{parentName:"p"},"git")," to clone your project code in your workspace, as it helps sharing and managing the evolution of your project. 
"),(0,r.yg)("p",null,"It will be preinstalled in most images, otherwise you can install it easily with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt-get install git")),(0,r.yg)("p",null,"With web interface like JupyterLab, VisualStudio Code and Rstudio you can easily upload small and medium size files directly through the UI with a drag and drop."),(0,r.yg)("p",null,"Otherwise you can use the terminal, install the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," client, and use the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc cp")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"oc rsync")," commands to upload large files to your workspace on the DSRI. See the Upload data page for more details."),(0,r.yg)("h2",{id:"install-your-dependencies"},"Install your dependencies"),(0,r.yg)("p",null,"Once the workspace is started, you can install the different dependencies you need to run your experiments."),(0,r.yg)("p",null,"It is recommended to save all the commands you used to install the different requirements in a script (e.g. ",(0,r.yg)("inlineCode",{parentName:"p"},"install.sh"),"). This will insure you can reinstall the environment easily and faithfully if the container is restarted. You can also use them to create a Docker image with everything prepared for your application."),(0,r.yg)("p",null,"Most containers for science are based on debian/ubuntu, so you can install new packages with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt-get"),":"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"apt-get update\napt-get install -y build-essentials wget curl\n")),(0,r.yg)("h2",{id:"run-your-code"},"Run your code"),(0,r.yg)("p",null,"You can use your web interface to run your code as you like to do: notebooks, rstudio, execute via VSCode"),(0,r.yg)("p",null,"Note that for jobs which are running for a long time the web UI is not always the best solution, e.g. Jupyter notebooks can be quite instable when running a 30 min codeblock."),(0,r.yg)("p",null,"A quick solution for that is to run your code in scripts, using the bash terminal. You can use the ",(0,r.yg)("inlineCode",{parentName:"p"},"nohup")," prefix, and ",(0,r.yg)("inlineCode",{parentName:"p"},"&")," suffix to run your script in the background, so that you can even disconnect, and come back later to check the results and logs."),(0,r.yg)("p",null,"For example with a python script, you would do:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"nohup python my_script.py &\n")),(0,r.yg)("p",null,"The script will run in the background, and all terminal output will be stored in the file ",(0,r.yg)("inlineCode",{parentName:"p"},"nohup.out")),(0,r.yg)("p",null,"You can also check if the process is currently running by typing ",(0,r.yg)("inlineCode",{parentName:"p"},"ps aux")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"top")," "),(0,r.yg)("p",null,"You can kill the process by getting the process ID (PID) using the previous commands, and then: ",(0,r.yg)("inlineCode",{parentName:"p"},"kill -9 PID")),(0,r.yg)("h2",{id:"stop-your-application"},"Stop your application"),(0,r.yg)("p",null,"When you are not using your application anymore you can stop the pod. 
If you are using a Dynamic or Persistent storage you can restart the pod and continue working with all your data in the same state as you left it."),(0,r.yg)("admonition",{title:"Do not waste resources",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Please think of stopping applications you are not using to avoid consuming unnecessary resources.")),(0,r.yg)("p",null,"On the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," page click on the down arrow \u2b07\ufe0f next to the number of pods deployed."),(0,r.yg)("img",{src:"/img/screenshot_scaledown_pod.png",alt:"Scale down pod",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You can then restart the pod by clicking the up arrow \u2b06\ufe0f"),(0,r.yg)("p",null,"Note that starting more than 1 pod will not increase the amount of resources you have access to, most of the time it will only waste resources and might ends up in weird behavior on your side. The web UI will randomly assign you to 1 of the pod started when you access it. This only works for clusters with multiple workers, such as Apache Flink and Spark. Or if you connect directly to each pod with the terminal to run different processes."),(0,r.yg)("h2",{id:"start-your-application"},"Start your application"),(0,r.yg)("p",null,"When you try to access your workspace and you encounter the page below, usually this indicates that your pod is not running. For example, this will be the case if you stopped your pod, or if there was maintenance on the cluster."),(0,r.yg)("img",{src:"/img/screenshot_application_unavailable.png",alt:"Screenshot of page that says Application is not available",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"To start the pod, go to the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," page, and click on the up arrow \u2b06\ufe0f next to the number of pods deployed. Make sure you scale it to 1. Scaling it to more than 1 will not increase the amount of resources you have access to, most of the time it will only waste resources and causes weird behavior on your side."),(0,r.yg)("img",{src:"/img/screenshot_scaledown_pod.png",alt:"Scale down pod",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{title:"Do not waste resources",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Please only scale up resources you're using, and scale down when you're not using them anymore. Consuming resources consumes unnecessary power and might prevent other users from using the DSRI.")),(0,r.yg)("h2",{id:"optional-define-a-docker-image"},"Optional: define a docker image"),(0,r.yg)("p",null,"Once you have tested your workspace and you know how to set it up it can be helpful to define a ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," to build and publish a Docker image with everything directly installed (instead of installing your requirements after starting a generic workspace)"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Start from an existing generic Docker image, depending on the base technologies you need, such as Debian, Ubuntu, Python, JupyterLab, VisualStudio Code, RStudio..."),(0,r.yg)("li",{parentName:"ol"},"Add your source code in the Docker image using ",(0,r.yg)("inlineCode",{parentName:"li"},"ADD . .")," or ",(0,r.yg)("inlineCode",{parentName:"li"},"COPY . .")),(0,r.yg)("li",{parentName:"ol"},"Install dependencies (e.g. ",(0,r.yg)("inlineCode",{parentName:"li"},"RUN apt-get install gfortran"),")"),(0,r.yg)("li",{parentName:"ol"},"Define which command to run when starting the container (e.g. 
",(0,r.yg)("inlineCode",{parentName:"li"},'ENTRYPOINT["jupyter", "lab"]'),")")),(0,r.yg)("p",null,"Here is a simple example ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," for a python application:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-dockerfile"},'# The base image to start from, choose the one with everything you need installed\nFROM python:3.8\n\n# Change the user and working directory to make sure we are using root\nUSER root\nWORKDIR /root\n\n# Install additional packages\nRUN apt-get update && \\\n apt-get install build-essentials\n\n# This line will copy all files and folder that are in the same folder as the Dockerfile (usually the code you want to run in the container)\nADD . . \n\n# This line will install all the python packages described in the requirements.txt of your source code\nRUN pip install -r requirements.txt && \\\n pip install notebook jupyterlab\n\n# Command to run when the container is started, here it starts JupyterLab as a service\nENTRYPOINT [ "jupyter", "lab" ]\n')),(0,r.yg)("p",null,"Here are some examples of ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," for various type of web applications:"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/jupyterlab/blob/main/Dockerfile"},"Custom JupyterLab")," based on the official ",(0,r.yg)("a",{parentName:"li",href:"https://github.com/jupyter/docker-stacks"},"jupyter/docker-stacks")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/rstudio/blob/main/Dockerfile"},"Custom RStudio")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/code-server/blob/main/Dockerfile"},"VisualStudio Code server")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/knowledge-collaboratory-api/blob/master/Dockerfile"},"Python web app"))),(0,r.yg)("p",null,"See the guide to ",(0,r.yg)("a",{parentName:"p",href:"/docs/guide-publish-image"},"Publish a Docker image")," for more details on this topic."))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6970],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>d});var n=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var l=n.createContext({}),p=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},c=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},y=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,r=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),y=p(a),d=o,h=y["".concat(l,".").concat(d)]||y[d]||u[d]||r;return a?n.createElement(h,i(i({ref:t},c),{},{components:a})):n.createElement(h,i({ref:t},c))}));function d(e,t){var 
a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=a.length,i=new Array(r);i[0]=y;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:o,i[1]=s;for(var p=2;p{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>l,default:()=>d,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var n=a(9668),o=a(1367),r=(a(6540),a(5680)),i=["components"],s={id:"start-workspace",title:"Start your workspace"},l=void 0,p={unversionedId:"start-workspace",id:"start-workspace",title:"Start your workspace",description:"This page will help you to start a workspace to run your code and experiments on the DSRI in a container.",source:"@site/docs/start-workspace.md",sourceDirName:".",slug:"/start-workspace",permalink:"/docs/start-workspace",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/start-workspace.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"start-workspace",title:"Start your workspace"},sidebar:"docs",previous:{title:"Access the DSRI",permalink:"/docs/access-dsri"},next:{title:"Install the client",permalink:"/docs/openshift-install"}},c={},u=[{value:"Introduction to containers",id:"introduction-to-containers",level:2},{value:"Choose your interface",id:"choose-your-interface",level:2},{value:"Start your workspace",id:"start-your-workspace",level:2},{value:"Upload your code and data",id:"upload-your-code-and-data",level:2},{value:"Install your dependencies",id:"install-your-dependencies",level:2},{value:"Run your code",id:"run-your-code",level:2},{value:"Stop your application",id:"stop-your-application",level:2},{value:"Start your application",id:"start-your-application",level:2},{value:"Optional: define a docker image",id:"optional-define-a-docker-image",level:2}],y={toc:u};function d(e){var t=e.components,a=(0,o.A)(e,i);return(0,r.yg)("wrapper",(0,n.A)({},y,a,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("p",null,"This page will help you to start a workspace to run your code and experiments on the DSRI in a container. "),(0,r.yg)("h2",{id:"introduction-to-containers"},"Introduction to containers"),(0,r.yg)("p",null,"Anything running in DSRI needs to be running in a docker container. Docker containers are namespaces that share the kernel on a linux system, you can see them as a clean minimalist Linux computers with only what you need to run your programs installed. This allows you to completely control the environment where your code runs, and avoid conflicts."),(0,r.yg)("p",null,"When running experiments we can start from existing images that have been already published for popular data science applications with a web interface. You can use, for example, JupyterLab when running python, RStudio when running R, or VisualStudio Code if you prefer."),(0,r.yg)("p",null,"Once you access a running container, you can install anything you need like if it was a linux/ubuntu computer (most of them runs with admin privileges), and run anything via the notebook/RStudio/VSCode interface, or the terminal."),(0,r.yg)("h2",{id:"choose-your-interface"},"Choose your interface"),(0,r.yg)("p",null,"First step to get your code running on the DSRI is to pick the base interface you want to use to access your workspace on the DSRI."),(0,r.yg)("p",null,"We prepared generic Docker images for data science workspaces with your favorite web UI pre-installed to easily deploy your workspace. 
So you just need to choose your favorite workspace, start the container, access it, add your code, and install your dependencies."),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Log in to the DSRI dashboard"),(0,r.yg)("li",{parentName:"ol"},"Select your project, or create one with a meaningful short name representing your project, e.g. ",(0,r.yg)("inlineCode",{parentName:"li"},"workspace-yourname")),(0,r.yg)("li",{parentName:"ol"},"Go to the ",(0,r.yg)("strong",{parentName:"li"},"+Add")," page, and select to add ",(0,r.yg)("strong",{parentName:"li"},"From Developer Catalog => All services"))),(0,r.yg)("img",{src:"/img/screenshot_access_catalog_manu17112024.png",alt:"Access catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("ol",{start:4},(0,r.yg)("li",{parentName:"ol"},"Search for templates corresponding to the application you want to deploy among the ones described below (make sure the filter for templates is properly checked). ")),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"JupyterLab"),": Access and run your code using the popular Jupyter notebooks, with kernels for python, java, R and julia. It also provides a good web interface to access the terminal, upload and browse the files."),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"VisualStudio Code"),": Your daily IDE, but in your browser, running on the DSRI. "),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"RStudio"),": R users' favorite."),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"The terminal"),": For people who are used to the terminal and just want to run scripts, it provides smaller and more stable images, which makes installation and deployment easier. You can use the ",(0,r.yg)("strong",{parentName:"p"},"Ubuntu")," template to start a basic ubuntu image and access it from the terminal."),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"Any web interface"),": You can easily run and access most programs with a web interface on the DSRI. You can use the template ",(0,r.yg)("strong",{parentName:"p"},"Custom workspace")," if your application is exposed on port 8888. Otherwise, visit the page ",(0,r.yg)("a",{parentName:"p",href:"/docs/anatomy-of-an-application"},"Anatomy of a DSRI application")," for more details. "),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"Desktop interface"),": There is the possibility to start a container as a Linux operating system with a graphical desktop interface. It can be useful to deploy software like Matlab, but the setup can be a bit more complex. You will get an Ubuntu computer with a basic Desktop interface, running on the DSRI, that you can access directly in your web browser. The desktop interface is accessed through a web application by using noVNC, which exposes the VNC connection without needing a VNC client."),(0,r.yg)("admonition",{title:"More applications",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"You can also find more documentation on the different applications that can be deployed from the DSRI under ",(0,r.yg)("strong",{parentName:"p"},"Deploy applications")," in the menu on the left.")),(0,r.yg)("h2",{id:"start-your-workspace"},"Start your workspace"),(0,r.yg)("p",null,"Once you have chosen your favorite way to run your experiments, you can click on the application you want to use for your workspace. Check out the description to learn more details about the application that will be deployed. 
"),(0,r.yg)("p",null,"Then click on ",(0,r.yg)("strong",{parentName:"p"},"Instantiate Template"),", and fill the parameters, such as the password to access the web UI. Note that the application name needs to be unique in the project. Finally click on the ",(0,r.yg)("strong",{parentName:"p"},"Create")," button."),(0,r.yg)("img",{src:"/img/screenshot_template_configuration.png",alt:"Filter templates catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You should see your application in your project dashboard, it can take a few seconds to a few minutes to pull the docker image and start the application. "),(0,r.yg)("p",null,"Once the application has started you will be able to access it by clicking on its circle, then click the ",(0,r.yg)("strong",{parentName:"p"},"Route"),", that has been automatically generated for the web interface, in the Resources tab."),(0,r.yg)("admonition",{title:"Check the workshop",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"For a more detailed tutorial, you can follow the ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-workshop-start-app/"},"workshop to start Data Science applications on the DSRI"))),(0,r.yg)("h2",{id:"upload-your-code-and-data"},"Upload your code and data"),(0,r.yg)("p",null,"We recommend you to use ",(0,r.yg)("inlineCode",{parentName:"p"},"git")," to clone your project code in your workspace, as it helps sharing and managing the evolution of your project. "),(0,r.yg)("p",null,"It will be preinstalled in most images, otherwise you can install it easily with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt-get install git")),(0,r.yg)("p",null,"With web interface like JupyterLab, VisualStudio Code and Rstudio you can easily upload small and medium size files directly through the UI with a drag and drop."),(0,r.yg)("p",null,"Otherwise you can use the terminal, install the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," client, and use the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc cp")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"oc rsync")," commands to upload large files to your workspace on the DSRI. See the Upload data page for more details."),(0,r.yg)("h2",{id:"install-your-dependencies"},"Install your dependencies"),(0,r.yg)("p",null,"Once the workspace is started, you can install the different dependencies you need to run your experiments."),(0,r.yg)("p",null,"It is recommended to save all the commands you used to install the different requirements in a script (e.g. ",(0,r.yg)("inlineCode",{parentName:"p"},"install.sh"),"). This will insure you can reinstall the environment easily and faithfully if the container is restarted. You can also use them to create a Docker image with everything prepared for your application."),(0,r.yg)("p",null,"Most containers for science are based on debian/ubuntu, so you can install new packages with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt-get"),":"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"apt-get update\napt-get install -y build-essentials wget curl\n")),(0,r.yg)("h2",{id:"run-your-code"},"Run your code"),(0,r.yg)("p",null,"You can use your web interface to run your code as you like to do: notebooks, rstudio, execute via VSCode"),(0,r.yg)("p",null,"Note that for jobs which are running for a long time the web UI is not always the best solution, e.g. 
Jupyter notebooks can be quite unstable when running a 30 min code block."),(0,r.yg)("p",null,"A quick solution for that is to run your code in scripts, using the bash terminal. You can use the ",(0,r.yg)("inlineCode",{parentName:"p"},"nohup")," prefix and the ",(0,r.yg)("inlineCode",{parentName:"p"},"&")," suffix to run your script in the background, so that you can even disconnect, and come back later to check the results and logs."),(0,r.yg)("p",null,"For example, with a python script you would do:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"nohup python my_script.py &\n")),(0,r.yg)("p",null,"The script will run in the background, and all terminal output will be stored in the file ",(0,r.yg)("inlineCode",{parentName:"p"},"nohup.out"),"."),(0,r.yg)("p",null,"You can also check if the process is currently running by typing ",(0,r.yg)("inlineCode",{parentName:"p"},"ps aux")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"top"),"."),(0,r.yg)("p",null,"You can kill the process by getting the process ID (PID) using the previous commands, and then running ",(0,r.yg)("inlineCode",{parentName:"p"},"kill -9 PID"),"."),(0,r.yg)("h2",{id:"stop-your-application"},"Stop your application"),(0,r.yg)("p",null,"When you are not using your application anymore, you can stop the pod. If you are using Dynamic or Persistent storage, you can restart the pod and continue working with all your data in the same state as you left it."),(0,r.yg)("admonition",{title:"Do not waste resources",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Please remember to stop applications you are not using, to avoid consuming unnecessary resources.")),(0,r.yg)("p",null,"On the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," page, click on the down arrow \u2b07\ufe0f next to the number of pods deployed."),(0,r.yg)("img",{src:"/img/screenshot_scaledown_pod.png",alt:"Scale down pod",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You can then restart the pod by clicking the up arrow \u2b06\ufe0f."),(0,r.yg)("p",null,"Note that starting more than 1 pod will not increase the amount of resources you have access to; most of the time it will only waste resources and might end up causing weird behavior on your side. The web UI will randomly assign you to one of the started pods when you access it. Running multiple pods only makes sense for clusters with multiple workers, such as Apache Flink and Spark, or if you connect directly to each pod with the terminal to run different processes."),(0,r.yg)("h2",{id:"start-your-application"},"Start your application"),(0,r.yg)("p",null,"When you try to access your workspace and you encounter the page below, it usually indicates that your pod is not running. For example, this will be the case if you stopped your pod, or if there was maintenance on the cluster."),(0,r.yg)("img",{src:"/img/screenshot_application_unavailable.png",alt:"Screenshot of page that says Application is not available",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"To start the pod, go to the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," page, and click on the up arrow \u2b06\ufe0f next to the number of pods deployed. Make sure you scale it to 1. 
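"),(0,r.yg)("p",null,"If you prefer the terminal, you can also scale your application up or down with the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," client (a minimal sketch, assuming your workspace is managed by a deployment called ",(0,r.yg)("inlineCode",{parentName:"p"},"my-workspace"),", which is an illustrative name, not one from this page):"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"# Scale the (hypothetical) my-workspace deployment back up to 1 pod\noc scale deployment/my-workspace --replicas=1\n# Scale it down to 0 pods when you stop working\noc scale deployment/my-workspace --replicas=0\n")),(0,r.yg)("p",null,"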
Scaling it to more than 1 will not increase the amount of resources you have access to; most of the time it will only waste resources and cause weird behavior on your side."),(0,r.yg)("img",{src:"/img/screenshot_scaledown_pod.png",alt:"Scale down pod",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{title:"Do not waste resources",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Please only scale up resources you're using, and scale down when you're not using them anymore. Keeping unused applications running consumes unnecessary power and might prevent other users from using the DSRI.")),(0,r.yg)("h2",{id:"optional-define-a-docker-image"},"Optional: define a docker image"),(0,r.yg)("p",null,"Once you have tested your workspace and know how to set it up, it can be helpful to define a ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," to build and publish a Docker image with everything directly installed (instead of installing your requirements after starting a generic workspace)."),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Start from an existing generic Docker image, depending on the base technologies you need, such as Debian, Ubuntu, Python, JupyterLab, VisualStudio Code, RStudio..."),(0,r.yg)("li",{parentName:"ol"},"Add your source code to the Docker image using ",(0,r.yg)("inlineCode",{parentName:"li"},"ADD . .")," or ",(0,r.yg)("inlineCode",{parentName:"li"},"COPY . .")),(0,r.yg)("li",{parentName:"ol"},"Install dependencies (e.g. ",(0,r.yg)("inlineCode",{parentName:"li"},"RUN apt-get install gfortran"),")"),(0,r.yg)("li",{parentName:"ol"},"Define which command to run when starting the container (e.g. ",(0,r.yg)("inlineCode",{parentName:"li"},'ENTRYPOINT["jupyter", "lab"]'),")")),(0,r.yg)("p",null,"Here is a simple example ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," for a python application:"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-dockerfile"},'# The base image to start from, choose the one with everything you need installed\nFROM python:3.8\n\n# Change the user and working directory to make sure we are using root\nUSER root\nWORKDIR /root\n\n# Install additional packages\nRUN apt-get update && \\\n apt-get install -y build-essential\n\n# This line will copy all files and folders that are in the same folder as the Dockerfile (usually the code you want to run in the container)\nADD . . 
\n\n# This line will install all the python packages described in the requirements.txt of your source code\nRUN pip install -r requirements.txt && \\\n pip install notebook jupyterlab\n\n# Command to run when the container is started, here it starts JupyterLab as a service\nENTRYPOINT [ "jupyter", "lab" ]\n')),(0,r.yg)("p",null,"Here are some examples of ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile")," for various types of web applications:"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/jupyterlab/blob/main/Dockerfile"},"Custom JupyterLab")," based on the official ",(0,r.yg)("a",{parentName:"li",href:"https://github.com/jupyter/docker-stacks"},"jupyter/docker-stacks")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/rstudio/blob/main/Dockerfile"},"Custom RStudio")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/code-server/blob/main/Dockerfile"},"VisualStudio Code server")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/knowledge-collaboratory-api/blob/master/Dockerfile"},"Python web app"))),(0,r.yg)("p",null,"See the guide to ",(0,r.yg)("a",{parentName:"p",href:"/docs/guide-publish-image"},"Publish a Docker image")," for more details on this topic."))}d.isMDXComponent=!0}}]); \ No newline at end of file
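To try the example `Dockerfile` above before deploying it, here is a minimal sketch of building, testing and publishing the image; the image name `ghcr.io/my-org/my-workspace` is a placeholder for any registry you can push to:

```bash
# Build the image from the folder containing the Dockerfile
docker build -t ghcr.io/my-org/my-workspace:latest .

# Test it locally; JupyterLab listens on port 8888 by default
# (you may need to start it with --ip=0.0.0.0 to reach it from the host)
docker run --rm -it -p 8888:8888 ghcr.io/my-org/my-workspace:latest

# Publish the image so it can be deployed on the DSRI
docker push ghcr.io/my-org/my-workspace:latest
```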
workflows"},s=void 0,p={unversionedId:"workflows-introduction",id:"workflows-introduction",title:"Introduction to workflows",description:"Running workflows on the DSRI is a work in progress. It usually requires some knowledge about how to orchestrate containers.",source:"@site/docs/workflows-introduction.md",sourceDirName:".",slug:"/workflows-introduction",permalink:"/docs/workflows-introduction",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-introduction.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-introduction",title:"Introduction to workflows"},sidebar:"docs",previous:{title:"Glossary",permalink:"/docs/glossary"},next:{title:"Deploy GitHub Runners",permalink:"/docs/workflows-github-actions"}},u={},c=[{value:"Introduction",id:"introduction",level:2},{value:"Current solutions on the DSRI",id:"current-solutions-on-the-dsri",level:2},{value:"GitHub Actions workflows",id:"github-actions-workflows",level:3},{value:"Apache Airflow",id:"apache-airflow",level:3},{value:"Argo",id:"argo",level:3},{value:"More options",id:"more-options",level:2},{value:"Kubeflow",id:"kubeflow",level:3},{value:"Apache Airflow",id:"apache-airflow-1",level:3},{value:"Volcano",id:"volcano",level:3},{value:"Nextflow",id:"nextflow",level:3},{value:"CWL",id:"cwl",level:3},{value:"KubeGene",id:"kubegene",level:3},{value:"Seldon",id:"seldon",level:3}],d={toc:c};function f(e){var o=e.components,t=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,n.A)({},d,t,{components:o,mdxType:"MDXLayout"}),(0,a.yg)("admonition",{title:"Work in progress",type:"warning"},(0,a.yg)("p",{parentName:"admonition"},"Running workflows on the DSRI is a work in progress. It usually requires some knowledge about how to orchestrate containers.")),(0,a.yg)("h2",{id:"introduction"},"Introduction"),(0,a.yg)("p",null,"Multiple technologies are available to run workflows on OpenShift/Kubernetes clusters. Each has its strengths and weaknesses in different areas."),(0,a.yg)("admonition",{title:"Use-case dependant",type:"caution"},(0,a.yg)("p",{parentName:"admonition"},"The technology to use needs to be ",(0,a.yg)("strong",{parentName:"p"},"chosen depending on your use-case"),".")),(0,a.yg)("h2",{id:"current-solutions-on-the-dsri"},"Current solutions on the DSRI"),(0,a.yg)("p",null,"Those solutions can easily be deployed on the DSRI. Let"),(0,a.yg)("h3",{id:"github-actions-workflows"},"GitHub Actions workflows"),(0,a.yg)("p",null,"GitHub Actions allows you to define automatically containerized workflows through a simple YAML file hosted in your GitHub repository."),(0,a.yg)("p",null,"See the ",(0,a.yg)("a",{parentName:"p",href:"/docs/workflows-github-actions"},"page about GitHub Actions runners")," for more details, and to deploy runners on the DSRI."),(0,a.yg)("h3",{id:"apache-airflow"},"Apache Airflow"),(0,a.yg)("p",null,"Airflow is a platform to programmatically author, schedule and monitor workflows, aka. 
DAGs (directed acyclic graphs)."),(0,a.yg)("p",null,"See the ",(0,a.yg)("a",{parentName:"p",href:"/docs/workflows-airflow"},"page about Airflow")," for more details, and to deploy Airflow on the DSRI."),(0,a.yg)("h3",{id:"argo"},"Argo"),(0,a.yg)("p",null,(0,a.yg)("a",{parentName:"p",href:"https://argoproj.github.io/argo/"},"Argo")," is a container native workflow engine for ",(0,a.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes")," supporting both ",(0,a.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html#dag"},"DAG")," and ",(0,a.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html#steps"},"step based")," workflows."),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Workflows are easy to define using Kubernetes-like YAML files."),(0,a.yg)("li",{parentName:"ul"},"Easy to define if your workflow is composed of Docker containers to run with arguments.")),(0,a.yg)("admonition",{title:"Contact us",type:"info"},(0,a.yg)("p",{parentName:"admonition"},(0,a.yg)("a",{parentName:"p",href:"/help"},"Contact us")," if you want to run Argo workflows on the DSRI.")),(0,a.yg)("h2",{id:"more-options"},"More options"),(0,a.yg)("p",null,"Let us know if you are interested in deploying and using any of these workflows on the DSRI."),(0,a.yg)("h3",{id:"kubeflow"},"Kubeflow"),(0,a.yg)("p",null,"Optimized for TensorFlow workflows on Kubernetes."),(0,a.yg)("p",null,"Pipelines written in Python."),(0,a.yg)("h3",{id:"apache-airflow-1"},"Apache Airflow"),(0,a.yg)("p",null,"Define, schedule and run workflows. "),(0,a.yg)("p",null,"Can be deployed with OpenDataHub, see also ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/majordomusio/openshift-airflow"},"this deployment for OpenShift"),"."),(0,a.yg)("p",null,"See also: Airflow on ",(0,a.yg)("a",{parentName:"p",href:"https://kubernetes.io/blog/2018/06/28/airflow-on-kubernetes-part-1-a-different-kind-of-operator/"},"Kubernetes blog"),", and Kubernetes in ",(0,a.yg)("a",{parentName:"p",href:"https://airflow.apache.org/docs/stable/kubernetes.html"},"Airflow documentation"),"."),(0,a.yg)("h3",{id:"volcano"},"Volcano"),(0,a.yg)("p",null,"Run batch pipelines on Kubernetes with ",(0,a.yg)("a",{parentName:"p",href:"https://volcano.sh/"},"Volcano"),". "),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"More a scheduler than a workflow engine. ")),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"Volcano can be used to run Spark, Kubeflow or KubeGene workflows."))),(0,a.yg)("h3",{id:"nextflow"},"Nextflow"),(0,a.yg)("p",null,(0,a.yg)("a",{parentName:"p",href:"https://www.nextflow.io/"},"Nextflow")," has been developed by the genomic research scientific community and is built to run bioinformatics pipelines."),(0,a.yg)("p",null,"Define your workflow in a Bash script fashion, providing input, output and the command to run, 
without the need to create and use Docker containers for Conda pipelines."),(0,a.yg)("h3",{id:"cwl"},"CWL"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Developed by the genomic research scientific community."),(0,a.yg)("li",{parentName:"ul"},"Good support for provenance description (export as RDF)."),(0,a.yg)("li",{parentName:"ul"},"Support on OpenShift still in development",(0,a.yg)("ul",{parentName:"li"},(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("a",{parentName:"li",href:"https://airflow.apache.org/docs/stable/kubernetes.html"},"Apache Airflow")),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("a",{parentName:"li",href:"https://github.com/Duke-GCB/calrissian/"},"Calrissian")))),(0,a.yg)("li",{parentName:"ul"},"Proposes a GUI to build the workflows: ",(0,a.yg)("a",{parentName:"li",href:"https://rabix.io/"},"Rabix Composer"))),(0,a.yg)("h3",{id:"kubegene"},"KubeGene"),(0,a.yg)("p",null,(0,a.yg)("a",{parentName:"p",href:"https://kubegene.io/"},"KubeGene")," is a turn-key genome sequencing workflow management framework."),(0,a.yg)("p",null,"See the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubegene/kubegene/blob/master/example/simple-sample/simple-sample.yaml"},"Workflow example"),", and how to ",(0,a.yg)("a",{parentName:"p",href:"https://kubegene.io/docs/guides/tool/"},"define a tool"),"."),(0,a.yg)("h3",{id:"seldon"},"Seldon"),(0,a.yg)("p",null,(0,a.yg)("a",{parentName:"p",href:"https://www.seldon.io/tech/"},"Open-source platform")," for rapidly deploying machine learning models on Kubernetes. Manage, serve and scale models built in any framework on Kubernetes."),(0,a.yg)("admonition",{title:"Contact us",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"Feel free to ",(0,a.yg)("a",{parentName:"p",href:"/help"},"contact us")," if you have any questions about running workflows on DSRI or to request the support of a new technology.")))}f.isMDXComponent=!0}}]); \ No newline at end of file
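To make the Argo bullet points above concrete, here is a minimal sketch of a workflow in the Kubernetes-like YAML Argo expects; the names and image are illustrative, not a DSRI-provided example:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: hello-      # Argo appends a random suffix
spec:
  entrypoint: say-hello
  templates:
    - name: say-hello
      container:            # a step is just a Docker container run with arguments
        image: docker/whalesay
        command: [cowsay]
        args: ["hello from the DSRI"]
```

Once Argo is available in your project, such a file could be submitted with `argo submit hello-workflow.yaml`.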
p=2;p{t.r(o),t.d(o,{assets:()=>u,contentTitle:()=>s,default:()=>f,frontMatter:()=>l,metadata:()=>p,toc:()=>c});var n=t(9668),r=t(1367),a=(t(6540),t(5680)),i=["components"],l={id:"workflows-introduction",title:"Introduction to workflows"},s=void 0,p={unversionedId:"workflows-introduction",id:"workflows-introduction",title:"Introduction to workflows",description:"Running workflows on the DSRI is a work in progress. It usually requires some knowledge about how to orchestrate containers.",source:"@site/docs/workflows-introduction.md",sourceDirName:".",slug:"/workflows-introduction",permalink:"/docs/workflows-introduction",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-introduction.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-introduction",title:"Introduction to workflows"},sidebar:"docs",previous:{title:"Glossary",permalink:"/docs/glossary"},next:{title:"Deploy GitHub Runners",permalink:"/docs/workflows-github-actions"}},u={},c=[{value:"Introduction",id:"introduction",level:2},{value:"Current solutions on the DSRI",id:"current-solutions-on-the-dsri",level:2},{value:"GitHub Actions workflows",id:"github-actions-workflows",level:3},{value:"Apache Airflow",id:"apache-airflow",level:3},{value:"Argo",id:"argo",level:3},{value:"More options",id:"more-options",level:2},{value:"Kubeflow",id:"kubeflow",level:3},{value:"Apache Airflow",id:"apache-airflow-1",level:3},{value:"Volcano",id:"volcano",level:3},{value:"Nextflow",id:"nextflow",level:3},{value:"CWL",id:"cwl",level:3},{value:"KubeGene",id:"kubegene",level:3},{value:"Seldon",id:"seldon",level:3}],d={toc:c};function f(e){var o=e.components,t=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,n.A)({},d,t,{components:o,mdxType:"MDXLayout"}),(0,a.yg)("admonition",{title:"Work in progress",type:"warning"},(0,a.yg)("p",{parentName:"admonition"},"Running workflows on the DSRI is a work in progress. It usually requires some knowledge about how to orchestrate containers.")),(0,a.yg)("h2",{id:"introduction"},"Introduction"),(0,a.yg)("p",null,"Multiple technologies are available to run workflows on OpenShift/Kubernetes clusters. Each has its strengths and weaknesses in different areas."),(0,a.yg)("admonition",{title:"Use-case dependent",type:"caution"},(0,a.yg)("p",{parentName:"admonition"},"The technology to use needs to be ",(0,a.yg)("strong",{parentName:"p"},"chosen depending on your use-case"),".")),(0,a.yg)("h2",{id:"current-solutions-on-the-dsri"},"Current solutions on the DSRI"),(0,a.yg)("p",null,"These solutions can easily be deployed on the DSRI."),(0,a.yg)("h3",{id:"github-actions-workflows"},"GitHub Actions workflows"),(0,a.yg)("p",null,"GitHub Actions allows you to define containerized workflows through a simple YAML file hosted in your GitHub repository, and run them automatically."),(0,a.yg)("p",null,"See the ",(0,a.yg)("a",{parentName:"p",href:"/docs/workflows-github-actions"},"page about GitHub Actions runners")," for more details, and to deploy runners on the DSRI."),(0,a.yg)("h3",{id:"apache-airflow"},"Apache Airflow"),(0,a.yg)("p",null,"Airflow is a platform to programmatically author, schedule and monitor workflows, aka 
DAGs (directed acyclic graphs)."),(0,a.yg)("p",null,"See the ",(0,a.yg)("a",{parentName:"p",href:"/docs/workflows-airflow"},"page about Airflow")," for more details, and to deploy Airflow on the DSRI."),(0,a.yg)("h3",{id:"argo"},"Argo"),(0,a.yg)("p",null,(0,a.yg)("a",{parentName:"p",href:"https://argoproj.github.io/argo/"},"Argo")," is a container native workflow engine for ",(0,a.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes")," supporting both ",(0,a.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html#dag"},"DAG")," and ",(0,a.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/examples/readme.html#steps"},"step based")," workflows."),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Workflows are easy to define using Kubernetes-like YAML files."),(0,a.yg)("li",{parentName:"ul"},"Easy to define if your workflow is composed of Docker containers to run with arguments.")),(0,a.yg)("admonition",{title:"Contact us",type:"info"},(0,a.yg)("p",{parentName:"admonition"},(0,a.yg)("a",{parentName:"p",href:"/help"},"Contact us")," if you want to run Argo workflows on the DSRI.")),(0,a.yg)("h2",{id:"more-options"},"More options"),(0,a.yg)("p",null,"Let us know if you are interested in deploying and using any of these workflows on the DSRI."),(0,a.yg)("h3",{id:"kubeflow"},"Kubeflow"),(0,a.yg)("p",null,"Optimized for TensorFlow workflows on Kubernetes."),(0,a.yg)("p",null,"Pipelines written in Python."),(0,a.yg)("h3",{id:"apache-airflow-1"},"Apache Airflow"),(0,a.yg)("p",null,"Define, schedule and run workflows. "),(0,a.yg)("p",null,"Can be deployed with OpenDataHub, see also ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/majordomusio/openshift-airflow"},"this deployment for OpenShift"),"."),(0,a.yg)("p",null,"See also: Airflow on ",(0,a.yg)("a",{parentName:"p",href:"https://kubernetes.io/blog/2018/06/28/airflow-on-kubernetes-part-1-a-different-kind-of-operator/"},"Kubernetes blog"),", and Kubernetes in ",(0,a.yg)("a",{parentName:"p",href:"https://airflow.apache.org/docs/stable/kubernetes.html"},"Airflow documentation"),"."),(0,a.yg)("h3",{id:"volcano"},"Volcano"),(0,a.yg)("p",null,"Run batch pipelines on Kubernetes with ",(0,a.yg)("a",{parentName:"p",href:"https://volcano.sh/"},"Volcano"),". "),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"More a scheduler than a workflow engine. ")),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"Volcano can be used to run Spark, Kubeflow or KubeGene workflows."))),(0,a.yg)("h3",{id:"nextflow"},"Nextflow"),(0,a.yg)("p",null,(0,a.yg)("a",{parentName:"p",href:"https://www.nextflow.io/"},"Nextflow")," has been developed by the genomic research scientific community and is built to run bioinformatics pipelines."),(0,a.yg)("p",null,"Define your workflow in a Bash script fashion, providing input, output and the command to run, 
without the need to create and use Docker containers for Conda pipelines."),(0,a.yg)("h3",{id:"cwl"},"CWL"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Developed by the genomic research scientific community."),(0,a.yg)("li",{parentName:"ul"},"Good support for provenance description (export as RDF)."),(0,a.yg)("li",{parentName:"ul"},"Support on OpenShift still in development",(0,a.yg)("ul",{parentName:"li"},(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("a",{parentName:"li",href:"https://airflow.apache.org/docs/stable/kubernetes.html"},"Apache Airflow")),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("a",{parentName:"li",href:"https://github.com/Duke-GCB/calrissian/"},"Calrissian")))),(0,a.yg)("li",{parentName:"ul"},"Proposes a GUI to build the workflows: ",(0,a.yg)("a",{parentName:"li",href:"https://rabix.io/"},"Rabix Composer"))),(0,a.yg)("h3",{id:"kubegene"},"KubeGene"),(0,a.yg)("p",null,(0,a.yg)("a",{parentName:"p",href:"https://kubegene.io/"},"KubeGene")," is a turn-key genome sequencing workflow management framework."),(0,a.yg)("p",null,"See the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubegene/kubegene/blob/master/example/simple-sample/simple-sample.yaml"},"Workflow example"),", and how to ",(0,a.yg)("a",{parentName:"p",href:"https://kubegene.io/docs/guides/tool/"},"define a tool"),"."),(0,a.yg)("h3",{id:"seldon"},"Seldon"),(0,a.yg)("p",null,(0,a.yg)("a",{parentName:"p",href:"https://www.seldon.io/tech/"},"Open-source platform")," for rapidly deploying machine learning models on Kubernetes. Manage, serve and scale models built in any framework on Kubernetes."),(0,a.yg)("admonition",{title:"Contact us",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"Feel free to ",(0,a.yg)("a",{parentName:"p",href:"/help"},"contact us")," if you have any questions about running workflows on DSRI or to request the support of a new technology.")))}f.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/9bfda053.12c4ad81.js b/assets/js/9bfda053.43b3657e.js similarity index 99% rename from assets/js/9bfda053.12c4ad81.js rename to assets/js/9bfda053.43b3657e.js index e1e2e1a58..dd4053241 100644 --- a/assets/js/9bfda053.12c4ad81.js +++ b/assets/js/9bfda053.43b3657e.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8151],{5680:(e,t,a)=>{a.d(t,{xA:()=>m,yg:()=>u});var r=a(6540);function n(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function o(e){for(var t=1;t=0||(n[a]=e[a]);return n}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(n[a]=e[a])}return n}var s=r.createContext({}),g=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},m=function(e){var t=g(e.components);return r.createElement(s.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var a=e.components,n=e.mdxType,i=e.originalType,s=e.parentName,m=l(e,["components","mdxType","originalType","parentName"]),d=g(a),u=n,c=d["".concat(s,".").concat(u)]||d[u]||p[u]||i;return a?r.createElement(c,o(o({ref:t},m),{},{components:a})):r.createElement(c,o({ref:t},m))}));function 
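As an illustration of the GitHub Actions section above, a minimal sketch of a workflow file that a runner deployed on the DSRI could pick up; the filename, container image and script are hypothetical:

```yaml
# .github/workflows/run-analysis.yml
name: run-analysis
on: [push]
jobs:
  analysis:
    runs-on: self-hosted      # matched by a self-hosted runner on the DSRI
    container: python:3.8     # run the job inside a container
    steps:
      - uses: actions/checkout@v3
      - run: |
          pip install -r requirements.txt
          python my_script.py
```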
u(e,t){var a=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var i=a.length,o=new Array(i);o[0]=d;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,o[1]=l;for(var g=2;g{a.r(t),a.d(t,{assets:()=>m,contentTitle:()=>s,default:()=>u,frontMatter:()=>l,metadata:()=>g,toc:()=>p});var r=a(9668),n=a(1367),i=(a(6540),a(5680)),o=["components"],l={id:"dask-cluster",title:"Deploy Dask Cluster"},s=void 0,g={unversionedId:"dask-cluster",id:"dask-cluster",title:"Deploy Dask Cluster",description:"\ud83e\uddca Installation with Helm",source:"@site/docs/dask-cluster.md",sourceDirName:".",slug:"/dask-cluster",permalink:"/docs/dask-cluster",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/dask-cluster.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"dask-cluster",title:"Deploy Dask Cluster"},sidebar:"docs",previous:{title:"JupyterHub",permalink:"/docs/deploy-jupyterhub"},next:{title:"Spark cluster",permalink:"/docs/deploy-spark"}},m={},p=[{value:"\ud83e\uddca Installation with Helm",id:"-installation-with-helm",level:2},{value:"\ud83e\ude90 Configure a Route for the Cluster",id:"-configure-a-route-for-the-cluster",level:3},{value:"\ud83e\ude90 Access the Jupyter Password/Token",id:"-access-the-jupyter-passwordtoken",level:3}],d={toc:p};function u(e){var t=e.components,a=(0,n.A)(e,o);return(0,i.yg)("wrapper",(0,r.A)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("h2",{id:"-installation-with-helm"},"\ud83e\uddca Installation with Helm"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},"Go to the ",(0,i.yg)("strong",{parentName:"li"},"+Add")," page, and select to add ",(0,i.yg)("strong",{parentName:"li"},"Helm Chart"))),(0,i.yg)("img",{src:"/img/dask-init1.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:2},(0,i.yg)("li",{parentName:"ol"},"Search and Select the ",(0,i.yg)("strong",{parentName:"li"},"Dask chart")," then click on ",(0,i.yg)("strong",{parentName:"li"},"Create"))),(0,i.yg)("img",{src:"/img/dask-init2.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-init3.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:3},(0,i.yg)("li",{parentName:"ol"},"Configure the Yaml file, while under the ",(0,i.yg)("inlineCode",{parentName:"li"},"Jupyter")," section:",(0,i.yg)("ul",{parentName:"li"},(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},'Command: ["jupyter", "lab", "--allow-root", "--ip=0.0.0.0", "--port=8888", "--no-browser"]')),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"servicePort: 8888"))))),(0,i.yg)("img",{src:"/img/dask-init4.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-init5.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:4},(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("strong",{parentName:"li"},"Add Storage")," to the ",(0,i.yg)("strong",{parentName:"li"},"dask-jupyter")," pod as shown below")),(0,i.yg)("img",{src:"/img/dask-init6.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:5},(0,i.yg)("li",{parentName:"ol"},"Set up a new ",(0,i.yg)("strong",{parentName:"li"},"Persistent Volume Claim")," for the cluster as shown below")),(0,i.yg)("img",{src:"/img/dask-init7.png",alt:"dask 
init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("h3",{id:"-configure-a-route-for-the-cluster"},"\ud83e\ude90 Configure a Route for the Cluster"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},"Switch to the ",(0,i.yg)("strong",{parentName:"li"},"Administrator")," view and navigate to ",(0,i.yg)("strong",{parentName:"li"},"Route"))),(0,i.yg)("img",{src:"/img/dask-route1.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-route2.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:2},(0,i.yg)("li",{parentName:"ol"},"Create a new route by clicking the button ",(0,i.yg)("strong",{parentName:"li"},"Create Route")," with the setup as shown below")),(0,i.yg)("img",{src:"/img/dask-route3.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-route4.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-route5.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:3},(0,i.yg)("li",{parentName:"ol"},"Navigate the provided link to access your local cluster")),(0,i.yg)("img",{src:"/img/dask-route6.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("h3",{id:"-access-the-jupyter-passwordtoken"},"\ud83e\ude90 Access the Jupyter Password/Token"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},"Start up the ",(0,i.yg)("strong",{parentName:"li"},"terminal"),(0,i.yg)("ul",{parentName:"li"},(0,i.yg)("li",{parentName:"ul"},"Run ",(0,i.yg)("inlineCode",{parentName:"li"},"oc get pods")," to find the full podname of the ",(0,i.yg)("strong",{parentName:"li"},"dask-jupyter")),(0,i.yg)("li",{parentName:"ul"},"Run ",(0,i.yg)("inlineCode",{parentName:"li"},"oc logs ")," and copy the token used to access the jupyter notebook")))),(0,i.yg)("img",{src:"/img/dask-route7.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}))}u.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8151],{5680:(e,t,a)=>{a.d(t,{xA:()=>m,yg:()=>u});var r=a(6540);function n(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function o(e){for(var t=1;t=0||(n[a]=e[a]);return n}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(n[a]=e[a])}return n}var s=r.createContext({}),g=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},m=function(e){var t=g(e.components);return r.createElement(s.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var a=e.components,n=e.mdxType,i=e.originalType,s=e.parentName,m=l(e,["components","mdxType","originalType","parentName"]),d=g(a),u=n,c=d["".concat(s,".").concat(u)]||d[u]||p[u]||i;return a?r.createElement(c,o(o({ref:t},m),{},{components:a})):r.createElement(c,o({ref:t},m))}));function u(e,t){var a=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var i=a.length,o=new Array(i);o[0]=d;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,o[1]=l;for(var 
g=2;g{a.r(t),a.d(t,{assets:()=>m,contentTitle:()=>s,default:()=>u,frontMatter:()=>l,metadata:()=>g,toc:()=>p});var r=a(9668),n=a(1367),i=(a(6540),a(5680)),o=["components"],l={id:"dask-cluster",title:"Deploy Dask Cluster"},s=void 0,g={unversionedId:"dask-cluster",id:"dask-cluster",title:"Deploy Dask Cluster",description:"\ud83e\uddca Installation with Helm",source:"@site/docs/dask-cluster.md",sourceDirName:".",slug:"/dask-cluster",permalink:"/docs/dask-cluster",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/dask-cluster.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"dask-cluster",title:"Deploy Dask Cluster"},sidebar:"docs",previous:{title:"JupyterHub",permalink:"/docs/deploy-jupyterhub"},next:{title:"Spark cluster",permalink:"/docs/deploy-spark"}},m={},p=[{value:"\ud83e\uddca Installation with Helm",id:"-installation-with-helm",level:2},{value:"\ud83e\ude90 Configure a Route for the Cluster",id:"-configure-a-route-for-the-cluster",level:3},{value:"\ud83e\ude90 Access the Jupyter Password/Token",id:"-access-the-jupyter-passwordtoken",level:3}],d={toc:p};function u(e){var t=e.components,a=(0,n.A)(e,o);return(0,i.yg)("wrapper",(0,r.A)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("h2",{id:"-installation-with-helm"},"\ud83e\uddca Installation with Helm"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},"Go to the ",(0,i.yg)("strong",{parentName:"li"},"+Add")," page, and select to add ",(0,i.yg)("strong",{parentName:"li"},"Helm Chart"))),(0,i.yg)("img",{src:"/img/dask-init1.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:2},(0,i.yg)("li",{parentName:"ol"},"Search and Select the ",(0,i.yg)("strong",{parentName:"li"},"Dask chart")," then click on ",(0,i.yg)("strong",{parentName:"li"},"Create"))),(0,i.yg)("img",{src:"/img/dask-init2.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-init3.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:3},(0,i.yg)("li",{parentName:"ol"},"Configure the Yaml file, while under the ",(0,i.yg)("inlineCode",{parentName:"li"},"Jupyter")," section:",(0,i.yg)("ul",{parentName:"li"},(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},'Command: ["jupyter", "lab", "--allow-root", "--ip=0.0.0.0", "--port=8888", "--no-browser"]')),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("inlineCode",{parentName:"li"},"servicePort: 8888"))))),(0,i.yg)("img",{src:"/img/dask-init4.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-init5.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:4},(0,i.yg)("li",{parentName:"ol"},(0,i.yg)("strong",{parentName:"li"},"Add Storage")," to the ",(0,i.yg)("strong",{parentName:"li"},"dask-jupyter")," pod as shown below")),(0,i.yg)("img",{src:"/img/dask-init6.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:5},(0,i.yg)("li",{parentName:"ol"},"Set up a new ",(0,i.yg)("strong",{parentName:"li"},"Persistent Volume Claim")," for the cluster as shown below")),(0,i.yg)("img",{src:"/img/dask-init7.png",alt:"dask init",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("h3",{id:"-configure-a-route-for-the-cluster"},"\ud83e\ude90 Configure a Route for the Cluster"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},"Switch to the 
",(0,i.yg)("strong",{parentName:"li"},"Administrator")," view and navigate to ",(0,i.yg)("strong",{parentName:"li"},"Route"))),(0,i.yg)("img",{src:"/img/dask-route1.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-route2.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:2},(0,i.yg)("li",{parentName:"ol"},"Create a new route by clicking the button ",(0,i.yg)("strong",{parentName:"li"},"Create Route")," with the setup as shown below")),(0,i.yg)("img",{src:"/img/dask-route3.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-route4.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("img",{src:"/img/dask-route5.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("ol",{start:3},(0,i.yg)("li",{parentName:"ol"},"Navigate the provided link to access your local cluster")),(0,i.yg)("img",{src:"/img/dask-route6.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,i.yg)("h3",{id:"-access-the-jupyter-passwordtoken"},"\ud83e\ude90 Access the Jupyter Password/Token"),(0,i.yg)("ol",null,(0,i.yg)("li",{parentName:"ol"},"Start up the ",(0,i.yg)("strong",{parentName:"li"},"terminal"),(0,i.yg)("ul",{parentName:"li"},(0,i.yg)("li",{parentName:"ul"},"Run ",(0,i.yg)("inlineCode",{parentName:"li"},"oc get pods")," to find the full podname of the ",(0,i.yg)("strong",{parentName:"li"},"dask-jupyter")),(0,i.yg)("li",{parentName:"ul"},"Run ",(0,i.yg)("inlineCode",{parentName:"li"},"oc logs ")," and copy the token used to access the jupyter notebook")))),(0,i.yg)("img",{src:"/img/dask-route7.png",alt:"dask route",style:{maxWidth:"100%",maxHeight:"100%"}}))}u.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/9e298cf7.254832af.js b/assets/js/9e298cf7.379a0bd6.js similarity index 98% rename from assets/js/9e298cf7.254832af.js rename to assets/js/9e298cf7.379a0bd6.js index ec4f61dd7..8bd3b4706 100644 --- a/assets/js/9e298cf7.254832af.js +++ b/assets/js/9e298cf7.379a0bd6.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7512],{5680:(e,t,r)=>{r.d(t,{xA:()=>c,yg:()=>u});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var s=n.createContext({}),p=function(e){var t=n.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=p(e.components);return n.createElement(s.Provider,{value:t},e.children)},g={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),m=p(r),u=a,f=m["".concat(s,".").concat(u)]||m[u]||g[u]||o;return r?n.createElement(f,i(i({ref:t},c),{},{components:r})):n.createElement(f,i({ref:t},c))}));function u(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=m;var l={};for(var s in 
t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var p=2;p{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>u,frontMatter:()=>l,metadata:()=>p,toc:()=>g});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],l={id:"catalog-imaging",title:"Imaging softwares"},s=void 0,p={unversionedId:"catalog-imaging",id:"catalog-imaging",title:"Imaging softwares",description:"Feel free to propose new services using pull requests, or to request them by creating new issues.",source:"@site/docs/catalog-imaging.md",sourceDirName:".",slug:"/catalog-imaging",permalink:"/docs/catalog-imaging",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-imaging.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-imaging",title:"Imaging softwares"},sidebar:"docs",previous:{title:"Genomics",permalink:"/docs/catalog-genomics"},next:{title:"Utilities",permalink:"/docs/catalog-utilities"}},c={},g=[{value:"CellProfiler",id:"cellprofiler",level:2}],m={toc:g};function u(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"Feel free to propose new services using ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/pulls"},"pull requests"),", or to request them by creating ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"new issues"),"."),(0,o.yg)("h2",{id:"cellprofiler"},"CellProfiler"),(0,o.yg)("p",null,"Cell image analysis software. See ",(0,o.yg)("a",{parentName:"p",href:"https://cellprofiler.org/home"},"their website"),"."),(0,o.yg)("p",null,"You can start a container using the ",(0,o.yg)("strong",{parentName:"p"},"CellProfiler")," template in the ",(0,o.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,o.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,o.yg)("p",null,"This template uses the ",(0,o.yg)("a",{parentName:"p",href:"https://hub.docker.com/r/cellprofiler/cellprofiler"},"official CellProfiler image")," hosted on DockerHub"),(0,o.yg)("admonition",{title:"Persistent data folder",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"\ud83d\udcc2 Use the ",(0,o.yg)("inlineCode",{parentName:"p"},"/usr/local/src/work")," folder (home of the root user) to store your data in the existing persistent storage. 
You can find the persistent volumes in the DSRI web UI, go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),".")),(0,o.yg)("p",null,"Once the CellProfiler has been started you can access it through the pod terminal (in the DSRI web UI, or using ",(0,o.yg)("inlineCode",{parentName:"p"},"oc rsh POD_ID"),")"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"cellprofiler --help\ncellprofiler --run\ncellprofiler --run-headless\n")),(0,o.yg)("admonition",{title:"Getting Started",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"\ud83d\udc2c For more information using cell profiler from the command line see ",(0,o.yg)("a",{parentName:"p",href:"https://carpenter-singh-lab.broadinstitute.org/blog/getting-started-using-cellprofiler-command-line"},"this post"))))}u.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7512],{5680:(e,t,r)=>{r.d(t,{xA:()=>c,yg:()=>u});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var s=n.createContext({}),p=function(e){var t=n.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=p(e.components);return n.createElement(s.Provider,{value:t},e.children)},g={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),m=p(r),u=a,f=m["".concat(s,".").concat(u)]||m[u]||g[u]||o;return r?n.createElement(f,i(i({ref:t},c),{},{components:r})):n.createElement(f,i({ref:t},c))}));function u(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=m;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var p=2;p{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>u,frontMatter:()=>l,metadata:()=>p,toc:()=>g});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],l={id:"catalog-imaging",title:"Imaging softwares"},s=void 0,p={unversionedId:"catalog-imaging",id:"catalog-imaging",title:"Imaging softwares",description:"Feel free to propose new services using pull requests, or to request them by creating new issues.",source:"@site/docs/catalog-imaging.md",sourceDirName:".",slug:"/catalog-imaging",permalink:"/docs/catalog-imaging",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-imaging.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-imaging",title:"Imaging 
softwares"},sidebar:"docs",previous:{title:"Genomics",permalink:"/docs/catalog-genomics"},next:{title:"Utilities",permalink:"/docs/catalog-utilities"}},c={},g=[{value:"CellProfiler",id:"cellprofiler",level:2}],m={toc:g};function u(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"Feel free to propose new services using ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/pulls"},"pull requests"),", or to request them by creating ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"new issues"),"."),(0,o.yg)("h2",{id:"cellprofiler"},"CellProfiler"),(0,o.yg)("p",null,"Cell image analysis software. See ",(0,o.yg)("a",{parentName:"p",href:"https://cellprofiler.org/home"},"their website"),"."),(0,o.yg)("p",null,"You can start a container using the ",(0,o.yg)("strong",{parentName:"p"},"CellProfiler")," template in the ",(0,o.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,o.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,o.yg)("p",null,"This template uses the ",(0,o.yg)("a",{parentName:"p",href:"https://hub.docker.com/r/cellprofiler/cellprofiler"},"official CellProfiler image")," hosted on DockerHub"),(0,o.yg)("admonition",{title:"Persistent data folder",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"\ud83d\udcc2 Use the ",(0,o.yg)("inlineCode",{parentName:"p"},"/usr/local/src/work")," folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),".")),(0,o.yg)("p",null,"Once the CellProfiler has been started you can access it through the pod terminal (in the DSRI web UI, or using ",(0,o.yg)("inlineCode",{parentName:"p"},"oc rsh POD_ID"),")"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"cellprofiler --help\ncellprofiler --run\ncellprofiler --run-headless\n")),(0,o.yg)("admonition",{title:"Getting Started",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"\ud83d\udc2c For more information using cell profiler from the command line see ",(0,o.yg)("a",{parentName:"p",href:"https://carpenter-singh-lab.broadinstitute.org/blog/getting-started-using-cellprofiler-command-line"},"this post"))))}u.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/9f389101.a1aff6ed.js b/assets/js/9f389101.1c470da1.js similarity index 98% rename from assets/js/9f389101.a1aff6ed.js rename to assets/js/9f389101.1c470da1.js index c3729178b..06d1bdbdc 100644 --- a/assets/js/9f389101.a1aff6ed.js +++ b/assets/js/9f389101.1c470da1.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5020],{5680:(e,t,n)=>{n.d(t,{xA:()=>p,yg:()=>u});var o=n(6540);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function l(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function a(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var 
l=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var s=o.createContext({}),c=function(e){var t=o.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):a(a({},t),e)),n},p=function(e){var t=c(e.components);return o.createElement(s.Provider,{value:t},e.children)},f={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},w=o.forwardRef((function(e,t){var n=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,p=i(e,["components","mdxType","originalType","parentName"]),w=c(n),u=r,d=w["".concat(s,".").concat(u)]||w[u]||f[u]||l;return n?o.createElement(d,a(a({ref:t},p),{},{components:n})):o.createElement(d,a({ref:t},p))}));function u(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var l=n.length,a=new Array(l);a[0]=w;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:r,a[1]=i;for(var c=2;c{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>s,default:()=>u,frontMatter:()=>i,metadata:()=>c,toc:()=>f});var o=n(9668),r=n(1367),l=(n(6540),n(5680)),a=["components"],i={id:"workflows-nextflow",title:"Run Nextflow workflows"},s=void 0,c={unversionedId:"workflows-nextflow",id:"workflows-nextflow",title:"Run Nextflow workflows",description:"Nextflow enables scalable and reproducible scientific workflows using software containers. It allows the adaptation of pipelines written in the most common scripting languages.",source:"@site/docs/workflows-nextflow.md",sourceDirName:".",slug:"/workflows-nextflow",permalink:"/docs/workflows-nextflow",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-nextflow.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-nextflow",title:"Run Nextflow workflows"},sidebar:"docs",previous:{title:"Run Argo workflows",permalink:"/docs/workflows-argo"},next:{title:"Run CWL workflows",permalink:"/docs/workflows-cwl"}},p={},f=[{value:"Install Nextflow",id:"install-nextflow",level:2},{value:"Run workflow",id:"run-workflow",level:2}],w={toc:f};function u(e){var t=e.components,n=(0,r.A)(e,a);return(0,l.yg)("wrapper",(0,o.A)({},w,n,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("p",null,(0,l.yg)("a",{parentName:"p",href:"https://www.nextflow.io/"},"Nextflow")," enables scalable and reproducible scientific workflows using software containers. It allows the adaptation of pipelines written in the most common scripting languages."),(0,l.yg)("p",null,"Nextflow has been developed by the genomic research scientific community and is built to run bioinformatics pipeline."),(0,l.yg)("p",null,"Define your workflow in a Bash script fashion, providing input, output and the command to run. 
Without the need to create and use Docker container for Conda pipelines"),(0,l.yg)("h2",{id:"install-nextflow"},"Install Nextflow"),(0,l.yg)("p",null,"Install the ",(0,l.yg)("inlineCode",{parentName:"p"},"nextflow")," client on your computer:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"wget -qO- https://get.nextflow.io | bash\n")),(0,l.yg)("admonition",{title:"Official documentation",type:"info"},(0,l.yg)("p",{parentName:"admonition"},"See the ",(0,l.yg)("a",{parentName:"p",href:"https://www.nextflow.io/docs/latest/getstarted.html#installation"},"Nextflow documentation"),".")),(0,l.yg)("h2",{id:"run-workflow"},"Run workflow"),(0,l.yg)("p",null,"Try the hello world workflow from Nextflow using an existing storage:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"nextflow kuberun https://github.com/nextflow-io/hello -v pvc-mapr-projects-showcase:/data\n")),(0,l.yg)("admonition",{title:"Use Conda environments",type:"tip"},(0,l.yg)("p",{parentName:"admonition"},"You can easily ",(0,l.yg)("a",{parentName:"p",href:"https://www.nextflow.io/docs/latest/conda.html"},"define Conda environments and workflows")," with Nextflow.")))}u.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5020],{5680:(e,t,n)=>{n.d(t,{xA:()=>p,yg:()=>u});var o=n(6540);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function l(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function a(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var s=o.createContext({}),c=function(e){var t=o.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):a(a({},t),e)),n},p=function(e){var t=c(e.components);return o.createElement(s.Provider,{value:t},e.children)},f={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},w=o.forwardRef((function(e,t){var n=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,p=i(e,["components","mdxType","originalType","parentName"]),w=c(n),u=r,d=w["".concat(s,".").concat(u)]||w[u]||f[u]||l;return n?o.createElement(d,a(a({ref:t},p),{},{components:n})):o.createElement(d,a({ref:t},p))}));function u(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var l=n.length,a=new Array(l);a[0]=w;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:r,a[1]=i;for(var c=2;c{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>s,default:()=>u,frontMatter:()=>i,metadata:()=>c,toc:()=>f});var o=n(9668),r=n(1367),l=(n(6540),n(5680)),a=["components"],i={id:"workflows-nextflow",title:"Run Nextflow workflows"},s=void 0,c={unversionedId:"workflows-nextflow",id:"workflows-nextflow",title:"Run Nextflow workflows",description:"Nextflow enables scalable and reproducible scientific workflows using software containers. 
It allows the adaptation of pipelines written in the most common scripting languages.",source:"@site/docs/workflows-nextflow.md",sourceDirName:".",slug:"/workflows-nextflow",permalink:"/docs/workflows-nextflow",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/workflows-nextflow.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"workflows-nextflow",title:"Run Nextflow workflows"},sidebar:"docs",previous:{title:"Run Argo workflows",permalink:"/docs/workflows-argo"},next:{title:"Run CWL workflows",permalink:"/docs/workflows-cwl"}},p={},f=[{value:"Install Nextflow",id:"install-nextflow",level:2},{value:"Run workflow",id:"run-workflow",level:2}],w={toc:f};function u(e){var t=e.components,n=(0,r.A)(e,a);return(0,l.yg)("wrapper",(0,o.A)({},w,n,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("p",null,(0,l.yg)("a",{parentName:"p",href:"https://www.nextflow.io/"},"Nextflow")," enables scalable and reproducible scientific workflows using software containers. It allows the adaptation of pipelines written in the most common scripting languages."),(0,l.yg)("p",null,"Nextflow has been developed by the genomic research scientific community and is built to run bioinformatics pipelines."),(0,l.yg)("p",null,"Define your workflow in a Bash script fashion, providing input, output and the command to run, without the need to create and use Docker containers for Conda pipelines."),(0,l.yg)("h2",{id:"install-nextflow"},"Install Nextflow"),(0,l.yg)("p",null,"Install the ",(0,l.yg)("inlineCode",{parentName:"p"},"nextflow")," client on your computer:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"wget -qO- https://get.nextflow.io | bash\n")),(0,l.yg)("admonition",{title:"Official documentation",type:"info"},(0,l.yg)("p",{parentName:"admonition"},"See the ",(0,l.yg)("a",{parentName:"p",href:"https://www.nextflow.io/docs/latest/getstarted.html#installation"},"Nextflow documentation"),".")),(0,l.yg)("h2",{id:"run-workflow"},"Run workflow"),(0,l.yg)("p",null,"Try the hello world workflow from Nextflow using an existing storage:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"nextflow kuberun https://github.com/nextflow-io/hello -v pvc-mapr-projects-showcase:/data\n")),(0,l.yg)("admonition",{title:"Use Conda environments",type:"tip"},(0,l.yg)("p",{parentName:"admonition"},"You can easily ",(0,l.yg)("a",{parentName:"p",href:"https://www.nextflow.io/docs/latest/conda.html"},"define Conda environments and workflows")," with Nextflow.")))}u.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/a09c2993.d90872b2.js b/assets/js/a09c2993.ccaeace5.js similarity index 99% rename from assets/js/a09c2993.d90872b2.js rename to assets/js/a09c2993.ccaeace5.js index cd7ee6733..6f64666be 100644 --- a/assets/js/a09c2993.d90872b2.js +++ b/assets/js/a09c2993.ccaeace5.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5899],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>g});var r=a(6540);function n(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function i(e){for(var t=1;t=0||(n[a]=e[a]);return 
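To show the "Bash script fashion" described above, a minimal sketch of a Nextflow (DSL2) script; the file and process names are illustrative:

```nextflow
// main.nf
params.name = 'world'

process sayHello {
    input:
    val name

    output:
    stdout

    script:
    """
    echo "Hello, ${name}!"
    """
}

workflow {
    sayHello(Channel.of(params.name)) | view
}
```

Run it locally with `nextflow run main.nf`, or on the DSRI with `nextflow kuberun` as shown above.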
n}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(n[a]=e[a])}return n}var s=r.createContext({}),u=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},c=function(e){var t=u(e.components);return r.createElement(s.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var a=e.components,n=e.mdxType,o=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),d=u(a),g=n,y=d["".concat(s,".").concat(g)]||d[g]||p[g]||o;return a?r.createElement(y,i(i({ref:t},c),{},{components:a})):r.createElement(y,i({ref:t},c))}));function g(e,t){var a=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var o=a.length,i=new Array(o);i[0]=d;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,i[1]=l;for(var u=2;u{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>g,frontMatter:()=>l,metadata:()=>u,toc:()=>p});var r=a(9668),n=a(1367),o=(a(6540),a(5680)),i=["components"],l={id:"introduction",title:"Introduction",slug:"/"},s=void 0,u={unversionedId:"introduction",id:"introduction",title:"Introduction",description:"The Data Science Research Infrastructure is a cluster of servers to deploy workspaces and applications for Data Science.",source:"@site/docs/introduction.md",sourceDirName:".",slug:"/",permalink:"/docs/",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/introduction.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"introduction",title:"Introduction",slug:"/"},sidebar:"docs",next:{title:"Access the DSRI",permalink:"/docs/access-dsri"}},c={},p=[{value:"Getting started",id:"getting-started",level:2},{value:"\u2705 What can be done on the DSRI",id:"-what-can-be-done-on-the-dsri",level:3},{value:"\u274c What cannot be done",id:"-what-cannot-be-done",level:3},{value:"The DSRI architecture",id:"the-dsri-architecture",level:2},{value:"The DSRI specifications",id:"the-dsri-specifications",level:2},{value:"Software",id:"software",level:3},{value:"Hardware",id:"hardware",level:3},{value:"Learn more about DSRI",id:"learn-more-about-dsri",level:2}],d={toc:p};function g(e){var t=e.components,a=(0,n.A)(e,i);return(0,o.yg)("wrapper",(0,r.A)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"The Data Science Research Infrastructure is a cluster of servers to deploy workspaces and applications for Data Science. "),(0,o.yg)("p",null,"It works by starting workspaces and applications in Docker containers that are automatically deployed to a powerful server on the cluster using Kubernetes, a container orchestration system. 
You can then access your workspace or application through an URL automatically generated."),(0,o.yg)("h2",{id:"getting-started"},"Getting started"),(0,o.yg)("h3",{id:"-what-can-be-done-on-the-dsri"},"\u2705 What can be done on the DSRI"),(0,o.yg)("p",null,"The DSRI is particularly useful if you need to:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Gain access to more computing resources (memory and CPUs), which enables you to load larger amount of data, or use more threads for parallelized tasks"),(0,o.yg)("li",{parentName:"ul"},"Run jobs that takes a long time to complete"),(0,o.yg)("li",{parentName:"ul"},"Deploy any database or service you need, and connect to it from your workspace easily"),(0,o.yg)("li",{parentName:"ul"},"Book and start a workspace that uses one of our GPUs")),(0,o.yg)("p",null,"The DSRI proposes a number of popular workspaces to work with data:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Multiple flavors of ",(0,o.yg)("strong",{parentName:"li"},"JupyterLab")," (scipy, tensorflow, all-spark, and more)"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"VisualStudio Code")," server (also available within the JupyterLab workspaces)"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"RStudio"),", with a complementary Shiny server"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Matlab")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Ubuntu")," Desktop ")),(0,o.yg)("p",null,"You can then install anything you want in your workspace using ",(0,o.yg)("inlineCode",{parentName:"p"},"conda"),", ",(0,o.yg)("inlineCode",{parentName:"p"},"pip"),", or ",(0,o.yg)("inlineCode",{parentName:"p"},"apt"),"."),(0,o.yg)("admonition",{title:"Data storage",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},(0,o.yg)("strong",{parentName:"p"},"DSRI is a computing infrastructure"),", built and used to run data science workloads. DSRI stores data in a persistent manner, but all data stored on the DSRI is susceptible to be altered by the workloads you are running, and we cannot guarantee its immutability."),(0,o.yg)("p",{parentName:"admonition"},(0,o.yg)("strong",{parentName:"p"},"Always keep a safe copy of your data outside the DSRI"),". And don't rely on the DSRI for long term storage.")),(0,o.yg)("h3",{id:"-what-cannot-be-done"},"\u274c What cannot be done"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Since DSRI can only be accessed when using the ",(0,o.yg)("a",{parentName:"li",href:"https://vpn.maastrichtuniversity.nl/"},"UM VPN"),", deployed services will not be available on the public Internet \ud83d\udd12"),(0,o.yg)("li",{parentName:"ul"},"All activities must be legal in basis. You must closely examine and abide by the terms and conditions of any data, software, or web service that you use as part of your work \ud83d\udcdc"),(0,o.yg)("li",{parentName:"ul"},"You cannot reach data or servers hosted at Maastricht University from the DSRI by default. 
You will need to request access in advance ",(0,o.yg)("a",{parentName:"li",href:"/docs/prepare-project-for-dsri#request-access-to-internal-um-servers"},"here \ud83d\udcec\ufe0f")),(0,o.yg)("li",{parentName:"ul"},"Right now it is not possible to reach the central UM fileservices (MFS)")),(0,o.yg)("admonition",{title:"Request an account",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"If you are working at Maastricht University, ",(0,o.yg)("strong",{parentName:"p"},"see ",(0,o.yg)("a",{parentName:"strong",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/access-dsri"},"this page")," to request an account"),", and run your services on the DSRI.")),(0,o.yg)("h2",{id:"the-dsri-architecture"},"The DSRI architecture"),(0,o.yg)("p",null,"Here is a diagram providing a simplified explanation of how the DSRI works, using popular data science applications as examples (JupyterLab, RStudio, VSCode server)"),(0,o.yg)("img",{src:"/img/dsri_simplified_overview.png",alt:"DSRI in a nutshell ",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h2",{id:"the-dsri-specifications"},"The DSRI specifications"),(0,o.yg)("h3",{id:"software"},"Software"),(0,o.yg)("p",null,"We use ",(0,o.yg)("a",{parentName:"p",href:"https://www.okd.io/"},(0,o.yg)("strong",{parentName:"a"},"OKD 4.11")),", the Origin Community Distribution of Kubernetes that powers ",(0,o.yg)("a",{parentName:"p",href:"https://www.openshift.com/"},"RedHat OpenShift"),", a distribution of the Kubernetes container orchestration tool. Kubernetes takes care of deploying the Docker containers on the cluster of servers, the OKD distribution extends it to improve security, and provide a user-friendly web UI to manage your applications."),(0,o.yg)("p",null,"We use ",(0,o.yg)("a",{parentName:"p",href:"https://www.redhat.com/fr/technologies/storage/ceph"},(0,o.yg)("strong",{parentName:"a"},"RedHat Ceph storage"))," for the distributed storage."),(0,o.yg)("h3",{id:"hardware"},"Hardware"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"16 CPU nodes")),(0,o.yg)("table",null,(0,o.yg)("thead",{parentName:"table"},(0,o.yg)("tr",{parentName:"thead"},(0,o.yg)("th",{parentName:"tr",align:null}),(0,o.yg)("th",{parentName:"tr",align:null},"RAM (GB)"),(0,o.yg)("th",{parentName:"tr",align:null},"CPU (cores)"),(0,o.yg)("th",{parentName:"tr",align:null},"Storage (TB)"))),(0,o.yg)("tbody",{parentName:"table"},(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:null},"Node capacity"),(0,o.yg)("td",{parentName:"tr",align:null},"512 GB"),(0,o.yg)("td",{parentName:"tr",align:null},"64 cores (128 threads)"),(0,o.yg)("td",{parentName:"tr",align:null},"120 TB")),(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:null},"Total capacity"),(0,o.yg)("td",{parentName:"tr",align:null},"8 192 GB"),(0,o.yg)("td",{parentName:"tr",align:null},"1 024 cores"),(0,o.yg)("td",{parentName:"tr",align:null},"1 920 TB")))),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"1 GPU node: ",(0,o.yg)("strong",{parentName:"li"},"Nvidia DGX1")," 8x Tesla V100 - 32GB GPU")),(0,o.yg)("table",null,(0,o.yg)("thead",{parentName:"table"},(0,o.yg)("tr",{parentName:"thead"},(0,o.yg)("th",{parentName:"tr",align:null}),(0,o.yg)("th",{parentName:"tr",align:null},"GPUs"),(0,o.yg)("th",{parentName:"tr",align:null},"RAM (GB)"),(0,o.yg)("th",{parentName:"tr",align:null},"CPU (cores)"))),(0,o.yg)("tbody",{parentName:"table"},(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:null},"GPU node 
capacity"),(0,o.yg)("td",{parentName:"tr",align:null},"8"),(0,o.yg)("td",{parentName:"tr",align:null},"512 GB"),(0,o.yg)("td",{parentName:"tr",align:null},"40 cores")))),(0,o.yg)("img",{src:"/img/DSRI_infrastructure_architecture_overview.png",alt:"DSRI infrastructure ",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h2",{id:"learn-more-about-dsri"},"Learn more about DSRI"),(0,o.yg)("p",null,"See the following presentation about the Data Science Research Infrastructure "),(0,o.yg)("a",{href:"/resource/2021-04-DSRI-Community-Event.pdf",target:"_blank",rel:"noopener noreferrer"},(0,o.yg)("img",{src:"/resource/DSRI-community-event.png",style:{maxWidth:"100%",maxHeight:"100%"},alt:"DSRI April 2021 Community Event Presentation"})))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5899],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>g});var r=a(6540);function n(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function i(e){for(var t=1;t=0||(n[a]=e[a]);return n}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(n[a]=e[a])}return n}var s=r.createContext({}),u=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},c=function(e){var t=u(e.components);return r.createElement(s.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var a=e.components,n=e.mdxType,o=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),d=u(a),g=n,y=d["".concat(s,".").concat(g)]||d[g]||p[g]||o;return a?r.createElement(y,i(i({ref:t},c),{},{components:a})):r.createElement(y,i({ref:t},c))}));function g(e,t){var a=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var o=a.length,i=new Array(o);i[0]=d;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,i[1]=l;for(var u=2;u{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>g,frontMatter:()=>l,metadata:()=>u,toc:()=>p});var r=a(9668),n=a(1367),o=(a(6540),a(5680)),i=["components"],l={id:"introduction",title:"Introduction",slug:"/"},s=void 0,u={unversionedId:"introduction",id:"introduction",title:"Introduction",description:"The Data Science Research Infrastructure is a cluster of servers to deploy workspaces and applications for Data Science.",source:"@site/docs/introduction.md",sourceDirName:".",slug:"/",permalink:"/docs/",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/introduction.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"introduction",title:"Introduction",slug:"/"},sidebar:"docs",next:{title:"Access the DSRI",permalink:"/docs/access-dsri"}},c={},p=[{value:"Getting started",id:"getting-started",level:2},{value:"\u2705 What can be done on the DSRI",id:"-what-can-be-done-on-the-dsri",level:3},{value:"\u274c What cannot be done",id:"-what-cannot-be-done",level:3},{value:"The DSRI architecture",id:"the-dsri-architecture",level:2},{value:"The DSRI 
specifications",id:"the-dsri-specifications",level:2},{value:"Software",id:"software",level:3},{value:"Hardware",id:"hardware",level:3},{value:"Learn more about DSRI",id:"learn-more-about-dsri",level:2}],d={toc:p};function g(e){var t=e.components,a=(0,n.A)(e,i);return(0,o.yg)("wrapper",(0,r.A)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"The Data Science Research Infrastructure is a cluster of servers to deploy workspaces and applications for Data Science. "),(0,o.yg)("p",null,"It works by starting workspaces and applications in Docker containers that are automatically deployed to a powerful server on the cluster using Kubernetes, a container orchestration system. You can then access your workspace or application through an URL automatically generated."),(0,o.yg)("h2",{id:"getting-started"},"Getting started"),(0,o.yg)("h3",{id:"-what-can-be-done-on-the-dsri"},"\u2705 What can be done on the DSRI"),(0,o.yg)("p",null,"The DSRI is particularly useful if you need to:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Gain access to more computing resources (memory and CPUs), which enables you to load larger amount of data, or use more threads for parallelized tasks"),(0,o.yg)("li",{parentName:"ul"},"Run jobs that takes a long time to complete"),(0,o.yg)("li",{parentName:"ul"},"Deploy any database or service you need, and connect to it from your workspace easily"),(0,o.yg)("li",{parentName:"ul"},"Book and start a workspace that uses one of our GPUs")),(0,o.yg)("p",null,"The DSRI proposes a number of popular workspaces to work with data:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Multiple flavors of ",(0,o.yg)("strong",{parentName:"li"},"JupyterLab")," (scipy, tensorflow, all-spark, and more)"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"VisualStudio Code")," server (also available within the JupyterLab workspaces)"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"RStudio"),", with a complementary Shiny server"),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Matlab")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("strong",{parentName:"li"},"Ubuntu")," Desktop ")),(0,o.yg)("p",null,"You can then install anything you want in your workspace using ",(0,o.yg)("inlineCode",{parentName:"p"},"conda"),", ",(0,o.yg)("inlineCode",{parentName:"p"},"pip"),", or ",(0,o.yg)("inlineCode",{parentName:"p"},"apt"),"."),(0,o.yg)("admonition",{title:"Data storage",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},(0,o.yg)("strong",{parentName:"p"},"DSRI is a computing infrastructure"),", built and used to run data science workloads. DSRI stores data in a persistent manner, but all data stored on the DSRI is susceptible to be altered by the workloads you are running, and we cannot guarantee its immutability."),(0,o.yg)("p",{parentName:"admonition"},(0,o.yg)("strong",{parentName:"p"},"Always keep a safe copy of your data outside the DSRI"),". And don't rely on the DSRI for long term storage.")),(0,o.yg)("h3",{id:"-what-cannot-be-done"},"\u274c What cannot be done"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Since DSRI can only be accessed when using the ",(0,o.yg)("a",{parentName:"li",href:"https://vpn.maastrichtuniversity.nl/"},"UM VPN"),", deployed services will not be available on the public Internet \ud83d\udd12"),(0,o.yg)("li",{parentName:"ul"},"All activities must be legal in basis. 
You must closely examine and abide by the terms and conditions of any data, software, or web service that you use as part of your work \ud83d\udcdc"),(0,o.yg)("li",{parentName:"ul"},"You cannot reach data or servers hosted at Maastricht University from the DSRI by default. You will need to request access in advance ",(0,o.yg)("a",{parentName:"li",href:"/docs/prepare-project-for-dsri#request-access-to-internal-um-servers"},"here \ud83d\udcec\ufe0f")),(0,o.yg)("li",{parentName:"ul"},"Right now it is not possible to reach the central UM fileservices (MFS)")),(0,o.yg)("admonition",{title:"Request an account",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"If you are working at Maastricht University, ",(0,o.yg)("strong",{parentName:"p"},"see ",(0,o.yg)("a",{parentName:"strong",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/access-dsri"},"this page")," to request an account"),", and run your services on the DSRI.")),(0,o.yg)("h2",{id:"the-dsri-architecture"},"The DSRI architecture"),(0,o.yg)("p",null,"Here is a diagram providing a simplified explanation of how the DSRI works, using popular data science applications as examples (JupyterLab, RStudio, VSCode server)"),(0,o.yg)("img",{src:"/img/dsri_simplified_overview.png",alt:"DSRI in a nutshell ",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h2",{id:"the-dsri-specifications"},"The DSRI specifications"),(0,o.yg)("h3",{id:"software"},"Software"),(0,o.yg)("p",null,"We use ",(0,o.yg)("a",{parentName:"p",href:"https://www.okd.io/"},(0,o.yg)("strong",{parentName:"a"},"OKD 4.11")),", the Origin Community Distribution of Kubernetes that powers ",(0,o.yg)("a",{parentName:"p",href:"https://www.openshift.com/"},"RedHat OpenShift"),", a distribution of the Kubernetes container orchestration tool. 
Kubernetes takes care of deploying the Docker containers on the cluster of servers; the OKD distribution extends it to improve security and to provide a user-friendly web UI to manage your applications."),(0,o.yg)("p",null,"We use ",(0,o.yg)("a",{parentName:"p",href:"https://www.redhat.com/fr/technologies/storage/ceph"},(0,o.yg)("strong",{parentName:"a"},"RedHat Ceph storage"))," for the distributed storage."),(0,o.yg)("h3",{id:"hardware"},"Hardware"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"16 CPU nodes")),(0,o.yg)("table",null,(0,o.yg)("thead",{parentName:"table"},(0,o.yg)("tr",{parentName:"thead"},(0,o.yg)("th",{parentName:"tr",align:null}),(0,o.yg)("th",{parentName:"tr",align:null},"RAM (GB)"),(0,o.yg)("th",{parentName:"tr",align:null},"CPU (cores)"),(0,o.yg)("th",{parentName:"tr",align:null},"Storage (TB)"))),(0,o.yg)("tbody",{parentName:"table"},(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:null},"Node capacity"),(0,o.yg)("td",{parentName:"tr",align:null},"512 GB"),(0,o.yg)("td",{parentName:"tr",align:null},"64 cores (128 threads)"),(0,o.yg)("td",{parentName:"tr",align:null},"120 TB")),(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:null},"Total capacity"),(0,o.yg)("td",{parentName:"tr",align:null},"8 192 GB"),(0,o.yg)("td",{parentName:"tr",align:null},"1 024 cores"),(0,o.yg)("td",{parentName:"tr",align:null},"1 920 TB")))),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"1 GPU node: ",(0,o.yg)("strong",{parentName:"li"},"Nvidia DGX1")," 8x Tesla V100 - 32GB GPU")),(0,o.yg)("table",null,(0,o.yg)("thead",{parentName:"table"},(0,o.yg)("tr",{parentName:"thead"},(0,o.yg)("th",{parentName:"tr",align:null}),(0,o.yg)("th",{parentName:"tr",align:null},"GPUs"),(0,o.yg)("th",{parentName:"tr",align:null},"RAM (GB)"),(0,o.yg)("th",{parentName:"tr",align:null},"CPU (cores)"))),(0,o.yg)("tbody",{parentName:"table"},(0,o.yg)("tr",{parentName:"tbody"},(0,o.yg)("td",{parentName:"tr",align:null},"GPU node capacity"),(0,o.yg)("td",{parentName:"tr",align:null},"8"),(0,o.yg)("td",{parentName:"tr",align:null},"512 GB"),(0,o.yg)("td",{parentName:"tr",align:null},"40 cores")))),(0,o.yg)("img",{src:"/img/DSRI_infrastructure_architecture_overview.png",alt:"DSRI infrastructure ",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h2",{id:"learn-more-about-dsri"},"Learn more about DSRI"),(0,o.yg)("p",null,"See the following presentation about the Data Science Research Infrastructure "),(0,o.yg)("a",{href:"/resource/2021-04-DSRI-Community-Event.pdf",target:"_blank",rel:"noopener noreferrer"},(0,o.yg)("img",{src:"/resource/DSRI-community-event.png",style:{maxWidth:"100%",maxHeight:"100%"},alt:"DSRI April 2021 Community Event Presentation"})))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/a1c3d222.14eb0f1e.js b/assets/js/a1c3d222.4714947a.js similarity index 99% rename from assets/js/a1c3d222.14eb0f1e.js rename to assets/js/a1c3d222.4714947a.js index 1b5379304..6009c9208 100644 --- a/assets/js/a1c3d222.14eb0f1e.js +++ b/assets/js/a1c3d222.4714947a.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8433],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>y});var o=n(6540);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return
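Referring back to the introduction above, installing packages inside a workspace with `conda`, `pip`, or `apt` can look like the following sketch (the package names are arbitrary examples, and `sudo`/`apt` availability depends on the workspace image):

```shell
# Run inside a workspace terminal (e.g. JupyterLab or VSCode server).
conda install -y numpy
pip install pandas
sudo apt-get update && sudo apt-get install -y htop
```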
n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var l=o.createContext({}),p=function(e){var t=o.useContext(l),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},c=function(e){var t=p(e.components);return o.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},g=o.forwardRef((function(e,t){var n=e.components,a=e.mdxType,r=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),g=p(n),y=a,m=g["".concat(l,".").concat(y)]||g[y]||u[y]||r;return n?o.createElement(m,i(i({ref:t},c),{},{components:n})):o.createElement(m,i({ref:t},c))}));function y(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var r=n.length,i=new Array(r);i[0]=g;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>l,default:()=>y,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var o=n(9668),a=n(1367),r=(n(6540),n(5680)),i=["components"],s={id:"access-dsri",title:"Access the DSRI"},l=void 0,p={unversionedId:"access-dsri",id:"access-dsri",title:"Access the DSRI",description:"Request an account",source:"@site/docs/access-dsri.md",sourceDirName:".",slug:"/access-dsri",permalink:"/docs/access-dsri",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/access-dsri.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"access-dsri",title:"Access the DSRI"},sidebar:"docs",previous:{title:"Introduction",permalink:"/docs/"},next:{title:"Start your workspace",permalink:"/docs/start-workspace"}},c={},u=[{value:"Request an account",id:"request-an-account",level:2},{value:"Connect to the UM network",id:"connect-to-the-um-network",level:2},{value:"Access the web UI",id:"access-the-web-ui",level:2},{value:"Access your project",id:"access-your-project",level:2},{value:"About the web UI",id:"about-the-web-ui",level:2},{value:"Accessing the Developer perspective",id:"accessing-the-developer-perspective",level:3}],g={toc:u};function y(e){var t=e.components,n=(0,a.A)(e,i);return(0,r.yg)("wrapper",(0,o.A)({},g,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("h2",{id:"request-an-account"},"Request an account"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"You will need to have an account at Maastricht University with an email ending with ",(0,r.yg)("inlineCode",{parentName:"p"},"@maastrichtuniversity.nl")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"@student.maastrichtuniversity.nl"),".")),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Request access to the DSRI for your account Please fill this ",(0,r.yg)("a",{parentName:"p",href:"/register"},"form \ud83d\udcec")," to provide us some information on what you plan to do with the DSRI. 
Once you fill the form, you will receive an email with detailed instructions on how to log in."))),(0,r.yg)("h2",{id:"connect-to-the-um-network"},"Connect to the UM network"),(0,r.yg)("p",null,"You need to be connected to the UM network to access the DSRI."),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"\ud83d\udc27 On Linux"),": use ",(0,r.yg)("inlineCode",{parentName:"p"},"openconnect")," to connect to the UM VPN. You can easily install it on Ubuntu and Debian distributions with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt"),":"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"sudo apt install openconnect\nsudo openconnect -u YOUR.USER --authgroup 01-Employees vpn.maastrichtuniversity.nl\n")),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"\ud83c\udf4e On MacOS and Windows"),": download and install the ",(0,r.yg)("strong",{parentName:"p"},"Maastricht University VPN")," client available at ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("a",{parentName:"strong",href:"https://vpn.maastrichtuniversity.nl/"},"vpn.maastrichtuniversity.nl"))),(0,r.yg)("details",null,(0,r.yg)("summary",null,"\u26a0\ufe0f If your are a ",(0,r.yg)("b",null,"student")," you will need to request access to the UM VPN first"),(0,r.yg)("ul",null,(0,r.yg)("li",null,"You can try to use the Athena Student Desktop at ",(0,r.yg)("a",{href:"https://athenadesktop.maastrichtuniversity.nl"},"athenadesktop.maastrichtuniversity.nl"),", to access the VPN through a virtual desktop"),(0,r.yg)("li",null,"Or ask one of your teachers to request VPN access for you. You will need to send an email to the IT helpdesk of your department with the following information: "),(0,r.yg)("ul",null,(0,r.yg)("li",null,"Email of the student who will get VPN"),(0,r.yg)("li",null," for which course (provide the course ID) or project does the student need the VPN"),(0,r.yg)("li",null,"until which date the student will need the VPN.")))),(0,r.yg)("h2",{id:"access-the-web-ui"},"Access the web UI"),(0,r.yg)("p",null,"Access the DSRI web UI at ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("a",{parentName:"strong",href:"https://console-openshift-console.apps.dsri2.unimaas.nl"},"https://console-openshift-console.apps.dsri2.unimaas.nl"))),(0,r.yg)("admonition",{title:"Password",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Use your general UM password.")),(0,r.yg)("p",null,"If you do not have access to the DSRI ",(0,r.yg)("a",{parentName:"p",href:"mailto:dsri-support-l@maastrichtuniversity.nl"},"contact us"),"."),(0,r.yg)("p",null,"You will be able to login at ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("a",{parentName:"strong",href:"https://console-openshift-console.apps.dsri2.unimaas.nl"},"https://console-openshift-console.apps.dsri2.unimaas.nl"))," using the standard maastricht portal upon clicking the login button:"),(0,r.yg)("img",{src:"/img/screenshot_login_screen.png",alt:"Login screen",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/screenshot_um_login_screen.png",alt:"Login screen",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{title:"Command line interface",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"We recommend you to install the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," command line interface to perform additional operations on your applications, such as loading large amount of data using ",(0,r.yg)("inlineCode",{parentName:"p"},"oc cp"),", or deploying an application from a local ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile"),". 
Instructions on installing the client can be found ",(0,r.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"\u27a1 here"))),(0,r.yg)("h2",{id:"access-your-project"},"Access your project"),(0,r.yg)("p",null,"In the DSRI OpenShift web UI, applications are deployed in projects."),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Create a new project with a meaningful name describing what you are doing, such as ",(0,r.yg)("inlineCode",{parentName:"p"},"workspace-yourname"),".")),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Go to your project (applications are deployed in a project)."))),(0,r.yg)("admonition",{title:"Reuse your project",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Only create new projects when it is necessary (for a new project). You can easily ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/project-management#delete-a-project-using-the-web-ui"},"clean up your current project")," instead of creating a new one every time you want to try something.")),(0,r.yg)("img",{src:"/img/screenshot_go_to_project.png",alt:"Login screen",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{title:"Access permissions for developers to your project",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"You can use the ",(0,r.yg)("strong",{parentName:"p"},"Project")," view in the ",(0,r.yg)("strong",{parentName:"p"},"Developer")," perspective to grant or revoke access permissions to your project collaborators. For More Info: ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/project-management/#access-permissions-for-developers-to-your-project"},"Access permissions for developers to your project"))),(0,r.yg)("h2",{id:"about-the-web-ui"},"About the web UI"),(0,r.yg)("p",null,"Developers can use the web console to ",(0,r.yg)("strong",{parentName:"p"},"visualize"),", ",(0,r.yg)("strong",{parentName:"p"},"browse"),", and ",(0,r.yg)("strong",{parentName:"p"},"manage")," the contents of projects in new version of OKD4. 
"),(0,r.yg)("p",null,"The ",(0,r.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/web_console/odc-about-developer-perspective.html"},"OpenShift Container Platform web console")," provides two perspectives; "),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},"the ",(0,r.yg)("strong",{parentName:"li"},"Administrator")," perspective "),(0,r.yg)("li",{parentName:"ul"},"the ",(0,r.yg)("strong",{parentName:"li"},"Developer")," perspective.")),(0,r.yg)("p",null,"The Developer perspective provides workflows specific to developer use cases, such as the ability to:"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Create and deploy applications on OpenShift Container Platform by importing existing codebases, images, and dockerfiles.")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Visually interact with applications, components, and services associated with them within a project and monitor their deployment and build status.")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Group components within an application and connect the components within and across applications."))),(0,r.yg)("h3",{id:"accessing-the-developer-perspective"},"Accessing the Developer perspective"),(0,r.yg)("p",null,"You can access the ",(0,r.yg)("strong",{parentName:"p"},"Developer")," perspective from the web console as follows:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Log in to the OpenShift Container Platform web console using your login credentials. "),(0,r.yg)("ul",{parentName:"li"},(0,r.yg)("li",{parentName:"ul"},"The default view for the OpenShift Container Platform web console is the ",(0,r.yg)("strong",{parentName:"li"},"Administrator")," perspective."))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Use the perspective switcher to switch to the ",(0,r.yg)("strong",{parentName:"p"},"Developer")," perspective. The ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view with a list of all the projects in your cluster is displayed."),(0,r.yg)("img",{src:"/img/screenshot_developer_perspective.png",alt:"Developer Perspective",style:{maxWidth:"100%",maxHeight:"100%"}})),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Select an existing project from the list or use the ",(0,r.yg)("strong",{parentName:"p"},"Project")," drop-down list to create a new project."))),(0,r.yg)("admonition",{type:"info"},(0,r.yg)("p",{parentName:"admonition"},"If you have no workloads or applications in the project, the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view displays the available options to create applications. 
If you have existing workloads, the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view graphically displays your workload nodes.")),(0,r.yg)("img",{src:"/img/screenshot_topology_view.png",alt:"Topology View",style:{maxWidth:"100%",maxHeight:"100%"}}))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8433],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>y});var o=n(6540);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var l=o.createContext({}),p=function(e){var t=o.useContext(l),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},c=function(e){var t=p(e.components);return o.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},g=o.forwardRef((function(e,t){var n=e.components,a=e.mdxType,r=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),g=p(n),y=a,m=g["".concat(l,".").concat(y)]||g[y]||u[y]||r;return n?o.createElement(m,i(i({ref:t},c),{},{components:n})):o.createElement(m,i({ref:t},c))}));function y(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var r=n.length,i=new Array(r);i[0]=g;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>l,default:()=>y,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var o=n(9668),a=n(1367),r=(n(6540),n(5680)),i=["components"],s={id:"access-dsri",title:"Access the DSRI"},l=void 0,p={unversionedId:"access-dsri",id:"access-dsri",title:"Access the DSRI",description:"Request an account",source:"@site/docs/access-dsri.md",sourceDirName:".",slug:"/access-dsri",permalink:"/docs/access-dsri",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/access-dsri.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"access-dsri",title:"Access the DSRI"},sidebar:"docs",previous:{title:"Introduction",permalink:"/docs/"},next:{title:"Start your workspace",permalink:"/docs/start-workspace"}},c={},u=[{value:"Request an account",id:"request-an-account",level:2},{value:"Connect to the UM network",id:"connect-to-the-um-network",level:2},{value:"Access the web UI",id:"access-the-web-ui",level:2},{value:"Access your project",id:"access-your-project",level:2},{value:"About the web UI",id:"about-the-web-ui",level:2},{value:"Accessing the Developer perspective",id:"accessing-the-developer-perspective",level:3}],g={toc:u};function y(e){var t=e.components,n=(0,a.A)(e,i);return(0,r.yg)("wrapper",(0,o.A)({},g,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("h2",{id:"request-an-account"},"Request an account"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"You will need to have an account at Maastricht University with an email ending with 
",(0,r.yg)("inlineCode",{parentName:"p"},"@maastrichtuniversity.nl")," or ",(0,r.yg)("inlineCode",{parentName:"p"},"@student.maastrichtuniversity.nl"),".")),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Request access to the DSRI for your account Please fill this ",(0,r.yg)("a",{parentName:"p",href:"/register"},"form \ud83d\udcec")," to provide us some information on what you plan to do with the DSRI. Once you fill the form, you will receive an email with detailed instructions on how to log in."))),(0,r.yg)("h2",{id:"connect-to-the-um-network"},"Connect to the UM network"),(0,r.yg)("p",null,"You need to be connected to the UM network to access the DSRI."),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"\ud83d\udc27 On Linux"),": use ",(0,r.yg)("inlineCode",{parentName:"p"},"openconnect")," to connect to the UM VPN. You can easily install it on Ubuntu and Debian distributions with ",(0,r.yg)("inlineCode",{parentName:"p"},"apt"),":"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"sudo apt install openconnect\nsudo openconnect -u YOUR.USER --authgroup 01-Employees vpn.maastrichtuniversity.nl\n")),(0,r.yg)("p",null,(0,r.yg)("strong",{parentName:"p"},"\ud83c\udf4e On MacOS and Windows"),": download and install the ",(0,r.yg)("strong",{parentName:"p"},"Maastricht University VPN")," client available at ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("a",{parentName:"strong",href:"https://vpn.maastrichtuniversity.nl/"},"vpn.maastrichtuniversity.nl"))),(0,r.yg)("details",null,(0,r.yg)("summary",null,"\u26a0\ufe0f If your are a ",(0,r.yg)("b",null,"student")," you will need to request access to the UM VPN first"),(0,r.yg)("ul",null,(0,r.yg)("li",null,"You can try to use the Athena Student Desktop at ",(0,r.yg)("a",{href:"https://athenadesktop.maastrichtuniversity.nl"},"athenadesktop.maastrichtuniversity.nl"),", to access the VPN through a virtual desktop"),(0,r.yg)("li",null,"Or ask one of your teachers to request VPN access for you. 
You will need to send an email to the IT helpdesk of your department with the following information: "),(0,r.yg)("ul",null,(0,r.yg)("li",null,"The email of the student who will get the VPN"),(0,r.yg)("li",null,"For which course (provide the course ID) or project the student needs the VPN"),(0,r.yg)("li",null,"Until which date the student will need the VPN.")))),(0,r.yg)("h2",{id:"access-the-web-ui"},"Access the web UI"),(0,r.yg)("p",null,"Access the DSRI web UI at ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("a",{parentName:"strong",href:"https://console-openshift-console.apps.dsri2.unimaas.nl"},"https://console-openshift-console.apps.dsri2.unimaas.nl"))),(0,r.yg)("admonition",{title:"Password",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Use your general UM password.")),(0,r.yg)("p",null,"If you do not have access to the DSRI ",(0,r.yg)("a",{parentName:"p",href:"mailto:dsri-support-l@maastrichtuniversity.nl"},"contact us"),"."),(0,r.yg)("p",null,"You will be able to log in at ",(0,r.yg)("strong",{parentName:"p"},(0,r.yg)("a",{parentName:"strong",href:"https://console-openshift-console.apps.dsri2.unimaas.nl"},"https://console-openshift-console.apps.dsri2.unimaas.nl"))," using the standard Maastricht portal after clicking the login button:"),(0,r.yg)("img",{src:"/img/screenshot_login_screen.png",alt:"Login screen",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/screenshot_um_login_screen.png",alt:"Login screen",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{title:"Command line interface",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"We recommend installing the ",(0,r.yg)("inlineCode",{parentName:"p"},"oc")," command line interface to perform additional operations on your applications, such as loading large amounts of data using ",(0,r.yg)("inlineCode",{parentName:"p"},"oc cp"),", or deploying an application from a local ",(0,r.yg)("inlineCode",{parentName:"p"},"Dockerfile"),". Instructions on installing the client can be found ",(0,r.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"\u27a1 here"))),(0,r.yg)("h2",{id:"access-your-project"},"Access your project"),(0,r.yg)("p",null,"In the DSRI OpenShift web UI, applications are deployed in projects."),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Create a new project with a meaningful name describing what you are doing, such as ",(0,r.yg)("inlineCode",{parentName:"p"},"workspace-yourname"),".")),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Go to your project (applications are deployed in a project)."))),(0,r.yg)("admonition",{title:"Reuse your project",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Only create a new project when it is really necessary. You can easily ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/project-management#delete-a-project-using-the-web-ui"},"clean up your current project")," instead of creating a new one every time you want to try something.")),(0,r.yg)("img",{src:"/img/screenshot_go_to_project.png",alt:"Login screen",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{title:"Access permissions for developers to your project",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"You can use the ",(0,r.yg)("strong",{parentName:"p"},"Project")," view in the ",(0,r.yg)("strong",{parentName:"p"},"Developer")," perspective to grant or revoke access permissions to your project collaborators. 
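A few illustrative `oc` commands for the steps just described (a sketch: the project, pod, folder, and user names are placeholders):

```shell
# Create a project, copy data into a running pod and results back out,
# then grant a collaborator edit rights (the CLI equivalent of using
# the Project view in the Developer perspective).
oc new-project workspace-yourname
oc cp ./my-data my-pod:/home/jovyan/data
oc cp my-pod:/home/jovyan/results ./results
oc adm policy add-role-to-user edit colleague.name -n workspace-yourname
```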
For more info: ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/project-management/#access-permissions-for-developers-to-your-project"},"Access permissions for developers to your project"))),(0,r.yg)("h2",{id:"about-the-web-ui"},"About the web UI"),(0,r.yg)("p",null,"Developers can use the web console to ",(0,r.yg)("strong",{parentName:"p"},"visualize"),", ",(0,r.yg)("strong",{parentName:"p"},"browse"),", and ",(0,r.yg)("strong",{parentName:"p"},"manage")," the contents of projects in the new version of OKD4. "),(0,r.yg)("p",null,"The ",(0,r.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.6/web_console/odc-about-developer-perspective.html"},"OpenShift Container Platform web console")," provides two perspectives: "),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},"the ",(0,r.yg)("strong",{parentName:"li"},"Administrator")," perspective "),(0,r.yg)("li",{parentName:"ul"},"the ",(0,r.yg)("strong",{parentName:"li"},"Developer")," perspective.")),(0,r.yg)("p",null,"The Developer perspective provides workflows specific to developer use cases, such as the ability to:"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Create and deploy applications on OpenShift Container Platform by importing existing codebases, images, and Dockerfiles.")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Visually interact with applications, components, and services associated with them within a project and monitor their deployment and build status.")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("p",{parentName:"li"},"Group components within an application and connect the components within and across applications."))),(0,r.yg)("h3",{id:"accessing-the-developer-perspective"},"Accessing the Developer perspective"),(0,r.yg)("p",null,"You can access the ",(0,r.yg)("strong",{parentName:"p"},"Developer")," perspective from the web console as follows:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Log in to the OpenShift Container Platform web console using your login credentials. "),(0,r.yg)("ul",{parentName:"li"},(0,r.yg)("li",{parentName:"ul"},"The default view for the OpenShift Container Platform web console is the ",(0,r.yg)("strong",{parentName:"li"},"Administrator")," perspective."))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Use the perspective switcher to switch to the ",(0,r.yg)("strong",{parentName:"p"},"Developer")," perspective. The ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view with a list of all the projects in your cluster is displayed."),(0,r.yg)("img",{src:"/img/screenshot_developer_perspective.png",alt:"Developer Perspective",style:{maxWidth:"100%",maxHeight:"100%"}})),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Select an existing project from the list or use the ",(0,r.yg)("strong",{parentName:"p"},"Project")," drop-down list to create a new project."))),(0,r.yg)("admonition",{type:"info"},(0,r.yg)("p",{parentName:"admonition"},"If you have no workloads or applications in the project, the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view displays the available options to create applications. 
If you have existing workloads, the ",(0,r.yg)("strong",{parentName:"p"},"Topology")," view graphically displays your workload nodes.")),(0,r.yg)("img",{src:"/img/screenshot_topology_view.png",alt:"Topology View",style:{maxWidth:"100%",maxHeight:"100%"}}))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/b258fc9c.612ce544.js b/assets/js/b258fc9c.ab7ec905.js similarity index 99% rename from assets/js/b258fc9c.612ce544.js rename to assets/js/b258fc9c.ab7ec905.js index 71d2b48f9..1af1c7e97 100644 --- a/assets/js/b258fc9c.612ce544.js +++ b/assets/js/b258fc9c.ab7ec905.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9725],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>g});var l=n(6540);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function r(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var i=l.createContext({}),u=function(e){var t=l.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},c=function(e){var t=u(e.components);return l.createElement(i.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},d=l.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,i=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),d=u(n),g=a,m=d["".concat(i,".").concat(g)]||d[g]||p[g]||o;return n?l.createElement(m,r(r({ref:t},c),{},{components:n})):l.createElement(m,r({ref:t},c))}));function g(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,r=new Array(o);r[0]=d;var s={};for(var i in t)hasOwnProperty.call(t,i)&&(s[i]=t[i]);s.originalType=e,s.mdxType="string"==typeof e?e:a,r[1]=s;for(var u=2;u{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>g,frontMatter:()=>s,metadata:()=>u,toc:()=>p});var l=n(9668),a=n(1367),o=(n(6540),n(5680)),r=["components"],s={id:"guide-local-install",title:"Install local OpenShift"},i=void 0,u={unversionedId:"guide-local-install",id:"guide-local-install",title:"Install local OpenShift",description:"OpenShift and Kubernetes can be installed locally on a single machine for test purpose. The installation requires knowledge of your OS administration, and can be quite complex. We recommend to install it locally only if really required. 
Otherwise we recommend you to simply use Docker to test images, then deploy them on the DSRI.",source:"@site/docs/guide-local-install.md",sourceDirName:".",slug:"/guide-local-install",permalink:"/docs/guide-local-install",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-local-install.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-local-install",title:"Install local OpenShift"}},c={},p=[{value:"Install MiniShift",id:"install-minishift",level:2},{value:"Start MiniShift",id:"start-minishift",level:3},{value:"Login",id:"login",level:3},{value:"Stop",id:"stop",level:3},{value:"Reset",id:"reset",level:3},{value:"Install kubectl",id:"install-kubectl",level:2},{value:"kubectl on Ubuntu",id:"kubectl-on-ubuntu",level:3},{value:"kubectl on MacOS & Windows",id:"kubectl-on-macos--windows",level:3},{value:"Install the Dashboard UI",id:"install-the-dashboard-ui",level:3},{value:"Run kubectl",id:"run-kubectl",level:3},{value:"Enable internet",id:"enable-internet",level:3},{value:"Create persistent volume",id:"create-persistent-volume",level:3},{value:"Uninstall",id:"uninstall",level:3},{value:"Install Argo workflows",id:"install-argo-workflows",level:2},{value:"Install on your local Kubernetes",id:"install-on-your-local-kubernetes",level:3},{value:"Install the client",id:"install-the-client",level:3},{value:"Expose the UI",id:"expose-the-ui",level:3}],d={toc:p};function g(e){var t=e.components,s=(0,a.A)(e,r);return(0,o.yg)("wrapper",(0,l.A)({},d,s,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"OpenShift and Kubernetes can be installed locally on a single machine for test purpose. The installation requires knowledge of your OS administration, and can be quite complex. We recommend to install it locally only if really required. 
Otherwise we recommend you to simply use Docker to test images, then deploy them on the DSRI."),(0,o.yg)("h2",{id:"install-minishift"},"Install MiniShift"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/minishift/getting-started/installing.html"})),(0,o.yg)("p",null,"You will need to ",(0,o.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/minishift/getting-started/setting-up-virtualization-environment.html"},"set up the virtualization environment")," before ",(0,o.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/minishift/getting-started/installing.html"},"installing MiniShift"),"."),(0,o.yg)("p",null,"Download ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/minishift/minishift/releases"},"MiniShift")," and unzip it."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"# For Ubuntu 18.04 and older\nsudo apt install -y libvirt-bin qemu-kvm\n# For Ubuntu 18.10 and newer (replace libvirtd by libvirt in next commands)\nsudo apt install -y qemu-kvm libvirt-daemon libvirt-daemon-system\n\n# Create group if does not exist\nsudo addgroup libvirtd\nsudo adduser $(whoami) libvirtd\n\nsudo usermod -a -G libvirtd $(whoami)\nnewgrp libvirtd\ncurl -L https://github.com/dhiltgen/docker-machine-kvm/releases/download/v0.10.0/docker-machine-driver-kvm-ubuntu16.04 -o /usr/local/bin/docker-machine-driver-kvm\nsudo chmod +x /usr/local/bin/docker-machine-driver-kvm\n\n# Check if libvirtd running\nsystemctl is-active libvirtd\n# Start if inactive\nsudo systemctl start libvirtd\n\n# Copy MiniShift in your path\ncp minishift-1.34.1-linux-amd64/minishift /usr/local/bin\n")),(0,o.yg)("h3",{id:"start-minishift"},"Start MiniShift"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"minishift start\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Get your local OpenShift cluster URL after the command complete.")),(0,o.yg)("h3",{id:"login"},"Login"),(0,o.yg)("p",null,"Go to your local cluster URL."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"E.g. 
",(0,o.yg)("a",{parentName:"p",href:"https://192.168.42.58:8443/console/catalog"},"https://192.168.42.58:8443/console/catalog"),".")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Username: ",(0,o.yg)("inlineCode",{parentName:"p"},"admin")," or ",(0,o.yg)("inlineCode",{parentName:"p"},"developer")),(0,o.yg)("p",{parentName:"blockquote"},"Password: anything will work")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"# As admin\noc login -u system:admin\n")),(0,o.yg)("h3",{id:"stop"},"Stop"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"minishift stop\n")),(0,o.yg)("h3",{id:"reset"},"Reset"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"minishift delete -f\n")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"install-kubectl"},"Install kubectl"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},(0,o.yg)("img",{alt:"Kubernetes",src:n(647).A,width:"209",height:"203"}))),(0,o.yg)("h3",{id:"kubectl-on-ubuntu"},"kubectl on Ubuntu"),(0,o.yg)("p",null,"For more details: read the official ",(0,o.yg)("a",{parentName:"p",href:"https://tutorials.ubuntu.com/tutorial/install-a-local-kubernetes-with-microk8s#0"},"install Kubernetes on Ubuntu tutorial")," or see the official ",(0,o.yg)("a",{parentName:"p",href:"https://ubuntu.com/kubernetes/install"},"Ubuntu Kubernetes install documentation"),"."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"sudo snap install microk8s --classic\nsudo usermod -a -G microk8s $USER\n# Restart your machine\nmkdir -p ~/.kube\nmicrok8s.kubectl config view --raw > $HOME/.kube/config\n\n# Make sure this works for dashboard on Ubuntu\nmicrok8s.enable dashboard dns\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"To do only if kubectl is not already installed on your machine:"),(0,o.yg)("pre",{parentName:"blockquote"},(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"sudo snap alias microk8s.kubectl kubectl\n"))),(0,o.yg)("h3",{id:"kubectl-on-macos--windows"},"kubectl on MacOS & Windows"),(0,o.yg)("p",null,"Included in Docker installation. 
Use the installer provided by DockerHub."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Activate it in Docker Preferences > Kubernetes.")),(0,o.yg)("p",null,"For Windows you will need to download the ",(0,o.yg)("inlineCode",{parentName:"p"},"kubectl.exe")," and place it in your PATH."),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/windows/amd64/kubectl.exe"},"https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/windows/amd64/kubectl.exe"))),(0,o.yg)("p",null,"We recommend to create a ",(0,o.yg)("inlineCode",{parentName:"p"},"kubectl")," directory in ",(0,o.yg)("inlineCode",{parentName:"p"},"C:/")," and add this ",(0,o.yg)("inlineCode",{parentName:"p"},"C:/kubectl")," to the Path environment variable in System properties > Advanced > Environment Variables > Path"),(0,o.yg)("h3",{id:"install-the-dashboard-ui"},"Install the Dashboard UI"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"# Install Kubernetes UI\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml\nkubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-dashboard-adminuser-sa.yml\nkubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-adminuser-rolebinding.yml\n\n# Get the Token to access the dashboard\nkubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')\n\n\n# Windows user will need to execute the 2 commands manually:\nkubectl -n kube-system get secret \n# And get the token containing 'admin-user'\nkubectl -n kube-system describe secret\n# For Windows: give the anonymous user global access\nkubectl create clusterrolebinding cluster-system-anonymous --clusterrole=admin --user=system:anonymous\n# Note: this could be improved. 
I think only the Dashboard UI didn't have the required permissions.\n\n# Finally, start the web UI, and chose the Token connection\nkubectl proxy\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/"),(0,o.yg)("p",{parentName:"blockquote"},"And provide the previously obtained token.")),(0,o.yg)("p",null,(0,o.yg)("strong",{parentName:"p"},"Warning:")," you will need to save the token to login again next time (use the password save from your browser if possible)."),(0,o.yg)("h3",{id:"run-kubectl"},"Run kubectl"),(0,o.yg)("p",null,(0,o.yg)("inlineCode",{parentName:"p"},"kubectl")," should be running at start."),(0,o.yg)("p",null,"Just restart the web UI"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl proxy\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/")),(0,o.yg)("h3",{id:"enable-internet"},"Enable internet"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/"},"Debug DNS on Ubuntu")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"microk8s.enable dns\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Restart your machine.")),(0,o.yg)("p",null,"You might need to change your firewall configuration"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"On Ubuntu")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"sudo ufw allow in on cni0\nsudo ufw allow out on cni0\nsudo ufw default allow routed\n")),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Try to connect to the internet from Kubernetes with the ",(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/tests/test-busybox.yaml"},"test-busybox pod"),".")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl exec -ti busybox -- /bin/sh\nping google.com\n")),(0,o.yg)("h3",{id:"create-persistent-volume"},"Create persistent volume"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"# Create volume\nkubectl apply -n argo -f d2s-core/argo/storage/storage-mac.yaml\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Not working at the moment.")),(0,o.yg)("h3",{id:"uninstall"},"Uninstall"),(0,o.yg)("p",null,"Clean uninstall before 2.2."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl get cm workflow-controller-configmap -o yaml -n kube-system --export | kubectl apply -n argo -f -\nkubectl delete -n kube-system cm workflow-controller-configmap\nkubectl delete -n kube-system deploy workflow-controller argo-ui\nkubectl delete -n kube-system sa argo argo-ui\nkubectl delete -n kube-system svc argo-ui\n")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"install-argo-workflows"},"Install Argo workflows"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://argoproj.github.io/argo/"},(0,o.yg)("img",{alt:"Argo project",src:n(4122).A,width:"300",height:"137"}))),(0,o.yg)("h3",{id:"install-on-your-local-kubernetes"},"Install on your local Kubernetes"),(0,o.yg)("p",null,"Argo workflows will be installed on the ",(0,o.yg)("inlineCode",{parentName:"p"},"argo")," namespace. 
See the ",(0,o.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/demo.html#2-install-the-controller-and-ui"},"official Argo documentation")," for more details."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl create ns argo\nkubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.4.2/manifests/install.yaml\n\n# Configure service account to run workflow\nkubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=default:default\n\n# Test run\nargo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"See ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/vemonet/argo/master/manifests/namespace-install.yaml"},"custom configuration")," for namespace install."),(0,o.yg)("pre",{parentName:"blockquote"},(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl apply -n argo -f https://raw.githubusercontent.com/vemonet/argo/master/manifests/namespace-install.yaml\n"))),(0,o.yg)("h3",{id:"install-the-client"},"Install the client"),(0,o.yg)("p",null,"See the ",(0,o.yg)("a",{parentName:"p",href:"/docs/workflows-argo"},"Argo workflows documentation"),"."),(0,o.yg)("h3",{id:"expose-the-ui"},"Expose the UI"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl -n argo port-forward deployment/argo-ui 8002:8001\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Access on http://localhost:8002.")))}g.isMDXComponent=!0},647:(e,t,n)=>{n.d(t,{A:()=>l});const l=n.p+"assets/images/Kubernetes-c5f36b415dd16a8fcdae01fbc8d9c940.png"},4122:(e,t,n)=>{n.d(t,{A:()=>l});const l=n.p+"assets/images/argo-logo-c091bfee39aec37120d0e879edac74f6.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9725],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>g});var l=n(6540);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function r(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var i=l.createContext({}),u=function(e){var t=l.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},c=function(e){var t=u(e.components);return l.createElement(i.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},d=l.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,i=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),d=u(n),g=a,m=d["".concat(i,".").concat(g)]||d[g]||p[g]||o;return n?l.createElement(m,r(r({ref:t},c),{},{components:n})):l.createElement(m,r({ref:t},c))}));function g(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,r=new Array(o);r[0]=d;var s={};for(var i in t)hasOwnProperty.call(t,i)&&(s[i]=t[i]);s.originalType=e,s.mdxType="string"==typeof e?e:a,r[1]=s;for(var u=2;u{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>g,frontMatter:()=>s,metadata:()=>u,toc:()=>p});var 
l=n(9668),a=n(1367),o=(n(6540),n(5680)),r=["components"],s={id:"guide-local-install",title:"Install local OpenShift"},i=void 0,u={unversionedId:"guide-local-install",id:"guide-local-install",title:"Install local OpenShift",description:"OpenShift and Kubernetes can be installed locally on a single machine for test purposes. The installation requires knowledge of your OS administration and can be quite complex. We recommend installing it locally only if really required. Otherwise, we recommend simply using Docker to test images, then deploying them on the DSRI.",source:"@site/docs/guide-local-install.md",sourceDirName:".",slug:"/guide-local-install",permalink:"/docs/guide-local-install",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-local-install.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-local-install",title:"Install local OpenShift"}},c={},p=[{value:"Install MiniShift",id:"install-minishift",level:2},{value:"Start MiniShift",id:"start-minishift",level:3},{value:"Login",id:"login",level:3},{value:"Stop",id:"stop",level:3},{value:"Reset",id:"reset",level:3},{value:"Install kubectl",id:"install-kubectl",level:2},{value:"kubectl on Ubuntu",id:"kubectl-on-ubuntu",level:3},{value:"kubectl on MacOS & Windows",id:"kubectl-on-macos--windows",level:3},{value:"Install the Dashboard UI",id:"install-the-dashboard-ui",level:3},{value:"Run kubectl",id:"run-kubectl",level:3},{value:"Enable internet",id:"enable-internet",level:3},{value:"Create persistent volume",id:"create-persistent-volume",level:3},{value:"Uninstall",id:"uninstall",level:3},{value:"Install Argo workflows",id:"install-argo-workflows",level:2},{value:"Install on your local Kubernetes",id:"install-on-your-local-kubernetes",level:3},{value:"Install the client",id:"install-the-client",level:3},{value:"Expose the UI",id:"expose-the-ui",level:3}],d={toc:p};function g(e){var t=e.components,s=(0,a.A)(e,r);return(0,o.yg)("wrapper",(0,l.A)({},d,s,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("p",null,"OpenShift and Kubernetes can be installed locally on a single machine for test purposes. The installation requires knowledge of your OS administration and can be quite complex. We recommend installing it locally only if really required. 
Otherwise, we recommend simply using Docker to test images, then deploying them on the DSRI."),(0,o.yg)("h2",{id:"install-minishift"},"Install MiniShift"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/minishift/getting-started/installing.html"},"https://docs.okd.io/latest/minishift/getting-started/installing.html")),(0,o.yg)("p",null,"You will need to ",(0,o.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/minishift/getting-started/setting-up-virtualization-environment.html"},"set up the virtualization environment")," before ",(0,o.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/minishift/getting-started/installing.html"},"installing MiniShift"),"."),(0,o.yg)("p",null,"Download ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/minishift/minishift/releases"},"MiniShift")," and unzip it."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"# For Ubuntu 18.04 and older\nsudo apt install -y libvirt-bin qemu-kvm\n# For Ubuntu 18.10 and newer (replace libvirtd by libvirt in next commands)\nsudo apt install -y qemu-kvm libvirt-daemon libvirt-daemon-system\n\n# Create the group if it does not exist\nsudo addgroup libvirtd\nsudo adduser $(whoami) libvirtd\n\nsudo usermod -a -G libvirtd $(whoami)\nnewgrp libvirtd\ncurl -L https://github.com/dhiltgen/docker-machine-kvm/releases/download/v0.10.0/docker-machine-driver-kvm-ubuntu16.04 -o /usr/local/bin/docker-machine-driver-kvm\nsudo chmod +x /usr/local/bin/docker-machine-driver-kvm\n\n# Check if libvirtd is running\nsystemctl is-active libvirtd\n# Start it if inactive\nsudo systemctl start libvirtd\n\n# Copy MiniShift into your PATH\ncp minishift-1.34.1-linux-amd64/minishift /usr/local/bin\n")),(0,o.yg)("h3",{id:"start-minishift"},"Start MiniShift"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"minishift start\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Get your local OpenShift cluster URL after the command completes.")),(0,o.yg)("h3",{id:"login"},"Login"),(0,o.yg)("p",null,"Go to your local cluster URL."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"E.g. 
",(0,o.yg)("a",{parentName:"p",href:"https://192.168.42.58:8443/console/catalog"},"https://192.168.42.58:8443/console/catalog"),".")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Username: ",(0,o.yg)("inlineCode",{parentName:"p"},"admin")," or ",(0,o.yg)("inlineCode",{parentName:"p"},"developer")),(0,o.yg)("p",{parentName:"blockquote"},"Password: anything will work")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"# As admin\noc login -u system:admin\n")),(0,o.yg)("h3",{id:"stop"},"Stop"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"minishift stop\n")),(0,o.yg)("h3",{id:"reset"},"Reset"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"minishift delete -f\n")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"install-kubectl"},"Install kubectl"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},(0,o.yg)("img",{alt:"Kubernetes",src:n(647).A,width:"209",height:"203"}))),(0,o.yg)("h3",{id:"kubectl-on-ubuntu"},"kubectl on Ubuntu"),(0,o.yg)("p",null,"For more details, read the official ",(0,o.yg)("a",{parentName:"p",href:"https://tutorials.ubuntu.com/tutorial/install-a-local-kubernetes-with-microk8s#0"},"install Kubernetes on Ubuntu tutorial")," or see the official ",(0,o.yg)("a",{parentName:"p",href:"https://ubuntu.com/kubernetes/install"},"Ubuntu Kubernetes install documentation"),"."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"sudo snap install microk8s --classic\nsudo usermod -a -G microk8s $USER\n# Restart your machine\nmkdir -p ~/.kube\nmicrok8s.kubectl config view --raw > $HOME/.kube/config\n\n# Make sure this works for the dashboard on Ubuntu\nmicrok8s.enable dashboard dns\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Only do this if kubectl is not already installed on your machine:"),(0,o.yg)("pre",{parentName:"blockquote"},(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"sudo snap alias microk8s.kubectl kubectl\n"))),(0,o.yg)("h3",{id:"kubectl-on-macos--windows"},"kubectl on MacOS & Windows"),(0,o.yg)("p",null,"kubectl is included in the Docker installation. 
Use the installer provided by Docker Hub."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Activate it in Docker Preferences > Kubernetes.")),(0,o.yg)("p",null,"For Windows, you will need to download ",(0,o.yg)("inlineCode",{parentName:"p"},"kubectl.exe")," and place it in your PATH."),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/windows/amd64/kubectl.exe"},"https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/windows/amd64/kubectl.exe"))),(0,o.yg)("p",null,"We recommend creating a ",(0,o.yg)("inlineCode",{parentName:"p"},"kubectl")," directory in ",(0,o.yg)("inlineCode",{parentName:"p"},"C:/")," and adding this ",(0,o.yg)("inlineCode",{parentName:"p"},"C:/kubectl")," to the Path environment variable in System properties > Advanced > Environment Variables > Path."),(0,o.yg)("h3",{id:"install-the-dashboard-ui"},"Install the Dashboard UI"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"# Install Kubernetes UI\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml\nkubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-dashboard-adminuser-sa.yml\nkubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-adminuser-rolebinding.yml\n\n# Get the Token to access the dashboard\nkubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')\n\n# Windows users will need to execute the 2 commands manually:\nkubectl -n kube-system get secret\n# And get the token containing 'admin-user'\nkubectl -n kube-system describe secret\n# For Windows: give the anonymous user global access\nkubectl create clusterrolebinding cluster-system-anonymous --clusterrole=admin --user=system:anonymous\n# Note: this could be improved. 
I think only the Dashboard UI didn't have the required permissions.\n\n# Finally, start the web UI, and choose the Token connection\nkubectl proxy\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/"),(0,o.yg)("p",{parentName:"blockquote"},"And provide the previously obtained token.")),(0,o.yg)("p",null,(0,o.yg)("strong",{parentName:"p"},"Warning:")," you will need to save the token to log in again next time (use your browser's password manager if possible)."),(0,o.yg)("h3",{id:"run-kubectl"},"Run kubectl"),(0,o.yg)("p",null,(0,o.yg)("inlineCode",{parentName:"p"},"kubectl")," should be running at startup."),(0,o.yg)("p",null,"Just restart the web UI:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl proxy\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/")),(0,o.yg)("h3",{id:"enable-internet"},"Enable internet"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/"},"Debug DNS on Ubuntu")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"microk8s.enable dns\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Restart your machine.")),(0,o.yg)("p",null,"You might need to change your firewall configuration:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"On Ubuntu")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"sudo ufw allow in on cni0\nsudo ufw allow out on cni0\nsudo ufw default allow routed\n")),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Try to connect to the internet from Kubernetes with the ",(0,o.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/d2s-core/blob/master/argo/tests/test-busybox.yaml"},"test-busybox pod"),".")),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl exec -ti busybox -- /bin/sh\nping google.com\n")),(0,o.yg)("h3",{id:"create-persistent-volume"},"Create persistent volume"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"# Create volume\nkubectl apply -n argo -f d2s-core/argo/storage/storage-mac.yaml\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Not working at the moment.")),(0,o.yg)("h3",{id:"uninstall"},"Uninstall"),(0,o.yg)("p",null,"Clean uninstall before version 2.2."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl get cm workflow-controller-configmap -o yaml -n kube-system --export | kubectl apply -n argo -f -\nkubectl delete -n kube-system cm workflow-controller-configmap\nkubectl delete -n kube-system deploy workflow-controller argo-ui\nkubectl delete -n kube-system sa argo argo-ui\nkubectl delete -n kube-system svc argo-ui\n")),(0,o.yg)("hr",null),(0,o.yg)("h2",{id:"install-argo-workflows"},"Install Argo workflows"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://argoproj.github.io/argo/"},(0,o.yg)("img",{alt:"Argo project",src:n(4122).A,width:"300",height:"137"}))),(0,o.yg)("h3",{id:"install-on-your-local-kubernetes"},"Install on your local Kubernetes"),(0,o.yg)("p",null,"Argo workflows will be installed in the ",(0,o.yg)("inlineCode",{parentName:"p"},"argo")," namespace. 
See the ",(0,o.yg)("a",{parentName:"p",href:"https://argoproj.github.io/docs/argo/demo.html#2-install-the-controller-and-ui"},"official Argo documentation")," for more details."),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl create ns argo\nkubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.4.2/manifests/install.yaml\n\n# Configure service account to run workflow\nkubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=default:default\n\n# Test run\nargo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"See ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/vemonet/argo/master/manifests/namespace-install.yaml"},"custom configuration")," for namespace install."),(0,o.yg)("pre",{parentName:"blockquote"},(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl apply -n argo -f https://raw.githubusercontent.com/vemonet/argo/master/manifests/namespace-install.yaml\n"))),(0,o.yg)("h3",{id:"install-the-client"},"Install the client"),(0,o.yg)("p",null,"See the ",(0,o.yg)("a",{parentName:"p",href:"/docs/workflows-argo"},"Argo workflows documentation"),"."),(0,o.yg)("h3",{id:"expose-the-ui"},"Expose the UI"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-shell"},"kubectl -n argo port-forward deployment/argo-ui 8002:8001\n")),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},"Access on http://localhost:8002.")))}g.isMDXComponent=!0},647:(e,t,n)=>{n.d(t,{A:()=>l});const l=n.p+"assets/images/Kubernetes-c5f36b415dd16a8fcdae01fbc8d9c940.png"},4122:(e,t,n)=>{n.d(t,{A:()=>l});const l=n.p+"assets/images/argo-logo-c091bfee39aec37120d0e879edac74f6.png"}}]); \ No newline at end of file diff --git a/assets/js/b6e2013e.571c43af.js b/assets/js/b6e2013e.16157cdc.js similarity index 98% rename from assets/js/b6e2013e.571c43af.js rename to assets/js/b6e2013e.16157cdc.js index da89aefca..03659df72 100644 --- a/assets/js/b6e2013e.571c43af.js +++ b/assets/js/b6e2013e.16157cdc.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9759],{5680:(e,t,r)=>{r.d(t,{xA:()=>u,yg:()=>d});var n=r(6540);function s(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function c(e){for(var t=1;t=0||(s[r]=e[r]);return s}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(s[r]=e[r])}return s}var i=n.createContext({}),l=function(e){var t=n.useContext(i),r=t;return e&&(r="function"==typeof e?e(t):c(c({},t),e)),r},u=function(e){var t=l(e.components);return n.createElement(i.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var r=e.components,s=e.mdxType,a=e.originalType,i=e.parentName,u=o(e,["components","mdxType","originalType","parentName"]),m=l(r),d=s,v=m["".concat(i,".").concat(d)]||m[d]||p[d]||a;return r?n.createElement(v,c(c({ref:t},u),{},{components:r})):n.createElement(v,c({ref:t},u))}));function d(e,t){var r=arguments,s=t&&t.mdxType;if("string"==typeof 
e||s){var a=r.length,c=new Array(a);c[0]=m;var o={};for(var i in t)hasOwnProperty.call(t,i)&&(o[i]=t[i]);o.originalType=e,o.mdxType="string"==typeof e?e:s,c[1]=o;for(var l=2;l{r.r(t),r.d(t,{assets:()=>u,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>l,toc:()=>p});var n=r(9668),s=r(1367),a=(r(6540),r(5680)),c=["components"],o={id:"access-um-servers",title:"Access UM servers"},i=void 0,l={unversionedId:"access-um-servers",id:"access-um-servers",title:"Access UM servers",description:"Request access to internal UM servers",source:"@site/docs/access-um-servers.md",sourceDirName:".",slug:"/access-um-servers",permalink:"/docs/access-um-servers",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/access-um-servers.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"access-um-servers",title:"Access UM servers"},sidebar:"docs",previous:{title:"Utilities",permalink:"/docs/catalog-utilities"},next:{title:"Checkpointing Machine Learning Training",permalink:"/docs/checkpointing-ml-training"}},u={},p=[{value:"Request access to internal UM servers",id:"request-access-to-internal-um-servers",level:2}],m={toc:p};function d(e){var t=e.components,r=(0,s.A)(e,c);return(0,a.yg)("wrapper",(0,n.A)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,a.yg)("h2",{id:"request-access-to-internal-um-servers"},"Request access to internal UM servers"),(0,a.yg)("p",null,"In certain cases, UM servers are not accessible by default from the DSRI. This is even the case for servers that are normally publicly accessible. To be able to access these UM servers from the DSRI, we need to put in the request to open the connection. "),(0,a.yg)("p",null,"Please let us know either the servername and port you like to access, or the URL (e.g. um-vm0057.unimaas.nl on port 443 or ",(0,a.yg)("a",{parentName:"p",href:"https://gitlab.maastrichtuniversity.nl"},"https://gitlab.maastrichtuniversity.nl"),"). 
You can reach out to us either by mail or by Slack."),(0,a.yg)("p",null,"UM services that are not accessible from DSRI right now:"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"central UM fileservices (MFS)")),(0,a.yg)("p",null,"The procedure is described in the diagram below:"),(0,a.yg)("img",{src:"/img/request-access-um-servers.svg",alt:"Access procedure UM servers",style:{maxWidth:"100%",maxHeight:"100%"}}))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[9759],{5680:(e,t,r)=>{r.d(t,{xA:()=>u,yg:()=>d});var n=r(6540);function s(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function c(e){for(var t=1;t=0||(s[r]=e[r]);return s}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(s[r]=e[r])}return s}var i=n.createContext({}),l=function(e){var t=n.useContext(i),r=t;return e&&(r="function"==typeof e?e(t):c(c({},t),e)),r},u=function(e){var t=l(e.components);return n.createElement(i.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var r=e.components,s=e.mdxType,a=e.originalType,i=e.parentName,u=o(e,["components","mdxType","originalType","parentName"]),m=l(r),d=s,v=m["".concat(i,".").concat(d)]||m[d]||p[d]||a;return r?n.createElement(v,c(c({ref:t},u),{},{components:r})):n.createElement(v,c({ref:t},u))}));function d(e,t){var r=arguments,s=t&&t.mdxType;if("string"==typeof e||s){var a=r.length,c=new Array(a);c[0]=m;var o={};for(var i in t)hasOwnProperty.call(t,i)&&(o[i]=t[i]);o.originalType=e,o.mdxType="string"==typeof e?e:s,c[1]=o;for(var l=2;l{r.r(t),r.d(t,{assets:()=>u,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>l,toc:()=>p});var n=r(9668),s=r(1367),a=(r(6540),r(5680)),c=["components"],o={id:"access-um-servers",title:"Access UM servers"},i=void 0,l={unversionedId:"access-um-servers",id:"access-um-servers",title:"Access UM servers",description:"Request access to internal UM servers",source:"@site/docs/access-um-servers.md",sourceDirName:".",slug:"/access-um-servers",permalink:"/docs/access-um-servers",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/access-um-servers.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"access-um-servers",title:"Access UM servers"},sidebar:"docs",previous:{title:"Utilities",permalink:"/docs/catalog-utilities"},next:{title:"Checkpointing Machine Learning Training",permalink:"/docs/checkpointing-ml-training"}},u={},p=[{value:"Request access to internal UM servers",id:"request-access-to-internal-um-servers",level:2}],m={toc:p};function d(e){var t=e.components,r=(0,s.A)(e,c);return(0,a.yg)("wrapper",(0,n.A)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,a.yg)("h2",{id:"request-access-to-internal-um-servers"},"Request access to internal UM servers"),(0,a.yg)("p",null,"In certain cases, UM servers are not accessible by default from the DSRI. This is even the case for servers that are normally publicly accessible. 
To be able to access these UM servers from the DSRI, we need to put in a request to open the connection. "),(0,a.yg)("p",null,"Please let us know either the server name and port you would like to access, or the URL (e.g. um-vm0057.unimaas.nl on port 443 or ",(0,a.yg)("a",{parentName:"p",href:"https://gitlab.maastrichtuniversity.nl"},"https://gitlab.maastrichtuniversity.nl"),"). You can reach out to us either by email or by Slack."),(0,a.yg)("p",null,"UM services that are not accessible from the DSRI right now:"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"central UM fileservices (MFS)")),(0,a.yg)("p",null,"The procedure is described in the diagram below:"),(0,a.yg)("img",{src:"/img/request-access-um-servers.svg",alt:"Access procedure UM servers",style:{maxWidth:"100%",maxHeight:"100%"}}))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/bbb26d62.ee73a795.js b/assets/js/bbb26d62.1c3dafe1.js similarity index 99% rename from assets/js/bbb26d62.ee73a795.js rename to assets/js/bbb26d62.1c3dafe1.js index 6177ae238..77c0e6574 100644 --- a/assets/js/bbb26d62.ee73a795.js +++ b/assets/js/bbb26d62.1c3dafe1.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4969],{5680:(e,n,t)=>{t.d(n,{xA:()=>d,yg:()=>g});var r=t(6540);function a(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function i(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function o(e){for(var n=1;n=0||(a[t]=e[t]);return a}(e,n);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var s=r.createContext({}),p=function(e){var n=r.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):o(o({},n),e)),t},d=function(e){var n=p(e.components);return r.createElement(s.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return r.createElement(r.Fragment,{},n)}},c=r.forwardRef((function(e,n){var t=e.components,a=e.mdxType,i=e.originalType,s=e.parentName,d=l(e,["components","mdxType","originalType","parentName"]),c=p(t),g=a,f=c["".concat(s,".").concat(g)]||c[g]||u[g]||i;return t?r.createElement(f,o(o({ref:n},d),{},{components:t})):r.createElement(f,o({ref:n},d))}));function g(e,n){var t=arguments,a=n&&n.mdxType;if("string"==typeof e||a){var i=t.length,o=new Array(i);o[0]=c;var l={};for(var s in n)hasOwnProperty.call(n,s)&&(l[s]=n[s]);l.originalType=e,l.mdxType="string"==typeof e?e:a,o[1]=l;for(var p=2;p{t.r(n),t.d(n,{assets:()=>d,contentTitle:()=>s,default:()=>g,frontMatter:()=>l,metadata:()=>p,toc:()=>u});var r=t(9668),a=t(1367),i=(t(6540),t(5680)),o=["components"],l={id:"speeding-tensorflow-dl",title:"Tensorflow Optimization"},s=void 0,p={unversionedId:"speeding-tensorflow-dl",id:"speeding-tensorflow-dl",title:"Tensorflow Optimization",description:"\ud83d\udd36 Speeding up Tensorflow based deep learning pipelines",source:"@site/docs/speeding-tensorflow-dl.md",sourceDirName:".",slug:"/speeding-tensorflow-dl",permalink:"/docs/speeding-tensorflow-dl",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/speeding-tensorflow-dl.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 
2024",frontMatter:{id:"speeding-tensorflow-dl",title:"Tensorflow Optimization"},sidebar:"docs",previous:{title:"PyTorch Profiling",permalink:"/docs/profile-pytorch-code"},next:{title:"SURF Offerings",permalink:"/docs/surf-offerings"}},d={},u=[{value:"\ud83d\udd36 Speeding up Tensorflow based deep learning pipelines",id:"-speeding-up-tensorflow-based-deep-learning-pipelines",level:2},{value:"A possible checklist for speeding up your deep learning pipeline in Tensorflow?",id:"a-possible-checklist-for-speeding-up-your-deep-learning-pipeline-in-tensorflow",level:2},{value:"Data Preparation",id:"data-preparation",level:2},{value:"Data Reading",id:"data-reading",level:2},{value:"Data Augmentation",id:"data-augmentation",level:2},{value:"Training",id:"training",level:2},{value:"Inference",id:"inference",level:2},{value:"How DSRI team can help you?",id:"how-dsri-team-can-help-you",level:2},{value:"External Resources and references",id:"external-resources-and-references",level:2}],c={toc:u};function g(e){var n=e.components,t=(0,a.A)(e,o);return(0,i.yg)("wrapper",(0,r.A)({},c,t,{components:n,mdxType:"MDXLayout"}),(0,i.yg)("h2",{id:"-speeding-up-tensorflow-based-deep-learning-pipelines"},"\ud83d\udd36 Speeding up Tensorflow based deep learning pipelines"),(0,i.yg)("p",null,"The amount of resources that you have is not nearly as important as using them to their maximum potential. It\u2019s all about doing more with less.In this write up, we discuss optimizations related to data preparation, data reading, data augmentation,training, and inference. "),(0,i.yg)("h2",{id:"a-possible-checklist-for-speeding-up-your-deep-learning-pipeline-in-tensorflow"},"A possible checklist for speeding up your deep learning pipeline in Tensorflow?"),(0,i.yg)("p",null,"Let\u2019s look at each area of the deep learning pipeline step by step, including data preparation, data reading, data augmentation, training, and, finally, inference."),(0,i.yg)("h2",{id:"data-preparation"},"Data Preparation"),(0,i.yg)("p",null,"1) Store as TFRecords\n2) Reduce Size of Input Data\n3) Use TensorFlow Datasets"),(0,i.yg)("h2",{id:"data-reading"},"Data Reading"),(0,i.yg)("p",null,"1) Use tf.data\n2) Prefetch Data\n3) Parallelize CPU Processing\n4) Parallelize I/O and Processing\n5) Enable Nondeterministic Ordering\n6) Cache Data\n7) Turn on Experimental Optimizations\n8) Autotune Parameter Values"),(0,i.yg)("h2",{id:"data-augmentation"},"Data Augmentation"),(0,i.yg)("p",null,"1) Use GPU for Augmentation"),(0,i.yg)("h2",{id:"training"},"Training"),(0,i.yg)("p",null,"1) Use Automatic Mixed Precision\n2) Use Larger Batch Size\n3) Use Multiples of Eight\n4) Find the Optimal Learning Rate\n5) Use tf.function\n6) Overtrain, and Then Generalize"),(0,i.yg)("p",null," 6a) Use progressive sampling"),(0,i.yg)("p",null," 6b) Use progressive augmentation"),(0,i.yg)("p",null," 6c) Use progressive resizing\u201d"),(0,i.yg)("p",null," 7) Install an Optimized Stack for the Hardware\n8) Optimize the Number of Parallel CPU Threads\n9) Use Better Hardware\n10) Distribute Training\n11) Examine Industry Benchmarks"),(0,i.yg)("h2",{id:"inference"},"Inference"),(0,i.yg)("p",null,"1) Use an Efficient Model\n2) Quantize the Model\n3) Prune the Model\n4) Use Fused Operations\n5) Enable GPU Persistence"),(0,i.yg)("h2",{id:"how-dsri-team-can-help-you"},"How DSRI team can help you?"),(0,i.yg)("p",null,"We can assist you with analyzing the bottleneck/s in your deep learning pipeline and recommend the improvments to speed up your 
pipeline."),(0,i.yg)("h2",{id:"external-resources-and-references"},"External Resources and references"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},'This documentation is adopted from the "Practical Deep Learning for Cloud, Mobile, and Edge by Koul etl (publish by O\u2019Reilly)')))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4969],{5680:(e,n,t)=>{t.d(n,{xA:()=>d,yg:()=>g});var r=t(6540);function a(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function i(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function o(e){for(var n=1;n=0||(a[t]=e[t]);return a}(e,n);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var s=r.createContext({}),p=function(e){var n=r.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):o(o({},n),e)),t},d=function(e){var n=p(e.components);return r.createElement(s.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return r.createElement(r.Fragment,{},n)}},c=r.forwardRef((function(e,n){var t=e.components,a=e.mdxType,i=e.originalType,s=e.parentName,d=l(e,["components","mdxType","originalType","parentName"]),c=p(t),g=a,f=c["".concat(s,".").concat(g)]||c[g]||u[g]||i;return t?r.createElement(f,o(o({ref:n},d),{},{components:t})):r.createElement(f,o({ref:n},d))}));function g(e,n){var t=arguments,a=n&&n.mdxType;if("string"==typeof e||a){var i=t.length,o=new Array(i);o[0]=c;var l={};for(var s in n)hasOwnProperty.call(n,s)&&(l[s]=n[s]);l.originalType=e,l.mdxType="string"==typeof e?e:a,o[1]=l;for(var p=2;p{t.r(n),t.d(n,{assets:()=>d,contentTitle:()=>s,default:()=>g,frontMatter:()=>l,metadata:()=>p,toc:()=>u});var r=t(9668),a=t(1367),i=(t(6540),t(5680)),o=["components"],l={id:"speeding-tensorflow-dl",title:"Tensorflow Optimization"},s=void 0,p={unversionedId:"speeding-tensorflow-dl",id:"speeding-tensorflow-dl",title:"Tensorflow Optimization",description:"\ud83d\udd36 Speeding up Tensorflow based deep learning pipelines",source:"@site/docs/speeding-tensorflow-dl.md",sourceDirName:".",slug:"/speeding-tensorflow-dl",permalink:"/docs/speeding-tensorflow-dl",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/speeding-tensorflow-dl.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"speeding-tensorflow-dl",title:"Tensorflow Optimization"},sidebar:"docs",previous:{title:"PyTorch Profiling",permalink:"/docs/profile-pytorch-code"},next:{title:"SURF Offerings",permalink:"/docs/surf-offerings"}},d={},u=[{value:"\ud83d\udd36 Speeding up Tensorflow based deep learning pipelines",id:"-speeding-up-tensorflow-based-deep-learning-pipelines",level:2},{value:"A possible checklist for speeding up your deep learning pipeline in Tensorflow?",id:"a-possible-checklist-for-speeding-up-your-deep-learning-pipeline-in-tensorflow",level:2},{value:"Data Preparation",id:"data-preparation",level:2},{value:"Data Reading",id:"data-reading",level:2},{value:"Data Augmentation",id:"data-augmentation",level:2},{value:"Training",id:"training",level:2},{value:"Inference",id:"inference",level:2},{value:"How DSRI team can help 
you?",id:"how-dsri-team-can-help-you",level:2},{value:"External Resources and references",id:"external-resources-and-references",level:2}],c={toc:u};function g(e){var n=e.components,t=(0,a.A)(e,o);return(0,i.yg)("wrapper",(0,r.A)({},c,t,{components:n,mdxType:"MDXLayout"}),(0,i.yg)("h2",{id:"-speeding-up-tensorflow-based-deep-learning-pipelines"},"\ud83d\udd36 Speeding up Tensorflow based deep learning pipelines"),(0,i.yg)("p",null,"The amount of resources that you have is not nearly as important as using them to their maximum potential. It\u2019s all about doing more with less.In this write up, we discuss optimizations related to data preparation, data reading, data augmentation,training, and inference. "),(0,i.yg)("h2",{id:"a-possible-checklist-for-speeding-up-your-deep-learning-pipeline-in-tensorflow"},"A possible checklist for speeding up your deep learning pipeline in Tensorflow?"),(0,i.yg)("p",null,"Let\u2019s look at each area of the deep learning pipeline step by step, including data preparation, data reading, data augmentation, training, and, finally, inference."),(0,i.yg)("h2",{id:"data-preparation"},"Data Preparation"),(0,i.yg)("p",null,"1) Store as TFRecords\n2) Reduce Size of Input Data\n3) Use TensorFlow Datasets"),(0,i.yg)("h2",{id:"data-reading"},"Data Reading"),(0,i.yg)("p",null,"1) Use tf.data\n2) Prefetch Data\n3) Parallelize CPU Processing\n4) Parallelize I/O and Processing\n5) Enable Nondeterministic Ordering\n6) Cache Data\n7) Turn on Experimental Optimizations\n8) Autotune Parameter Values"),(0,i.yg)("h2",{id:"data-augmentation"},"Data Augmentation"),(0,i.yg)("p",null,"1) Use GPU for Augmentation"),(0,i.yg)("h2",{id:"training"},"Training"),(0,i.yg)("p",null,"1) Use Automatic Mixed Precision\n2) Use Larger Batch Size\n3) Use Multiples of Eight\n4) Find the Optimal Learning Rate\n5) Use tf.function\n6) Overtrain, and Then Generalize"),(0,i.yg)("p",null," 6a) Use progressive sampling"),(0,i.yg)("p",null," 6b) Use progressive augmentation"),(0,i.yg)("p",null," 6c) Use progressive resizing\u201d"),(0,i.yg)("p",null," 7) Install an Optimized Stack for the Hardware\n8) Optimize the Number of Parallel CPU Threads\n9) Use Better Hardware\n10) Distribute Training\n11) Examine Industry Benchmarks"),(0,i.yg)("h2",{id:"inference"},"Inference"),(0,i.yg)("p",null,"1) Use an Efficient Model\n2) Quantize the Model\n3) Prune the Model\n4) Use Fused Operations\n5) Enable GPU Persistence"),(0,i.yg)("h2",{id:"how-dsri-team-can-help-you"},"How DSRI team can help you?"),(0,i.yg)("p",null,"We can assist you with analyzing the bottleneck/s in your deep learning pipeline and recommend the improvments to speed up your pipeline."),(0,i.yg)("h2",{id:"external-resources-and-references"},"External Resources and references"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},'This documentation is adopted from the "Practical Deep Learning for Cloud, Mobile, and Edge by Koul etl (publish by O\u2019Reilly)')))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/bd7c3f6d.f9421184.js b/assets/js/bd7c3f6d.64962f6e.js similarity index 98% rename from assets/js/bd7c3f6d.f9421184.js rename to assets/js/bd7c3f6d.64962f6e.js index 723973c5c..8ce84b9df 100644 --- a/assets/js/bd7c3f6d.f9421184.js +++ b/assets/js/bd7c3f6d.64962f6e.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3762],{5680:(e,t,n)=>{n.d(t,{xA:()=>s,yg:()=>u});var o=n(6540);function r(e,t,n){return t in 
e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function i(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var p=o.createContext({}),c=function(e){var t=o.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=c(e.components);return o.createElement(p.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},m=o.forwardRef((function(e,t){var n=e.components,r=e.mdxType,a=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),m=c(n),u=r,y=m["".concat(p,".").concat(u)]||m[u]||d[u]||a;return n?o.createElement(y,i(i({ref:t},s),{},{components:n})):o.createElement(y,i({ref:t},s))}));function u(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var a=n.length,i=new Array(a);i[0]=m;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:r,i[1]=l;for(var c=2;c{n.r(t),n.d(t,{assets:()=>s,contentTitle:()=>p,default:()=>u,frontMatter:()=>l,metadata:()=>c,toc:()=>d});var o=n(9668),r=n(1367),a=(n(6540),n(5680)),i=["components"],l={id:"openshift-delete-services",title:"Delete an application"},p=void 0,c={unversionedId:"openshift-delete-services",id:"openshift-delete-services",title:"Delete an application",description:"It is recommend to use the oc tool to delete an application, as it will allow to properly delete all objects related to the application deployment.",source:"@site/docs/openshift-delete-services.md",sourceDirName:".",slug:"/openshift-delete-services",permalink:"/docs/openshift-delete-services",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-delete-services.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-delete-services",title:"Delete an application"},sidebar:"docs",previous:{title:"Upload data",permalink:"/docs/openshift-load-data"},next:{title:"Monitor your applications",permalink:"/docs/guide-monitoring"}},s={},d=[{value:"From the terminal",id:"from-the-terminal",level:2},{value:"From the web UI",id:"from-the-web-ui",level:2}],m={toc:d};function u(e){var t=e.components,n=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,o.A)({},m,n,{components:t,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"It is recommend to use the ",(0,a.yg)("inlineCode",{parentName:"p"},"oc")," tool to delete an application, as it will allow to properly delete all objects related to the application deployment."),(0,a.yg)("admonition",{title:"Project",type:"caution"},(0,a.yg)("p",{parentName:"admonition"},"Make sure you are connected to the right project:"),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc project my-project\n"))),(0,a.yg)("h2",{id:"from-the-terminal"},"From the terminal"),(0,a.yg)("p",null,"The best way to make sure all objects related to your application have been deleted is to use the command line providing your application 
name:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application\n")),(0,a.yg)("admonition",{title:"Force deletion",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You can force the deletion if the objects are not deleting properly:"),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application\n"))),(0,a.yg)("h2",{id:"from-the-web-ui"},"From the web UI"),(0,a.yg)("p",null,"We recommend to use the ",(0,a.yg)("inlineCode",{parentName:"p"},"oc")," CLI to easily delete an application. But in the case you cannot install ",(0,a.yg)("inlineCode",{parentName:"p"},"oc")," on your computer you can delete the different objects created by the application (easy to find in the ",(0,a.yg)("strong",{parentName:"p"},"Topology")," page):"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Delete the ",(0,a.yg)("strong",{parentName:"li"},"Route")),(0,a.yg)("li",{parentName:"ol"},"Delete the ",(0,a.yg)("strong",{parentName:"li"},"Service")),(0,a.yg)("li",{parentName:"ol"},"Delete the ",(0,a.yg)("strong",{parentName:"li"},"Deployment Config")," ")),(0,a.yg)("img",{src:"/img/screenshot_delete_application.png",alt:"Delete application from the web UI",style:{maxWidth:"100%",maxHeight:"100%"}}))}u.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3762],{5680:(e,t,n)=>{n.d(t,{xA:()=>s,yg:()=>u});var o=n(6540);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function i(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var p=o.createContext({}),c=function(e){var t=o.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=c(e.components);return o.createElement(p.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},m=o.forwardRef((function(e,t){var n=e.components,r=e.mdxType,a=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),m=c(n),u=r,y=m["".concat(p,".").concat(u)]||m[u]||d[u]||a;return n?o.createElement(y,i(i({ref:t},s),{},{components:n})):o.createElement(y,i({ref:t},s))}));function u(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var a=n.length,i=new Array(a);i[0]=m;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:r,i[1]=l;for(var c=2;c{n.r(t),n.d(t,{assets:()=>s,contentTitle:()=>p,default:()=>u,frontMatter:()=>l,metadata:()=>c,toc:()=>d});var o=n(9668),r=n(1367),a=(n(6540),n(5680)),i=["components"],l={id:"openshift-delete-services",title:"Delete an application"},p=void 0,c={unversionedId:"openshift-delete-services",id:"openshift-delete-services",title:"Delete an application",description:"It is recommend to use the oc tool to delete an application, as it will allow to properly delete all objects related to the 
application deployment.",source:"@site/docs/openshift-delete-services.md",sourceDirName:".",slug:"/openshift-delete-services",permalink:"/docs/openshift-delete-services",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-delete-services.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-delete-services",title:"Delete an application"},sidebar:"docs",previous:{title:"Upload data",permalink:"/docs/openshift-load-data"},next:{title:"Monitor your applications",permalink:"/docs/guide-monitoring"}},s={},d=[{value:"From the terminal",id:"from-the-terminal",level:2},{value:"From the web UI",id:"from-the-web-ui",level:2}],m={toc:d};function u(e){var t=e.components,n=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,o.A)({},m,n,{components:t,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"It is recommend to use the ",(0,a.yg)("inlineCode",{parentName:"p"},"oc")," tool to delete an application, as it will allow to properly delete all objects related to the application deployment."),(0,a.yg)("admonition",{title:"Project",type:"caution"},(0,a.yg)("p",{parentName:"admonition"},"Make sure you are connected to the right project:"),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc project my-project\n"))),(0,a.yg)("h2",{id:"from-the-terminal"},"From the terminal"),(0,a.yg)("p",null,"The best way to make sure all objects related to your application have been deleted is to use the command line providing your application name:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application\n")),(0,a.yg)("admonition",{title:"Force deletion",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You can force the deletion if the objects are not deleting properly:"),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application\n"))),(0,a.yg)("h2",{id:"from-the-web-ui"},"From the web UI"),(0,a.yg)("p",null,"We recommend to use the ",(0,a.yg)("inlineCode",{parentName:"p"},"oc")," CLI to easily delete an application. 
But in the case you cannot install ",(0,a.yg)("inlineCode",{parentName:"p"},"oc")," on your computer you can delete the different objects created by the application (easy to find in the ",(0,a.yg)("strong",{parentName:"p"},"Topology")," page):"),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Delete the ",(0,a.yg)("strong",{parentName:"li"},"Route")),(0,a.yg)("li",{parentName:"ol"},"Delete the ",(0,a.yg)("strong",{parentName:"li"},"Service")),(0,a.yg)("li",{parentName:"ol"},"Delete the ",(0,a.yg)("strong",{parentName:"li"},"Deployment Config")," ")),(0,a.yg)("img",{src:"/img/screenshot_delete_application.png",alt:"Delete application from the web UI",style:{maxWidth:"100%",maxHeight:"100%"}}))}u.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/cdd0b013.6d4e6062.js b/assets/js/cdd0b013.54b1a2a7.js similarity index 99% rename from assets/js/cdd0b013.6d4e6062.js rename to assets/js/cdd0b013.54b1a2a7.js index 14b141b5a..74dd7f7cb 100644 --- a/assets/js/cdd0b013.6d4e6062.js +++ b/assets/js/cdd0b013.54b1a2a7.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6236],{5680:(e,t,n)=>{n.d(t,{xA:()=>u,yg:()=>d});var a=n(6540);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function o(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var p=a.createContext({}),s=function(e){var t=a.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},u=function(e){var t=s(e.components);return a.createElement(p.Provider,{value:t},e.children)},y={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},c=a.forwardRef((function(e,t){var n=e.components,r=e.mdxType,i=e.originalType,p=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),c=s(n),d=r,m=c["".concat(p,".").concat(d)]||c[d]||y[d]||i;return n?a.createElement(m,o(o({ref:t},u),{},{components:n})):a.createElement(m,o({ref:t},u))}));function d(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=n.length,o=new Array(i);o[0]=c;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:r,o[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>u,contentTitle:()=>p,default:()=>d,frontMatter:()=>l,metadata:()=>s,toc:()=>y});var a=n(9668),r=n(1367),i=(n(6540),n(5680)),o=["components"],l={id:"jupyterhub-workspace",title:"JupyterHub workspace"},p=void 0,s={unversionedId:"jupyterhub-workspace",id:"jupyterhub-workspace",title:"JupyterHub workspace",description:"\ud83e\ude90 Start your workspace",source:"@site/docs/jupyterhub-workspace.md",sourceDirName:".",slug:"/jupyterhub-workspace",permalink:"/docs/jupyterhub-workspace",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/jupyterhub-workspace.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"jupyterhub-workspace",title:"JupyterHub workspace"}},u={},y=[{value:"\ud83e\ude90 Start your workspace",id:"-start-your-workspace",level:2},{value:"\ud83d\udce6\ufe0f Manage 
---
id: jupyterhub-workspace
title: JupyterHub workspace
---

## 🪐 Start your workspace

You can easily start a data science workspace with JupyterLab, VisualStudio Code and Conda pre-installed on the DSRI with JupyterHub:

1. Connect to the UM VPN
2. Go to **[https://jupyterhub-github.apps.dsri2.unimaas.nl](https://jupyterhub-github.apps.dsri2.unimaas.nl)**
3. Login with your [GitHub](https://github.com) account
4. Choose the type of workspace, and the resource limitations
5. Optionally you can provide additional parameters as environment variables:
   1. `GIT_NAME` and `GIT_EMAIL`: the name and email that will be used when committing with git
   2. `GIT_URL`: the URL of a git repository to be automatically cloned in the workspace; if it contains a `requirements.txt`, it will be automatically installed with `pip`

Once your workspace has started you can:

- Use the `persistent` folder to put data that will be kept even when the server is stopped, or if you use a different type of workspace
- Clone your code repository with `git`
- Install packages with `mamba`/`conda` or `pip`
- Go to the workspace overview at [https://jupyterhub-github.apps.dsri2.unimaas.nl/hub/home](https://jupyterhub-github.apps.dsri2.unimaas.nl/hub/home) to see your workspace, and stop it

:::tip
Put all the commands you use to install the packages required to run your code in a file in the `persistent` folder (ideally in the git repository with your code), so you can easily reinstall your environment if your workspace is stopped. A sketch of such a script is shown below.
:::

## 📦️ Manage dependencies with Conda

In your workspace you can install new `conda` environments. If they include the packages `nb_conda_kernels` and `ipykernel`, you will be able to easily start notebooks in those environments from the JupyterLab Launcher page.

Install a conda environment from a file with `mamba` (it is like `conda`, but faster):

```bash
mamba env create -f environment.yml
```

You'll need to wait about 1 minute before the new conda environment becomes available on the JupyterLab Launcher page.

You can easily install an environment with a different version of Python if you need it. Here is an example of an `environment.yml` file that creates an environment with Python 3.9, installs the minimal dependencies required to easily start notebooks in this environment with `conda`, and installs a `pip` package:

```yaml
name: py39
channels:
  - defaults
  - conda-forge
  - anaconda
dependencies:
  - python=3.9
  - ipykernel
  - nb_conda_kernels
  - pip
  - pip:
      - matplotlib
```

## 🐙 Use git in JupyterLab

You can use `git` from the terminal.

You can also use the [JupyterLab Git extension](https://github.com/jupyterlab/jupyterlab-git) or the VisualStudio Code git integration to clone and manage your `git` repositories.

They will ask you for a username and personal access token if the repository is private, or the first time you want to push changes.
---
id: deploy-database
title: Databases
---

## SQL databases

You can easily create a database from the templates available in the DSRI OpenShift web UI catalog:

![Databases in catalog web UI](/img/screenshot-databases.png)

You can connect to a database from another application in the same project by using the database service name as hostname:

![Database service](/img/screenshot_database_service.png)

You can also use the `oc` CLI to get the services in your project:

```bash
oc get services
```

### Start PostgreSQL 🐘

Use the **Postgresql** template in the DSRI OpenShift web UI catalog to start a SQL database.

:::tip Connect to the database
When the database has been deployed, you can connect from another pod using your favorite language and connector.
:::

Example with the `psql` Command Line Interface:

```bash
apt-get update && apt-get install postgresql-client -y
```

Connect to the Postgresql database using the service name (change it depending on the username and database name you chose):

```bash
psql -h postgresql-db -U postgres db
```

Check out the [dsri-demo repository](https://github.com/MaastrichtU-IDS/dsri-demo) for a quick demo of accessing and using a PostgreSQL database from a Jupyter notebook on the DSRI.

### Start MySQL 🐬

Use the **MySQL** template in the DSRI OpenShift web UI catalog.

:::tip Connect to the database
When the database has been deployed, you can connect from another pod using your favorite language and connector.
:::

Example with the `mysql` Command Line Interface:

```bash
apt-get update && apt-get install mariadb-client -y
```

Connect to the MySQL database using the service name:

```bash
mysql -h example-mysql -p
```

Check out the [dsri-demo repository](https://github.com/MaastrichtU-IDS/dsri-demo) for a quick demo of accessing and using a MySQL database from a Jupyter notebook on the DSRI.

> Alternatively, MySQL databases can be started using Helm, see the [Helm documentation page](/docs/helm#install-a-helm-chart) for more details.

## NoSQL databases

### MongoDB 🌿

[MongoDB](https://www.mongodb.com/) is a general purpose, document-based, distributed database built for modern application developers and for the cloud era.

Use the **MongoDB** template in the DSRI OpenShift web UI catalog.

:::tip Connect to the database
Use the service name as hostname to connect from another pod in the same project.
:::

### Redis 🎲

[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server, since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and HyperLogLogs.

Use the **Redis** template in the DSRI OpenShift web UI catalog.

:::tip Connect to the database
Use the service name as hostname to connect from another pod in the same project.
:::

## Graph databases

### OpenLink Virtuoso triplestore

Search for the **Virtuoso triplestore** template in the DSRI web UI catalog. Instantiate the template to create a Virtuoso triplestore in your project.

The deployment is based on the latest open source version of Virtuoso: [https://hub.docker.com/r/openlink/virtuoso-opensource-7](https://hub.docker.com/r/openlink/virtuoso-opensource-7)

:::tip Connect to the database
Use the service name as hostname to connect from another pod in the same project.
:::

### Ontotext GraphDB triplestore

Use the official DockerHub image if you have an enterprise license. Or [build](https://maastrichtu-ids.github.io/dsri-documentation/docs/guide-dockerfile-to-openshift) the GraphDB free edition image from [graphdb-docker on GitHub](https://github.com/Ontotext-AD/graphdb-docker).

After copying the `.zip` file into the `graphdb-docker/free-edition` folder, go to the `graphdb-docker` folder in your terminal:

```bash
cd graphdb-docker
```

Before creating your GraphDB ImageStream, make sure you are in the right project:

```bash
oc project my-project
```

Create the ImageStream for GraphDB:

```bash
oc new-build --name graphdb --binary
```

Build the image on the DSRI and save it in the ImageStream:

```bash
oc start-build graphdb --from-dir=free-edition --follow --wait
```

You can now use the **Ontotext GraphDB** template to deploy a GraphDB instance on the DSRI.

Use the name of the ImageStream when instantiating the template. You can check if the image was properly built in **Search** > Filter **Resources** for ImageStreams.

:::tip Connect to the database
Use the service name as hostname to connect from another pod in the same project.
:::

### AllegroGraph

[AllegroGraph®](https://franz.com/agraph/) is a modern, high-performance, persistent graph database. It supports SPARQL, RDFS++, and Prolog reasoning from numerous client applications.

AllegroGraph has not been tested on the DSRI yet, but it can be deployed on Kubernetes using Helm, cf. [https://www.github.com/franzinc/agraph-examples/tree/master/clustering%2Fkubernetes%2Fmmr%2Fkubernetes-mmr.md](https://www.github.com/franzinc/agraph-examples/tree/master/clustering%2Fkubernetes%2Fmmr%2Fkubernetes-mmr.md)
Library",id:"deep-java-library",level:3},{value:"Sonnet",id:"sonnet",level:3},{value:"Keras",id:"keras",level:3},{value:"Metaflow",id:"metaflow",level:3}],d={toc:g};function y(e){var t=e.components,r=(0,n.A)(e,i);return(0,o.yg)("wrapper",(0,a.A)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("admonition",{title:"Work in progress",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"This page is in development, feel free to ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/docs/tools-machine-learning.md"},"edit it")," to add more information.")),(0,o.yg)("h2",{id:"machine-learning-libraries"},"Machine Learning libraries"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"See ",(0,o.yg)("a",{parentName:"li",href:"https://www.saagie.com/blog/machine-learning-for-grandmas/"},"this vulgarisation article")," explaining the different principles of Machine Learning."),(0,o.yg)("li",{parentName:"ul"},"The ",(0,o.yg)("a",{parentName:"li",href:"https://docs.microsoft.com/en-us/azure/machine-learning/algorithm-cheat-sheet"},"Azure Machine Learning Algorithm Cheat Sheet")," helps you choose the right algorithm for a predictive analytics model."),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials"},"This repository")," provides tutorials and examples to a vast number of Machine / Deep Learning library.")),(0,o.yg)("h3",{id:"scikit-learn"},"SciKit Learn"),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://scikit-learn.org/stable/"},"https://scikit-learn.org/stable/"))),(0,o.yg)("h2",{id:"deep-learning-libraries"},"Deep Learning libraries"),(0,o.yg)("p",null,"See ",(0,o.yg)("a",{parentName:"p",href:"https://towardsdatascience.com/top-10-best-deep-learning-frameworks-in-2019-5ccb90ea6de"},"this article")," for more details about modern Deep Learning libraries."),(0,o.yg)("h3",{id:"tensorflow"},"Tensorflow"),(0,o.yg)("p",null,"Python library developed by Google."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://www.tensorflow.org/"},"https://www.tensorflow.org/"))),(0,o.yg)("h3",{id:"pytorch"},"PyTorch"),(0,o.yg)("p",null,"Python library developed by Facebook."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://pytorch.org/"},"https://pytorch.org/"))),(0,o.yg)("h3",{id:"deep-java-library"},"Deep Java Library"),(0,o.yg)("p",null,"Java library developed by Amazon. See the ",(0,o.yg)("a",{parentName:"p",href:"https://towardsdatascience.com/introducing-deep-java-library-djl-9de98de8c6ca"},"introduction article"),"."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://djl.ai/"},"https://djl.ai/"))),(0,o.yg)("h3",{id:"sonnet"},"Sonnet"),(0,o.yg)("p",null,"Layer on top of Tensorflow."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://sonnet.readthedocs.io/en/latest/"},"https://sonnet.readthedocs.io/en/latest/"))),(0,o.yg)("h3",{id:"keras"},"Keras"),(0,o.yg)("p",null,"Python library. 
Layer on top of Tensorflow, CNTK, Theano."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://keras.io/"},"https://keras.io/"))),(0,o.yg)("h3",{id:"metaflow"},"Metaflow"),(0,o.yg)("p",null,"Layer on top of Tensorflow, PyTorch, SciKit Learn developed by Netflix. "),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://metaflow.org/"},"https://metaflow.org/"))))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5299],{5680:(e,t,r)=>{r.d(t,{xA:()=>c,yg:()=>y});var a=r(6540);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,a)}return r}function i(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var p=a.createContext({}),s=function(e){var t=a.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=s(e.components);return a.createElement(p.Provider,{value:t},e.children)},g={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var r=e.components,n=e.mdxType,o=e.originalType,p=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),d=s(r),y=n,h=d["".concat(p,".").concat(y)]||d[y]||g[y]||o;return r?a.createElement(h,i(i({ref:t},c),{},{components:r})):a.createElement(h,i({ref:t},c))}));function y(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var o=r.length,i=new Array(o);i[0]=d;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:n,i[1]=l;for(var s=2;s{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>p,default:()=>y,frontMatter:()=>l,metadata:()=>s,toc:()=>g});var a=r(9668),n=r(1367),o=(r(6540),r(5680)),i=["components"],l={id:"tools-machine-learning",title:"Libraries for Machine Learning"},p=void 0,s={unversionedId:"tools-machine-learning",id:"tools-machine-learning",title:"Libraries for Machine Learning",description:"This page is in development, feel free to edit it to add more information.",source:"@site/docs/tools-machine-learning.md",sourceDirName:".",slug:"/tools-machine-learning",permalink:"/docs/tools-machine-learning",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/tools-machine-learning.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"tools-machine-learning",title:"Libraries for Machine Learning"},sidebar:"docs",previous:{title:"Create a new Project",permalink:"/docs/project-management"},next:{title:"Glossary",permalink:"/docs/glossary"}},c={},g=[{value:"Machine Learning libraries",id:"machine-learning-libraries",level:2},{value:"SciKit Learn",id:"scikit-learn",level:3},{value:"Deep Learning libraries",id:"deep-learning-libraries",level:2},{value:"Tensorflow",id:"tensorflow",level:3},{value:"PyTorch",id:"pytorch",level:3},{value:"Deep Java 
Library",id:"deep-java-library",level:3},{value:"Sonnet",id:"sonnet",level:3},{value:"Keras",id:"keras",level:3},{value:"Metaflow",id:"metaflow",level:3}],d={toc:g};function y(e){var t=e.components,r=(0,n.A)(e,i);return(0,o.yg)("wrapper",(0,a.A)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("admonition",{title:"Work in progress",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"This page is in development, feel free to ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/docs/tools-machine-learning.md"},"edit it")," to add more information.")),(0,o.yg)("h2",{id:"machine-learning-libraries"},"Machine Learning libraries"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"See ",(0,o.yg)("a",{parentName:"li",href:"https://www.saagie.com/blog/machine-learning-for-grandmas/"},"this vulgarisation article")," explaining the different principles of Machine Learning."),(0,o.yg)("li",{parentName:"ul"},"The ",(0,o.yg)("a",{parentName:"li",href:"https://docs.microsoft.com/en-us/azure/machine-learning/algorithm-cheat-sheet"},"Azure Machine Learning Algorithm Cheat Sheet")," helps you choose the right algorithm for a predictive analytics model."),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials"},"This repository")," provides tutorials and examples to a vast number of Machine / Deep Learning library.")),(0,o.yg)("h3",{id:"scikit-learn"},"SciKit Learn"),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://scikit-learn.org/stable/"},"https://scikit-learn.org/stable/"))),(0,o.yg)("h2",{id:"deep-learning-libraries"},"Deep Learning libraries"),(0,o.yg)("p",null,"See ",(0,o.yg)("a",{parentName:"p",href:"https://towardsdatascience.com/top-10-best-deep-learning-frameworks-in-2019-5ccb90ea6de"},"this article")," for more details about modern Deep Learning libraries."),(0,o.yg)("h3",{id:"tensorflow"},"Tensorflow"),(0,o.yg)("p",null,"Python library developed by Google."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://www.tensorflow.org/"},"https://www.tensorflow.org/"))),(0,o.yg)("h3",{id:"pytorch"},"PyTorch"),(0,o.yg)("p",null,"Python library developed by Facebook."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://pytorch.org/"},"https://pytorch.org/"))),(0,o.yg)("h3",{id:"deep-java-library"},"Deep Java Library"),(0,o.yg)("p",null,"Java library developed by Amazon. See the ",(0,o.yg)("a",{parentName:"p",href:"https://towardsdatascience.com/introducing-deep-java-library-djl-9de98de8c6ca"},"introduction article"),"."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://djl.ai/"},"https://djl.ai/"))),(0,o.yg)("h3",{id:"sonnet"},"Sonnet"),(0,o.yg)("p",null,"Layer on top of Tensorflow."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://sonnet.readthedocs.io/en/latest/"},"https://sonnet.readthedocs.io/en/latest/"))),(0,o.yg)("h3",{id:"keras"},"Keras"),(0,o.yg)("p",null,"Python library. 
Layer on top of Tensorflow, CNTK, Theano."),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://keras.io/"},"https://keras.io/"))),(0,o.yg)("h3",{id:"metaflow"},"Metaflow"),(0,o.yg)("p",null,"Layer on top of Tensorflow, PyTorch, SciKit Learn developed by Netflix. "),(0,o.yg)("blockquote",null,(0,o.yg)("p",{parentName:"blockquote"},(0,o.yg)("a",{parentName:"p",href:"https://metaflow.org/"},"https://metaflow.org/"))))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/d306cda8.b590a76f.js b/assets/js/d306cda8.5dd22227.js similarity index 99% rename from assets/js/d306cda8.b590a76f.js rename to assets/js/d306cda8.5dd22227.js index dece02da3..494878533 100644 --- a/assets/js/d306cda8.b590a76f.js +++ b/assets/js/d306cda8.5dd22227.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4100],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>m});var o=n(6540);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var p=o.createContext({}),s=function(e){var t=o.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},c=function(e){var t=s(e.components);return o.createElement(p.Provider,{value:t},e.children)},g={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},u=o.forwardRef((function(e,t){var n=e.components,a=e.mdxType,r=e.originalType,p=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),u=s(n),m=a,y=u["".concat(p,".").concat(m)]||u[m]||g[m]||r;return n?o.createElement(y,i(i({ref:t},c),{},{components:n})):o.createElement(y,i({ref:t},c))}));function m(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var r=n.length,i=new Array(r);i[0]=u;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>p,default:()=>m,frontMatter:()=>l,metadata:()=>s,toc:()=>g});var o=n(9668),a=n(1367),r=(n(6540),n(5680)),i=["components"],l={id:"guide-monitoring",title:"Monitor your applications"},p=void 0,s={unversionedId:"guide-monitoring",id:"guide-monitoring",title:"Monitor your applications",description:"Monitor your application resources use",source:"@site/docs/guide-monitoring.md",sourceDirName:".",slug:"/guide-monitoring",permalink:"/docs/guide-monitoring",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-monitoring.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-monitoring",title:"Monitor your applications"},sidebar:"docs",previous:{title:"Delete an application",permalink:"/docs/openshift-delete-services"},next:{title:"GPU applications",permalink:"/docs/deploy-on-gpu"}},c={},g=[{value:"Monitor your application resources use",id:"monitor-your-application-resources-use",level:2},{value:"Debug an application 
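All of the Python libraries above can be installed in a DSRI workspace with `pip` or `conda`/`mamba`. A minimal sketch for SciKit Learn, using its standard PyPI package name (swap in `tensorflow` or `torch` as needed):

```bash
# Install the library in the current environment
pip install scikit-learn

# Quick sanity check that the install worked
python -c "import sklearn; print(sklearn.__version__)"
```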
deployment",id:"debug-an-application-deployment",level:2}],u={toc:g};function m(e){var t=e.components,n=(0,a.A)(e,i);return(0,r.yg)("wrapper",(0,o.A)({},u,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("h2",{id:"monitor-your-application-resources-use"},"Monitor your application resources use"),(0,r.yg)("p",null,"You can have an overview of the different resources consumed by the applications running in your project by going to the ",(0,r.yg)("strong",{parentName:"p"},"Monitoring")," tab (in the developer view)"),(0,r.yg)("img",{src:"/img/screenshot_monitoring.png",alt:"Filter templates catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You can also check the CPU and memory usage directly from the terminal inside a specific container"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Go to your application terminal, and run:"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"top\n"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Check the number of Cpu(s) used at the top:"),(0,r.yg)("blockquote",{parentName:"li"},(0,r.yg)("p",{parentName:"blockquote"},"%Cpu(s): ",(0,r.yg)("strong",{parentName:"p"},"3,3")," us,"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Check the memory usage with the ",(0,r.yg)("inlineCode",{parentName:"p"},"used")," column:"),(0,r.yg)("blockquote",{parentName:"li"},(0,r.yg)("p",{parentName:"blockquote"},"MiB Mem : ",(0,r.yg)("strong",{parentName:"p"},"515543.2")," total, ",(0,r.yg)("strong",{parentName:"p"},"403486.8")," free, ",(0,r.yg)("strong",{parentName:"p"},"98612.0")," used, ",(0,r.yg)("strong",{parentName:"p"},"13444.5")," buff/cache")))),(0,r.yg)("h2",{id:"debug-an-application-deployment"},"Debug an application deployment"),(0,r.yg)("p",null,"If your application is facing issues when deployed:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"If the pod is not building, or not deploying properly, take a look at the ",(0,r.yg)("strong",{parentName:"li"},"Events")," tab of the deployment. It shows a log of all events faced by the deployment (assign to node, pull image, build, etc). Additionally, all ",(0,r.yg)("strong",{parentName:"li"},"Events")," in your project can be accessed in ",(0,r.yg)("strong",{parentName:"li"},"Monitoring"),".")),(0,r.yg)("img",{src:"/img/screenshot_debug_event.png",alt:"Filter templates catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{title:"Various ways to check the events",type:"tip"},(0,r.yg)("p",{parentName:"admonition"},"You can also check the ",(0,r.yg)("inlineCode",{parentName:"p"},"Monitoring")," page in the left side menu to see all events in a project."),(0,r.yg)("p",{parentName:"admonition"},"Or use the terminal:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get events\n"))),(0,r.yg)("ol",{start:2},(0,r.yg)("li",{parentName:"ol"},"When a pod is running you can check its logs in the ",(0,r.yg)("strong",{parentName:"li"},"Logs")," tab (after going to the pod page). 
It will show the logs output of the container, equivalent to doing ",(0,r.yg)("inlineCode",{parentName:"li"},"docker logs"),".")),(0,r.yg)("admonition",{title:"Get help",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"If you cannot figure out the issue by yourself:"),(0,r.yg)("ol",{parentName:"admonition"},(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("strong",{parentName:"li"},"Gather relevant information to help the DSRI team")," to solve your issue: URL to the faulty application, which error was shown in the ",(0,r.yg)("strong",{parentName:"li"},"Events")," tab? Or in the ",(0,r.yg)("strong",{parentName:"li"},"Logs")," tab?"),(0,r.yg)("li",{parentName:"ol"},"Seek help on the ",(0,r.yg)("inlineCode",{parentName:"li"},"#helpdesk")," DSRI Slack channel"),(0,r.yg)("li",{parentName:"ol"},"Checkout if an issue have already been created for this problem, or create a new one: ",(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"https://github.com/MaastrichtU-IDS/dsri-documentation/issues")))))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4100],{5680:(e,t,n)=>{n.d(t,{xA:()=>c,yg:()=>m});var o=n(6540);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var p=o.createContext({}),s=function(e){var t=o.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},c=function(e){var t=s(e.components);return o.createElement(p.Provider,{value:t},e.children)},g={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},u=o.forwardRef((function(e,t){var n=e.components,a=e.mdxType,r=e.originalType,p=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),u=s(n),m=a,y=u["".concat(p,".").concat(m)]||u[m]||g[m]||r;return n?o.createElement(y,i(i({ref:t},c),{},{components:n})):o.createElement(y,i({ref:t},c))}));function m(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var r=n.length,i=new Array(r);i[0]=u;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>p,default:()=>m,frontMatter:()=>l,metadata:()=>s,toc:()=>g});var o=n(9668),a=n(1367),r=(n(6540),n(5680)),i=["components"],l={id:"guide-monitoring",title:"Monitor your applications"},p=void 0,s={unversionedId:"guide-monitoring",id:"guide-monitoring",title:"Monitor your applications",description:"Monitor your application resources use",source:"@site/docs/guide-monitoring.md",sourceDirName:".",slug:"/guide-monitoring",permalink:"/docs/guide-monitoring",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-monitoring.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-monitoring",title:"Monitor your applications"},sidebar:"docs",previous:{title:"Delete an 
application",permalink:"/docs/openshift-delete-services"},next:{title:"GPU applications",permalink:"/docs/deploy-on-gpu"}},c={},g=[{value:"Monitor your application resources use",id:"monitor-your-application-resources-use",level:2},{value:"Debug an application deployment",id:"debug-an-application-deployment",level:2}],u={toc:g};function m(e){var t=e.components,n=(0,a.A)(e,i);return(0,r.yg)("wrapper",(0,o.A)({},u,n,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("h2",{id:"monitor-your-application-resources-use"},"Monitor your application resources use"),(0,r.yg)("p",null,"You can have an overview of the different resources consumed by the applications running in your project by going to the ",(0,r.yg)("strong",{parentName:"p"},"Monitoring")," tab (in the developer view)"),(0,r.yg)("img",{src:"/img/screenshot_monitoring.png",alt:"Filter templates catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You can also check the CPU and memory usage directly from the terminal inside a specific container"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Go to your application terminal, and run:"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"top\n"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Check the number of Cpu(s) used at the top:"),(0,r.yg)("blockquote",{parentName:"li"},(0,r.yg)("p",{parentName:"blockquote"},"%Cpu(s): ",(0,r.yg)("strong",{parentName:"p"},"3,3")," us,"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Check the memory usage with the ",(0,r.yg)("inlineCode",{parentName:"p"},"used")," column:"),(0,r.yg)("blockquote",{parentName:"li"},(0,r.yg)("p",{parentName:"blockquote"},"MiB Mem : ",(0,r.yg)("strong",{parentName:"p"},"515543.2")," total, ",(0,r.yg)("strong",{parentName:"p"},"403486.8")," free, ",(0,r.yg)("strong",{parentName:"p"},"98612.0")," used, ",(0,r.yg)("strong",{parentName:"p"},"13444.5")," buff/cache")))),(0,r.yg)("h2",{id:"debug-an-application-deployment"},"Debug an application deployment"),(0,r.yg)("p",null,"If your application is facing issues when deployed:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"If the pod is not building, or not deploying properly, take a look at the ",(0,r.yg)("strong",{parentName:"li"},"Events")," tab of the deployment. It shows a log of all events faced by the deployment (assign to node, pull image, build, etc). Additionally, all ",(0,r.yg)("strong",{parentName:"li"},"Events")," in your project can be accessed in ",(0,r.yg)("strong",{parentName:"li"},"Monitoring"),".")),(0,r.yg)("img",{src:"/img/screenshot_debug_event.png",alt:"Filter templates catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{title:"Various ways to check the events",type:"tip"},(0,r.yg)("p",{parentName:"admonition"},"You can also check the ",(0,r.yg)("inlineCode",{parentName:"p"},"Monitoring")," page in the left side menu to see all events in a project."),(0,r.yg)("p",{parentName:"admonition"},"Or use the terminal:"),(0,r.yg)("pre",{parentName:"admonition"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get events\n"))),(0,r.yg)("ol",{start:2},(0,r.yg)("li",{parentName:"ol"},"When a pod is running you can check its logs in the ",(0,r.yg)("strong",{parentName:"li"},"Logs")," tab (after going to the pod page). 
It will show the logs output of the container, equivalent to doing ",(0,r.yg)("inlineCode",{parentName:"li"},"docker logs"),".")),(0,r.yg)("admonition",{title:"Get help",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"If you cannot figure out the issue by yourself:"),(0,r.yg)("ol",{parentName:"admonition"},(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("strong",{parentName:"li"},"Gather relevant information to help the DSRI team")," to solve your issue: URL to the faulty application, which error was shown in the ",(0,r.yg)("strong",{parentName:"li"},"Events")," tab? Or in the ",(0,r.yg)("strong",{parentName:"li"},"Logs")," tab?"),(0,r.yg)("li",{parentName:"ol"},"Seek help on the ",(0,r.yg)("inlineCode",{parentName:"li"},"#helpdesk")," DSRI Slack channel"),(0,r.yg)("li",{parentName:"ol"},"Checkout if an issue have already been created for this problem, or create a new one: ",(0,r.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"https://github.com/MaastrichtU-IDS/dsri-documentation/issues")))))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/d8f096f7.0dc3596e.js b/assets/js/d8f096f7.ae3fb799.js similarity index 99% rename from assets/js/d8f096f7.0dc3596e.js rename to assets/js/d8f096f7.ae3fb799.js index be7acf75b..5cbd4ef51 100644 --- a/assets/js/d8f096f7.0dc3596e.js +++ b/assets/js/d8f096f7.ae3fb799.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[145],{5680:(e,n,t)=>{t.d(n,{xA:()=>c,yg:()=>d});var o=t(6540);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function a(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var l=o.createContext({}),s=function(e){var n=o.useContext(l),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},c=function(e){var n=s(e.components);return o.createElement(l.Provider,{value:n},e.children)},m={inlineCode:"code",wrapper:function(e){var n=e.children;return o.createElement(o.Fragment,{},n)}},u=o.forwardRef((function(e,n){var t=e.components,r=e.mdxType,a=e.originalType,l=e.parentName,c=p(e,["components","mdxType","originalType","parentName"]),u=s(t),d=r,y=u["".concat(l,".").concat(d)]||u[d]||m[d]||a;return t?o.createElement(y,i(i({ref:n},c),{},{components:t})):o.createElement(y,i({ref:n},c))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var a=t.length,i=new Array(a);i[0]=u;var p={};for(var l in n)hasOwnProperty.call(n,l)&&(p[l]=n[l]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var s=2;s{t.r(n),t.d(n,{assets:()=>c,contentTitle:()=>l,default:()=>d,frontMatter:()=>p,metadata:()=>s,toc:()=>m});var o=t(9668),r=t(1367),a=(t(6540),t(5680)),i=["components"],p={id:"mpi-jobs",title:"Run MPI jobs"},l=void 0,s={unversionedId:"mpi-jobs",id:"mpi-jobs",title:"Run MPI jobs",description:"We deployed the MPI Operator from Kubeflow to run MPI jobs on the 
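When hunting a failing deployment from the terminal, a few standard `oc` commands cover most cases. A minimal sketch, where `<pod-name>` is a placeholder for the name shown by `oc get pods`:

```bash
# List recent events, most recent last
oc get events --sort-by='.lastTimestamp'

# Show the pods and their status (CrashLoopBackOff, ImagePullBackOff, ...)
oc get pods

# Detailed state and events for one pod; replace <pod-name>
oc describe pod <pod-name>

# Stream the container logs, like `docker logs -f`
oc logs -f <pod-name>
```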
DSRI.",source:"@site/docs/mpi-jobs.md",sourceDirName:".",slug:"/mpi-jobs",permalink:"/docs/mpi-jobs",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/mpi-jobs.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"mpi-jobs",title:"Run MPI jobs"},sidebar:"docs",previous:{title:"Spark cluster",permalink:"/docs/deploy-spark"},next:{title:"Neuroscience research",permalink:"/docs/neuroscience"}},c={},m=[{value:"Run MPI jobs on CPU",id:"run-mpi-jobs-on-cpu",level:2}],u={toc:m};function d(e){var n=e.components,t=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,o.A)({},u,t,{components:n,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"We deployed the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator"},"MPI Operator")," from Kubeflow to run MPI jobs on the DSRI."),(0,a.yg)("blockquote",null,(0,a.yg)("p",{parentName:"blockquote"},"The MPI Operator makes it easy to run allreduce-style distributed training on Kubernetes. Please check out ",(0,a.yg)("a",{parentName:"p",href:"https://medium.com/kubeflow/introduction-to-kubeflow-mpi-operator-and-industry-adoption-296d5f2e6edc"},"this blog post")," for an introduction to MPI Operator and its industry adoption.")),(0,a.yg)("h2",{id:"run-mpi-jobs-on-cpu"},"Run MPI jobs on CPU"),(0,a.yg)("p",null,"Checkout the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/tree/master/examples/horovod"},"repository of the CPU benchmark")," for a complete example of an MPI job: python script, ",(0,a.yg)("inlineCode",{parentName:"p"},"Dockerfile"),", and the job deployment YAML."),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Clone the repository, and go to the example folder:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"git clone https://github.com/kubeflow/mpi-operator.git\ncd mpi-operator/examples/horovod\n")),(0,a.yg)("ol",{start:2},(0,a.yg)("li",{parentName:"ol"},"Open the ",(0,a.yg)("inlineCode",{parentName:"li"},"tensorflow-mnist.yaml")," file, and fix the ",(0,a.yg)("inlineCode",{parentName:"li"},"apiVersion")," on the first line:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},"# From\napiVersion: kubeflow.org/v1\n# To\napiVersion: kubeflow.org/v1alpha2\n")),(0,a.yg)("p",null,"You will also need to specify those containers can run with the ",(0,a.yg)("inlineCode",{parentName:"p"},"root")," user by adding the ",(0,a.yg)("inlineCode",{parentName:"p"},"serviceAccountName")," between ",(0,a.yg)("inlineCode",{parentName:"p"},"spec:")," and ",(0,a.yg)("inlineCode",{parentName:"p"},"container:")," for the launcher and the worker templates:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"}," template:\n spec:\n serviceAccountName: anyuid\n containers:\n - image: docker.io/kubeflow/mpi-horovod-mnist\n")),(0,a.yg)("p",null,"Your ",(0,a.yg)("inlineCode",{parentName:"p"},"tensorflow-mnist.yaml")," file should look like this: "),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},'apiVersion: kubeflow.org/v1alpha2\nkind: MPIJob\nmetadata:\n name: tensorflow-mnist\nspec:\n slotsPerWorker: 1\n cleanPodPolicy: Running\n mpiReplicaSpecs:\n Launcher:\n replicas: 1\n template:\n spec:\n serviceAccountName: anyuid\n containers:\n - image: docker.io/kubeflow/mpi-horovod-mnist\n name: mpi-launcher\n command:\n - mpirun\n args:\n - -np\n - "2"\n - --allow-run-as-root\n - 
-bind-to\n - none\n - -map-by\n - slot\n - -x\n - LD_LIBRARY_PATH\n - -x\n - PATH\n - -mca\n - pml\n - ob1\n - -mca\n - btl\n - ^openib\n - python\n - /examples/tensorflow_mnist.py\n resources:\n limits:\n cpu: 1\n memory: 2Gi\n Worker:\n replicas: 2\n template:\n spec:\n serviceAccountName: anyuid\n containers:\n - image: docker.io/kubeflow/mpi-horovod-mnist\n name: mpi-worker\n resources:\n limits:\n cpu: 2\n memory: 4Gi\n\n')),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"Once this has been set, create the job in your current project on the DSRI (change with ",(0,a.yg)("inlineCode",{parentName:"li"},"oc project my-project"),"):")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc create -f tensorflow-mnist.yaml\n")),(0,a.yg)("p",null,"You should see the 2 workers and the main job running in your project ",(0,a.yg)("strong",{parentName:"p"},"Topology")," page in the DSRI web UI. You can then easily check the logs of the launcher and workers."),(0,a.yg)("p",null,"To run your own MPI job on the DSRI, you can take a look at, and edit, the different files provided by the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/tree/master/examples/horovod"},"MPI Operator example"),":"),(0,a.yg)("p",null,"\ud83d\udc0d ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/blob/master/examples/horovod/tensorflow_mnist.py"},(0,a.yg)("inlineCode",{parentName:"a"},"tensorflow_mnist.py")),": the python script with the actual job to run"),(0,a.yg)("p",null,"\ud83d\udc33 ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/blob/master/examples/horovod/Dockerfile.cpu"},(0,a.yg)("inlineCode",{parentName:"a"},"Dockerfile.cpu")),": the Dockerfile to define the image of the containers in which your job will run (install dependencies)"),(0,a.yg)("p",null,"\u26f5\ufe0f ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/blob/master/examples/horovod/tensorflow-mnist.yaml"},(0,a.yg)("inlineCode",{parentName:"a"},"tensorflow-mnist.yaml")),": the YAML file to define the MPI deployment on Kubernetes (number and limits of workers, ",(0,a.yg)("inlineCode",{parentName:"p"},"mpirun")," command, etc)"),(0,a.yg)("p",null,"Visit the ",(0,a.yg)("a",{parentName:"p",href:"https://www.kubeflow.org/docs/components/training/mpi/#creating-an-mpi-job"},"Kubeflow documentation to create a MPI job")," for more details."),(0,a.yg)("admonition",{title:"Contact us",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"Feel free to contact us on the DSRI Slack ",(0,a.yg)("strong",{parentName:"p"},"#helpdesk")," channel to discuss the use of MPI on the DSRI.")))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[145],{5680:(e,n,t)=>{t.d(n,{xA:()=>c,yg:()=>d});var o=t(6540);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function a(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var l=o.createContext({}),s=function(e){var n=o.useContext(l),t=n;return e&&(t="function"==typeof 
e?e(n):i(i({},n),e)),t},c=function(e){var n=s(e.components);return o.createElement(l.Provider,{value:n},e.children)},m={inlineCode:"code",wrapper:function(e){var n=e.children;return o.createElement(o.Fragment,{},n)}},u=o.forwardRef((function(e,n){var t=e.components,r=e.mdxType,a=e.originalType,l=e.parentName,c=p(e,["components","mdxType","originalType","parentName"]),u=s(t),d=r,y=u["".concat(l,".").concat(d)]||u[d]||m[d]||a;return t?o.createElement(y,i(i({ref:n},c),{},{components:t})):o.createElement(y,i({ref:n},c))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var a=t.length,i=new Array(a);i[0]=u;var p={};for(var l in n)hasOwnProperty.call(n,l)&&(p[l]=n[l]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var s=2;s{t.r(n),t.d(n,{assets:()=>c,contentTitle:()=>l,default:()=>d,frontMatter:()=>p,metadata:()=>s,toc:()=>m});var o=t(9668),r=t(1367),a=(t(6540),t(5680)),i=["components"],p={id:"mpi-jobs",title:"Run MPI jobs"},l=void 0,s={unversionedId:"mpi-jobs",id:"mpi-jobs",title:"Run MPI jobs",description:"We deployed the MPI Operator from Kubeflow to run MPI jobs on the DSRI.",source:"@site/docs/mpi-jobs.md",sourceDirName:".",slug:"/mpi-jobs",permalink:"/docs/mpi-jobs",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/mpi-jobs.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"mpi-jobs",title:"Run MPI jobs"},sidebar:"docs",previous:{title:"Spark cluster",permalink:"/docs/deploy-spark"},next:{title:"Neuroscience research",permalink:"/docs/neuroscience"}},c={},m=[{value:"Run MPI jobs on CPU",id:"run-mpi-jobs-on-cpu",level:2}],u={toc:m};function d(e){var n=e.components,t=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,o.A)({},u,t,{components:n,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"We deployed the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator"},"MPI Operator")," from Kubeflow to run MPI jobs on the DSRI."),(0,a.yg)("blockquote",null,(0,a.yg)("p",{parentName:"blockquote"},"The MPI Operator makes it easy to run allreduce-style distributed training on Kubernetes. 
Please check out ",(0,a.yg)("a",{parentName:"p",href:"https://medium.com/kubeflow/introduction-to-kubeflow-mpi-operator-and-industry-adoption-296d5f2e6edc"},"this blog post")," for an introduction to MPI Operator and its industry adoption.")),(0,a.yg)("h2",{id:"run-mpi-jobs-on-cpu"},"Run MPI jobs on CPU"),(0,a.yg)("p",null,"Checkout the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/tree/master/examples/horovod"},"repository of the CPU benchmark")," for a complete example of an MPI job: python script, ",(0,a.yg)("inlineCode",{parentName:"p"},"Dockerfile"),", and the job deployment YAML."),(0,a.yg)("ol",null,(0,a.yg)("li",{parentName:"ol"},"Clone the repository, and go to the example folder:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"git clone https://github.com/kubeflow/mpi-operator.git\ncd mpi-operator/examples/horovod\n")),(0,a.yg)("ol",{start:2},(0,a.yg)("li",{parentName:"ol"},"Open the ",(0,a.yg)("inlineCode",{parentName:"li"},"tensorflow-mnist.yaml")," file, and fix the ",(0,a.yg)("inlineCode",{parentName:"li"},"apiVersion")," on the first line:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},"# From\napiVersion: kubeflow.org/v1\n# To\napiVersion: kubeflow.org/v1alpha2\n")),(0,a.yg)("p",null,"You will also need to specify those containers can run with the ",(0,a.yg)("inlineCode",{parentName:"p"},"root")," user by adding the ",(0,a.yg)("inlineCode",{parentName:"p"},"serviceAccountName")," between ",(0,a.yg)("inlineCode",{parentName:"p"},"spec:")," and ",(0,a.yg)("inlineCode",{parentName:"p"},"container:")," for the launcher and the worker templates:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"}," template:\n spec:\n serviceAccountName: anyuid\n containers:\n - image: docker.io/kubeflow/mpi-horovod-mnist\n")),(0,a.yg)("p",null,"Your ",(0,a.yg)("inlineCode",{parentName:"p"},"tensorflow-mnist.yaml")," file should look like this: "),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},'apiVersion: kubeflow.org/v1alpha2\nkind: MPIJob\nmetadata:\n name: tensorflow-mnist\nspec:\n slotsPerWorker: 1\n cleanPodPolicy: Running\n mpiReplicaSpecs:\n Launcher:\n replicas: 1\n template:\n spec:\n serviceAccountName: anyuid\n containers:\n - image: docker.io/kubeflow/mpi-horovod-mnist\n name: mpi-launcher\n command:\n - mpirun\n args:\n - -np\n - "2"\n - --allow-run-as-root\n - -bind-to\n - none\n - -map-by\n - slot\n - -x\n - LD_LIBRARY_PATH\n - -x\n - PATH\n - -mca\n - pml\n - ob1\n - -mca\n - btl\n - ^openib\n - python\n - /examples/tensorflow_mnist.py\n resources:\n limits:\n cpu: 1\n memory: 2Gi\n Worker:\n replicas: 2\n template:\n spec:\n serviceAccountName: anyuid\n containers:\n - image: docker.io/kubeflow/mpi-horovod-mnist\n name: mpi-worker\n resources:\n limits:\n cpu: 2\n memory: 4Gi\n\n')),(0,a.yg)("ol",{start:3},(0,a.yg)("li",{parentName:"ol"},"Once this has been set, create the job in your current project on the DSRI (change with ",(0,a.yg)("inlineCode",{parentName:"li"},"oc project my-project"),"):")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"oc create -f tensorflow-mnist.yaml\n")),(0,a.yg)("p",null,"You should see the 2 workers and the main job running in your project ",(0,a.yg)("strong",{parentName:"p"},"Topology")," page in the DSRI web UI. 
You can then easily check the logs of the launcher and workers."),(0,a.yg)("p",null,"To run your own MPI job on the DSRI, you can take a look at, and edit, the different files provided by the ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/tree/master/examples/horovod"},"MPI Operator example"),":"),(0,a.yg)("p",null,"\ud83d\udc0d ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/blob/master/examples/horovod/tensorflow_mnist.py"},(0,a.yg)("inlineCode",{parentName:"a"},"tensorflow_mnist.py")),": the python script with the actual job to run"),(0,a.yg)("p",null,"\ud83d\udc33 ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/blob/master/examples/horovod/Dockerfile.cpu"},(0,a.yg)("inlineCode",{parentName:"a"},"Dockerfile.cpu")),": the Dockerfile to define the image of the containers in which your job will run (install dependencies)"),(0,a.yg)("p",null,"\u26f5\ufe0f ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/kubeflow/mpi-operator/blob/master/examples/horovod/tensorflow-mnist.yaml"},(0,a.yg)("inlineCode",{parentName:"a"},"tensorflow-mnist.yaml")),": the YAML file to define the MPI deployment on Kubernetes (number and limits of workers, ",(0,a.yg)("inlineCode",{parentName:"p"},"mpirun")," command, etc)"),(0,a.yg)("p",null,"Visit the ",(0,a.yg)("a",{parentName:"p",href:"https://www.kubeflow.org/docs/components/training/mpi/#creating-an-mpi-job"},"Kubeflow documentation to create a MPI job")," for more details."),(0,a.yg)("admonition",{title:"Contact us",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"Feel free to contact us on the DSRI Slack ",(0,a.yg)("strong",{parentName:"p"},"#helpdesk")," channel to discuss the use of MPI on the DSRI.")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/dbeba2b5.075f10f0.js b/assets/js/dbeba2b5.84064520.js similarity index 99% rename from assets/js/dbeba2b5.075f10f0.js rename to assets/js/dbeba2b5.84064520.js index 0a7fbf5c2..81bd2e8a9 100644 --- a/assets/js/dbeba2b5.075f10f0.js +++ b/assets/js/dbeba2b5.84064520.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4070],{5680:(e,t,r)=>{r.d(t,{xA:()=>c,yg:()=>d});var o=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function n(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var p=o.createContext({}),s=function(e){var t=o.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=s(e.components);return o.createElement(p.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},m=o.forwardRef((function(e,t){var r=e.components,a=e.mdxType,n=e.originalType,p=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),m=s(r),d=a,g=m["".concat(p,".").concat(d)]||m[d]||u[d]||n;return r?o.createElement(g,i(i({ref:t},c),{},{components:r})):o.createElement(g,i({ref:t},c))}));function d(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var n=r.length,i=new Array(n);i[0]=m;var l={};for(var p in 
t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var s=2;s{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>p,default:()=>d,frontMatter:()=>l,metadata:()=>s,toc:()=>u});var o=r(9668),a=r(1367),n=(r(6540),r(5680)),i=["components"],l={id:"operators",title:"Install from Operators"},p=void 0,s={unversionedId:"operators",id:"operators",title:"Install from Operators",description:"The Operator Framework is an open source toolkit to manage Kubernetes native applications, called Operators, in an effective, automated, and scalable way.",source:"@site/docs/operators.md",sourceDirName:".",slug:"/operators",permalink:"/docs/operators",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/operators.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"operators",title:"Install from Operators"},sidebar:"docs",previous:{title:"Install from Helm charts",permalink:"/docs/helm"},next:{title:"Jupyter Notebooks",permalink:"/docs/deploy-jupyter"}},c={},u=[{value:"Install existing Operators",id:"install-existing-operators",level:2},{value:"Build Operators",id:"build-operators",level:2},{value:"External resources",id:"external-resources",level:3}],m={toc:u};function d(e){var t=e.components,r=(0,a.A)(e,i);return(0,n.yg)("wrapper",(0,o.A)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("p",null,"The ",(0,n.yg)("a",{parentName:"p",href:"https://operatorframework.io/"},"Operator Framework")," is an open source toolkit to manage Kubernetes native applications, called Operators, in an effective, automated, and scalable way."),(0,n.yg)("admonition",{title:"Use existing Operators",type:"tip"},(0,n.yg)("p",{parentName:"admonition"},"You can explore published Operators at ",(0,n.yg)("a",{parentName:"p",href:"https://operatorhub.io"},"https://operatorhub.io"))),(0,n.yg)("h2",{id:"install-existing-operators"},"Install existing Operators"),(0,n.yg)("admonition",{title:"Contact us",type:"info"},(0,n.yg)("p",{parentName:"admonition"},"Contact us on the DSRI Slack ",(0,n.yg)("strong",{parentName:"p"},"#helpdesk")," channel, if you want to install a new Operator on the DSRI.")),(0,n.yg)("h2",{id:"build-operators"},"Build Operators"),(0,n.yg)("p",null,"Install the ",(0,n.yg)("inlineCode",{parentName:"p"},"operator-sdk")," tool. See the ",(0,n.yg)("a",{parentName:"p",href:"https://sdk.operatorframework.io/docs/installation/"},"official documentation"),"."),(0,n.yg)("p",null,"Operators can be built using 3 different approaches:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("strong",{parentName:"li"},"Helm"),": a framework to define the deployment logic based on regular kubernetes YAML, but less capabilities for complete auto-update and insights. 
"),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("strong",{parentName:"li"},"Ansible"),": define the deployment logic with Ansible, provide maximum capabilities."),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("strong",{parentName:"li"},"Golang"),": define the deployment logic in Golang, provide maximum capabilities, but require more code.")),(0,n.yg)("h3",{id:"external-resources"},"External resources"),(0,n.yg)("p",null,"Documentation:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://sdk.operatorframework.io"},"Official docs")," to build Operators",(0,n.yg)("ul",{parentName:"li"},(0,n.yg)("li",{parentName:"ul"},"Official docs to build Operator from Helm charts: ",(0,n.yg)("a",{parentName:"li",href:"https://sdk.operatorframework.io/docs/building-operators/helm/tutorial"},"https://sdk.operatorframework.io/docs/building-operators/helm/tutorial")),(0,n.yg)("li",{parentName:"ul"},"Official docs to build Operator with Ansible: ",(0,n.yg)("a",{parentName:"li",href:"https://sdk.operatorframework.io/docs/building-operators/ansible/quickstart"},"https://sdk.operatorframework.io/docs/building-operators/ansible/quickstart")))),(0,n.yg)("li",{parentName:"ul"},"RedHat Certified Operator guide",(0,n.yg)("ul",{parentName:"li"},(0,n.yg)("li",{parentName:"ul"},"Make an operator use ",(0,n.yg)("inlineCode",{parentName:"li"},"anyuid"),": ",(0,n.yg)("a",{parentName:"li",href:"https://redhat-connect.gitbook.io/certified-operator-guide/what-if-ive-already-published-a-community-operator/applying-security-context-constraints"},"https://redhat-connect.gitbook.io/certified-operator-guide/what-if-ive-already-published-a-community-operator/applying-security-context-constraints")),(0,n.yg)("li",{parentName:"ul"},"Submit community Operators: ",(0,n.yg)("a",{parentName:"li",href:"https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/submitting-a-community-operator-to-operatorhub.io"},"https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/submitting-a-community-operator-to-operatorhub.io"))))),(0,n.yg)("p",null,"Examples:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"Deployment example: ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/microcks/microcks-ansible-operator/blob/master/roles/microcks/tasks/main.yml"},"https://github.com/microcks/microcks-ansible-operator/blob/master/roles/microcks/tasks/main.yml"))),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"Older OpenShift guide: ",(0,n.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.1/applications/operator_sdk/osdk-ansible.html"},"https://docs.openshift.com/container-platform/4.1/applications/operator_sdk/osdk-ansible.html"))),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"Simple older example with route: ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/djzager/ansible-role-hello-world-k8s"},"https://github.com/djzager/ansible-role-hello-world-k8s")))))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[4070],{5680:(e,t,r)=>{r.d(t,{xA:()=>c,yg:()=>d});var o=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function n(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return 
Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var p=o.createContext({}),s=function(e){var t=o.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=s(e.components);return o.createElement(p.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},m=o.forwardRef((function(e,t){var r=e.components,a=e.mdxType,n=e.originalType,p=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),m=s(r),d=a,g=m["".concat(p,".").concat(d)]||m[d]||u[d]||n;return r?o.createElement(g,i(i({ref:t},c),{},{components:r})):o.createElement(g,i({ref:t},c))}));function d(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var n=r.length,i=new Array(n);i[0]=m;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var s=2;s{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>p,default:()=>d,frontMatter:()=>l,metadata:()=>s,toc:()=>u});var o=r(9668),a=r(1367),n=(r(6540),r(5680)),i=["components"],l={id:"operators",title:"Install from Operators"},p=void 0,s={unversionedId:"operators",id:"operators",title:"Install from Operators",description:"The Operator Framework is an open source toolkit to manage Kubernetes native applications, called Operators, in an effective, automated, and scalable way.",source:"@site/docs/operators.md",sourceDirName:".",slug:"/operators",permalink:"/docs/operators",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/operators.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"operators",title:"Install from Operators"},sidebar:"docs",previous:{title:"Install from Helm charts",permalink:"/docs/helm"},next:{title:"Jupyter Notebooks",permalink:"/docs/deploy-jupyter"}},c={},u=[{value:"Install existing Operators",id:"install-existing-operators",level:2},{value:"Build Operators",id:"build-operators",level:2},{value:"External resources",id:"external-resources",level:3}],m={toc:u};function d(e){var t=e.components,r=(0,a.A)(e,i);return(0,n.yg)("wrapper",(0,o.A)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("p",null,"The ",(0,n.yg)("a",{parentName:"p",href:"https://operatorframework.io/"},"Operator Framework")," is an open source toolkit to manage Kubernetes native applications, called Operators, in an effective, automated, and scalable way."),(0,n.yg)("admonition",{title:"Use existing Operators",type:"tip"},(0,n.yg)("p",{parentName:"admonition"},"You can explore published Operators at ",(0,n.yg)("a",{parentName:"p",href:"https://operatorhub.io"},"https://operatorhub.io"))),(0,n.yg)("h2",{id:"install-existing-operators"},"Install existing Operators"),(0,n.yg)("admonition",{title:"Contact us",type:"info"},(0,n.yg)("p",{parentName:"admonition"},"Contact us on the DSRI Slack ",(0,n.yg)("strong",{parentName:"p"},"#helpdesk")," channel, if you want to install a new Operator on the DSRI.")),(0,n.yg)("h2",{id:"build-operators"},"Build Operators"),(0,n.yg)("p",null,"Install the ",(0,n.yg)("inlineCode",{parentName:"p"},"operator-sdk")," tool. 
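For instance, a minimal install sketch on a Linux machine, assuming you fetch a release binary (the version below is an assumption, check the releases page for the current one):

```bash
# Download a released operator-sdk binary (adjust version, OS and architecture)
export OPERATOR_SDK_VERSION=v1.33.0  # assumption: pick the latest release
curl -LO "https://github.com/operator-framework/operator-sdk/releases/download/${OPERATOR_SDK_VERSION}/operator-sdk_linux_amd64"
chmod +x operator-sdk_linux_amd64
sudo mv operator-sdk_linux_amd64 /usr/local/bin/operator-sdk
operator-sdk version
```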
See the ",(0,n.yg)("a",{parentName:"p",href:"https://sdk.operatorframework.io/docs/installation/"},"official documentation"),"."),(0,n.yg)("p",null,"Operators can be built using 3 different approaches:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("strong",{parentName:"li"},"Helm"),": a framework to define the deployment logic based on regular kubernetes YAML, but less capabilities for complete auto-update and insights. "),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("strong",{parentName:"li"},"Ansible"),": define the deployment logic with Ansible, provide maximum capabilities."),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("strong",{parentName:"li"},"Golang"),": define the deployment logic in Golang, provide maximum capabilities, but require more code.")),(0,n.yg)("h3",{id:"external-resources"},"External resources"),(0,n.yg)("p",null,"Documentation:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("a",{parentName:"li",href:"https://sdk.operatorframework.io"},"Official docs")," to build Operators",(0,n.yg)("ul",{parentName:"li"},(0,n.yg)("li",{parentName:"ul"},"Official docs to build Operator from Helm charts: ",(0,n.yg)("a",{parentName:"li",href:"https://sdk.operatorframework.io/docs/building-operators/helm/tutorial"},"https://sdk.operatorframework.io/docs/building-operators/helm/tutorial")),(0,n.yg)("li",{parentName:"ul"},"Official docs to build Operator with Ansible: ",(0,n.yg)("a",{parentName:"li",href:"https://sdk.operatorframework.io/docs/building-operators/ansible/quickstart"},"https://sdk.operatorframework.io/docs/building-operators/ansible/quickstart")))),(0,n.yg)("li",{parentName:"ul"},"RedHat Certified Operator guide",(0,n.yg)("ul",{parentName:"li"},(0,n.yg)("li",{parentName:"ul"},"Make an operator use ",(0,n.yg)("inlineCode",{parentName:"li"},"anyuid"),": ",(0,n.yg)("a",{parentName:"li",href:"https://redhat-connect.gitbook.io/certified-operator-guide/what-if-ive-already-published-a-community-operator/applying-security-context-constraints"},"https://redhat-connect.gitbook.io/certified-operator-guide/what-if-ive-already-published-a-community-operator/applying-security-context-constraints")),(0,n.yg)("li",{parentName:"ul"},"Submit community Operators: ",(0,n.yg)("a",{parentName:"li",href:"https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/submitting-a-community-operator-to-operatorhub.io"},"https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/submitting-a-community-operator-to-operatorhub.io"))))),(0,n.yg)("p",null,"Examples:"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"Deployment example: ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/microcks/microcks-ansible-operator/blob/master/roles/microcks/tasks/main.yml"},"https://github.com/microcks/microcks-ansible-operator/blob/master/roles/microcks/tasks/main.yml"))),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"Older OpenShift guide: ",(0,n.yg)("a",{parentName:"p",href:"https://docs.openshift.com/container-platform/4.1/applications/operator_sdk/osdk-ansible.html"},"https://docs.openshift.com/container-platform/4.1/applications/operator_sdk/osdk-ansible.html"))),(0,n.yg)("li",{parentName:"ul"},(0,n.yg)("p",{parentName:"li"},"Simple older example with route: ",(0,n.yg)("a",{parentName:"p",href:"https://github.com/djzager/ansible-role-hello-world-k8s"},"https://github.com/djzager/ansible-role-hello-world-k8s")))))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git 
a/assets/js/dca73612.7d8098f0.js b/assets/js/dca73612.61ddb0b9.js similarity index 99% rename from assets/js/dca73612.7d8098f0.js rename to assets/js/dca73612.61ddb0b9.js index 4f753d988..add4864d1 100644 --- a/assets/js/dca73612.7d8098f0.js +++ b/assets/js/dca73612.61ddb0b9.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6690],{5680:(e,t,o)=>{o.d(t,{xA:()=>s,yg:()=>u});var a=o(6540);function n(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function l(e,t){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),o.push.apply(o,a)}return o}function r(e){for(var t=1;t=0||(n[o]=e[o]);return n}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(n[o]=e[o])}return n}var p=a.createContext({}),d=function(e){var t=a.useContext(p),o=t;return e&&(o="function"==typeof e?e(t):r(r({},t),e)),o},s=function(e){var t=d(e.components);return a.createElement(p.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},y=a.forwardRef((function(e,t){var o=e.components,n=e.mdxType,l=e.originalType,p=e.parentName,s=i(e,["components","mdxType","originalType","parentName"]),y=d(o),u=n,g=y["".concat(p,".").concat(u)]||y[u]||c[u]||l;return o?a.createElement(g,r(r({ref:t},s),{},{components:o})):a.createElement(g,r({ref:t},s))}));function u(e,t){var o=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var l=o.length,r=new Array(l);r[0]=y;var i={};for(var p in t)hasOwnProperty.call(t,p)&&(i[p]=t[p]);i.originalType=e,i.mdxType="string"==typeof e?e:n,r[1]=i;for(var d=2;d{o.r(t),o.d(t,{assets:()=>s,contentTitle:()=>p,default:()=>u,frontMatter:()=>i,metadata:()=>d,toc:()=>c});var a=o(9668),n=o(1367),l=(o(6540),o(5680)),r=["components"],i={id:"openshift-load-data",title:"Upload data"},p=void 0,d={unversionedId:"openshift-load-data",id:"openshift-load-data",title:"Upload data",description:"In RStudio, JupyterLab and VSCode",source:"@site/docs/openshift-load-data.md",sourceDirName:".",slug:"/openshift-load-data",permalink:"/docs/openshift-load-data",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-load-data.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-load-data",title:"Upload data"},sidebar:"docs",previous:{title:"Prepare your project",permalink:"/docs/prepare-project-for-dsri"},next:{title:"Delete an application",permalink:"/docs/openshift-delete-services"}},s={},c=[{value:"In RStudio, JupyterLab and VSCode",id:"in-rstudio-jupyterlab-and-vscode",level:2},{value:"Copy large files with the terminal",id:"copy-large-files-with-the-terminal",level:2},{value:"Copy from local to pod",id:"copy-from-local-to-pod",level:3},{value:"Copy from pod to local",id:"copy-from-pod-to-local",level:3},{value:"Download data from SURFdrive",id:"download-data-from-surfdrive",level:3},{value:"Synchronizes files with oc rsync",id:"synchronizes-files-with-oc-rsync",level:2},{value:"Sync local to pod",id:"sync-local-to-pod",level:3},{value:"Sync pod to local",id:"sync-pod-to-local",level:3},{value:"More options",id:"more-options",level:3},{value:"One-liner",id:"one-liner",level:2}],y={toc:c};function u(e){var 
t=e.components,o=(0,n.A)(e,r);return(0,l.yg)("wrapper",(0,a.A)({},y,o,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("h2",{id:"in-rstudio-jupyterlab-and-vscode"},"In RStudio, JupyterLab and VSCode"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"If you are using ",(0,l.yg)("strong",{parentName:"li"},"JupyterLab")," or ",(0,l.yg)("strong",{parentName:"li"},"VSCode")," you should be able to load data to the container by simply ",(0,l.yg)("strong",{parentName:"li"},"drag and drop the files to upload")," in the JupyterLab/VSCode web UI."),(0,l.yg)("li",{parentName:"ul"},"For ",(0,l.yg)("strong",{parentName:"li"},"RStudio"),", use the Upload file button in the RStudio web UI to upload files from your computer to the RStudio workspace.")),(0,l.yg)("admonition",{title:"File too big",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"If those solutions don't work due to the files size, try one of the solutions below.")),(0,l.yg)("h2",{id:"copy-large-files-with-the-terminal"},"Copy large files with the terminal"),(0,l.yg)("p",null,"The quickest way to upload large files or folders from a laptop or server to the DSRI is to use the ",(0,l.yg)("inlineCode",{parentName:"p"},"oc")," command line interface."),(0,l.yg)("admonition",{title:"Install the client",type:"tip"},(0,l.yg)("p",{parentName:"admonition"},"To install the ",(0,l.yg)("inlineCode",{parentName:"p"},"oc")," client on your laptop/server, visit the ",(0,l.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"Install the client")," page")),(0,l.yg)("p",null,(0,l.yg)("inlineCode",{parentName:"p"},"oc cp")," directly copy, and overwrite existing files, from a laptop or server to an Application pod on the DSRI."),(0,l.yg)("p",null,"First get the ",(0,l.yg)("inlineCode",{parentName:"p"},"")," using your application name:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod --selector app=\n")),(0,l.yg)("h3",{id:"copy-from-local-to-pod"},"Copy from local to pod"),(0,l.yg)("p",null,"Folders are uploaded recursively by default:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc cp :\n")),(0,l.yg)("admonition",{title:"Use absolute path in the pod",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"You need to provide the absolute (full) path where you want to copy it in the pod. Use your application workspace path, e.g. 
",(0,l.yg)("inlineCode",{parentName:"p"},"/home/jovyan")," for JupyterLab or ",(0,l.yg)("inlineCode",{parentName:"p"},"/home/rstudio")," for RStudio)")),(0,l.yg)("p",null,"For example:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc cp my-folder jupyterlab-000:/home/jovyan\n")),(0,l.yg)("p",null,"You can also use this one-liner to automatically get the pod ID based on your app label:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pod --selector app= | xargs -I{} oc cp {}:\n")),(0,l.yg)("h3",{id:"copy-from-pod-to-local"},"Copy from pod to local"),(0,l.yg)("p",null,"Just do the inverse:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc cp : \n")),(0,l.yg)("h3",{id:"download-data-from-surfdrive"},"Download data from SURFdrive"),(0,l.yg)("p",null,"You can download data from your SURFdrive to your pod by creating a public link to the file:"),(0,l.yg)("ol",null,(0,l.yg)("li",{parentName:"ol"},"Go to the file in SURFdrive you'd like to share"),(0,l.yg)("li",{parentName:"ol"},"Click share and the create public link"),(0,l.yg)("li",{parentName:"ol"},"Fill in a name for the public link (like DSRI). The name does not matter much, but it can help you keep track of the goal of the public link."),(0,l.yg)("li",{parentName:"ol"},"Click copy to clipboard"),(0,l.yg)("li",{parentName:"ol"},"Visit link in browser and copy the direct URL displayed on that page."),(0,l.yg)("li",{parentName:"ol"},'Use the direct URL you just copied to download the file using either wget or curl (e.g. "wget ',(0,l.yg)("a",{parentName:"li",href:"https://surfdrive.surf.nl/files/index.php/s/5mFwyAKj4UexlJb/download%22"},'https://surfdrive.surf.nl/files/index.php/s/5mFwyAKj4UexlJb/download"'),")"),(0,l.yg)("li",{parentName:"ol"},"Revoke link in the SURFdrive portal")),(0,l.yg)("h2",{id:"synchronizes-files-with-oc-rsync"},"Synchronizes files with ",(0,l.yg)("inlineCode",{parentName:"h2"},"oc rsync")),(0,l.yg)("p",null,"If you have a lot of large files and/or they are updated regularly, you can use ",(0,l.yg)("inlineCode",{parentName:"p"},"rsync")," as it synchronizes the files if they already exist, preventing duplication and making synchronization faster. You can also see the progress with ",(0,l.yg)("inlineCode",{parentName:"p"},"rsync")," which you cannot with ",(0,l.yg)("inlineCode",{parentName:"p"},"cp"),". 
And if the upload is stopped for any reason ",(0,l.yg)("inlineCode",{parentName:"p"},"rsync")," should pick it up from where it stopped (instead of restarting from scratch like ",(0,l.yg)("inlineCode",{parentName:"p"},"oc cp")," does)"),(0,l.yg)("admonition",{type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"Rsync does not work with symlinks (created with ",(0,l.yg)("inlineCode",{parentName:"p"},"ln -s"),")")),(0,l.yg)("h3",{id:"sync-local-to-pod"},"Sync local to pod"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc rsync --progress :\n")),(0,l.yg)("p",null,"You can also use this one-liner to automatically get the pod ID based on your app label:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pod --selector app= | xargs -I{} oc rsync --progress {}:\n")),(0,l.yg)("h3",{id:"sync-pod-to-local"},"Sync pod to local"),(0,l.yg)("p",null,"Again, do the inverse:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc rsync --progress : \n")),(0,l.yg)("h3",{id:"more-options"},"More options"),(0,l.yg)("p",null,"You can use more options to improve the upload of large files:"),(0,l.yg)("table",null,(0,l.yg)("thead",{parentName:"table"},(0,l.yg)("tr",{parentName:"thead"},(0,l.yg)("th",{parentName:"tr",align:null},(0,l.yg)("inlineCode",{parentName:"th"},"--compress")),(0,l.yg)("th",{parentName:"tr",align:null},"compress file data during the transfer"))),(0,l.yg)("tbody",{parentName:"table"},(0,l.yg)("tr",{parentName:"tbody"},(0,l.yg)("td",{parentName:"tr",align:null},(0,l.yg)("inlineCode",{parentName:"td"},"--delete")),(0,l.yg)("td",{parentName:"tr",align:null},"delete files not present in source")),(0,l.yg)("tr",{parentName:"tbody"},(0,l.yg)("td",{parentName:"tr",align:null},(0,l.yg)("inlineCode",{parentName:"td"},"--watch")),(0,l.yg)("td",{parentName:"tr",align:null},"Watch directory for changes and resync automatically")))),(0,l.yg)("h2",{id:"one-liner"},"One-liner"))}u.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6690],{5680:(e,t,o)=>{o.d(t,{xA:()=>s,yg:()=>u});var a=o(6540);function n(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function l(e,t){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),o.push.apply(o,a)}return o}function r(e){for(var t=1;t=0||(n[o]=e[o]);return n}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(n[o]=e[o])}return n}var p=a.createContext({}),d=function(e){var t=a.useContext(p),o=t;return e&&(o="function"==typeof e?e(t):r(r({},t),e)),o},s=function(e){var t=d(e.components);return a.createElement(p.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},y=a.forwardRef((function(e,t){var o=e.components,n=e.mdxType,l=e.originalType,p=e.parentName,s=i(e,["components","mdxType","originalType","parentName"]),y=d(o),u=n,g=y["".concat(p,".").concat(u)]||y[u]||c[u]||l;return o?a.createElement(g,r(r({ref:t},s),{},{components:o})):a.createElement(g,r({ref:t},s))}));function u(e,t){var o=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var l=o.length,r=new Array(l);r[0]=y;var i={};for(var p in 
t)hasOwnProperty.call(t,p)&&(i[p]=t[p]);i.originalType=e,i.mdxType="string"==typeof e?e:n,r[1]=i;for(var d=2;d{o.r(t),o.d(t,{assets:()=>s,contentTitle:()=>p,default:()=>u,frontMatter:()=>i,metadata:()=>d,toc:()=>c});var a=o(9668),n=o(1367),l=(o(6540),o(5680)),r=["components"],i={id:"openshift-load-data",title:"Upload data"},p=void 0,d={unversionedId:"openshift-load-data",id:"openshift-load-data",title:"Upload data",description:"In RStudio, JupyterLab and VSCode",source:"@site/docs/openshift-load-data.md",sourceDirName:".",slug:"/openshift-load-data",permalink:"/docs/openshift-load-data",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/openshift-load-data.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"openshift-load-data",title:"Upload data"},sidebar:"docs",previous:{title:"Prepare your project",permalink:"/docs/prepare-project-for-dsri"},next:{title:"Delete an application",permalink:"/docs/openshift-delete-services"}},s={},c=[{value:"In RStudio, JupyterLab and VSCode",id:"in-rstudio-jupyterlab-and-vscode",level:2},{value:"Copy large files with the terminal",id:"copy-large-files-with-the-terminal",level:2},{value:"Copy from local to pod",id:"copy-from-local-to-pod",level:3},{value:"Copy from pod to local",id:"copy-from-pod-to-local",level:3},{value:"Download data from SURFdrive",id:"download-data-from-surfdrive",level:3},{value:"Synchronize files with oc rsync",id:"synchronizes-files-with-oc-rsync",level:2},{value:"Sync local to pod",id:"sync-local-to-pod",level:3},{value:"Sync pod to local",id:"sync-pod-to-local",level:3},{value:"More options",id:"more-options",level:3},{value:"One-liner",id:"one-liner",level:2}],y={toc:c};function u(e){var t=e.components,o=(0,n.A)(e,r);return(0,l.yg)("wrapper",(0,a.A)({},y,o,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("h2",{id:"in-rstudio-jupyterlab-and-vscode"},"In RStudio, JupyterLab and VSCode"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"If you are using ",(0,l.yg)("strong",{parentName:"li"},"JupyterLab")," or ",(0,l.yg)("strong",{parentName:"li"},"VSCode")," you should be able to load data to the container by simply ",(0,l.yg)("strong",{parentName:"li"},"drag and drop the files to upload")," in the JupyterLab/VSCode web UI."),(0,l.yg)("li",{parentName:"ul"},"For ",(0,l.yg)("strong",{parentName:"li"},"RStudio"),", use the Upload file button in the RStudio web UI to upload files from your computer to the RStudio workspace.")),(0,l.yg)("admonition",{title:"File too big",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"If those solutions don't work due to the file size, try one of the solutions below.")),(0,l.yg)("h2",{id:"copy-large-files-with-the-terminal"},"Copy large files with the terminal"),(0,l.yg)("p",null,"The quickest way to upload large files or folders from a laptop or server to the DSRI is to use the ",(0,l.yg)("inlineCode",{parentName:"p"},"oc")," command line interface."),(0,l.yg)("admonition",{title:"Install the client",type:"tip"},(0,l.yg)("p",{parentName:"admonition"},"To install the ",(0,l.yg)("inlineCode",{parentName:"p"},"oc")," client on your laptop/server, visit the ",(0,l.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"Install the client")," page")),(0,l.yg)("p",null,(0,l.yg)("inlineCode",{parentName:"p"},"oc cp")," directly copies files, overwriting existing ones, from a laptop or server to an application pod on the DSRI."),(0,l.yg)("p",null,"First get the 
",(0,l.yg)("inlineCode",{parentName:"p"},"")," using your application name:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc get pod --selector app=\n")),(0,l.yg)("h3",{id:"copy-from-local-to-pod"},"Copy from local to pod"),(0,l.yg)("p",null,"Folders are uploaded recursively by default:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc cp :\n")),(0,l.yg)("admonition",{title:"Use absolute path in the pod",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"You need to provide the absolute (full) path where you want to copy it in the pod. Use your application workspace path, e.g. ",(0,l.yg)("inlineCode",{parentName:"p"},"/home/jovyan")," for JupyterLab or ",(0,l.yg)("inlineCode",{parentName:"p"},"/home/rstudio")," for RStudio)")),(0,l.yg)("p",null,"For example:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc cp my-folder jupyterlab-000:/home/jovyan\n")),(0,l.yg)("p",null,"You can also use this one-liner to automatically get the pod ID based on your app label:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pod --selector app= | xargs -I{} oc cp {}:\n")),(0,l.yg)("h3",{id:"copy-from-pod-to-local"},"Copy from pod to local"),(0,l.yg)("p",null,"Just do the inverse:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc cp : \n")),(0,l.yg)("h3",{id:"download-data-from-surfdrive"},"Download data from SURFdrive"),(0,l.yg)("p",null,"You can download data from your SURFdrive to your pod by creating a public link to the file:"),(0,l.yg)("ol",null,(0,l.yg)("li",{parentName:"ol"},"Go to the file in SURFdrive you'd like to share"),(0,l.yg)("li",{parentName:"ol"},"Click share and the create public link"),(0,l.yg)("li",{parentName:"ol"},"Fill in a name for the public link (like DSRI). The name does not matter much, but it can help you keep track of the goal of the public link."),(0,l.yg)("li",{parentName:"ol"},"Click copy to clipboard"),(0,l.yg)("li",{parentName:"ol"},"Visit link in browser and copy the direct URL displayed on that page."),(0,l.yg)("li",{parentName:"ol"},'Use the direct URL you just copied to download the file using either wget or curl (e.g. "wget ',(0,l.yg)("a",{parentName:"li",href:"https://surfdrive.surf.nl/files/index.php/s/5mFwyAKj4UexlJb/download%22"},'https://surfdrive.surf.nl/files/index.php/s/5mFwyAKj4UexlJb/download"'),")"),(0,l.yg)("li",{parentName:"ol"},"Revoke link in the SURFdrive portal")),(0,l.yg)("h2",{id:"synchronizes-files-with-oc-rsync"},"Synchronizes files with ",(0,l.yg)("inlineCode",{parentName:"h2"},"oc rsync")),(0,l.yg)("p",null,"If you have a lot of large files and/or they are updated regularly, you can use ",(0,l.yg)("inlineCode",{parentName:"p"},"rsync")," as it synchronizes the files if they already exist, preventing duplication and making synchronization faster. You can also see the progress with ",(0,l.yg)("inlineCode",{parentName:"p"},"rsync")," which you cannot with ",(0,l.yg)("inlineCode",{parentName:"p"},"cp"),". 
And if the upload is stopped for any reason, ",(0,l.yg)("inlineCode",{parentName:"p"},"rsync")," should pick it up from where it stopped (instead of restarting from scratch like ",(0,l.yg)("inlineCode",{parentName:"p"},"oc cp")," does)."),(0,l.yg)("admonition",{type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"Rsync does not work with symlinks (created with ",(0,l.yg)("inlineCode",{parentName:"p"},"ln -s"),").")),(0,l.yg)("h3",{id:"sync-local-to-pod"},"Sync local to pod"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc rsync --progress <local-dir> <pod-name>:<dir-in-pod>\n")),(0,l.yg)("p",null,"You can also use this one-liner to automatically get the pod ID based on your app label:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pod --selector app=<app-name> | xargs -I{} oc rsync --progress <local-dir> {}:<dir-in-pod>\n")),(0,l.yg)("h3",{id:"sync-pod-to-local"},"Sync pod to local"),(0,l.yg)("p",null,"Again, do the inverse:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc rsync --progress <pod-name>:<dir-in-pod> <local-dir>\n")),(0,l.yg)("h3",{id:"more-options"},"More options"),(0,l.yg)("p",null,"You can use more options to improve the upload of large files:"),(0,l.yg)("table",null,(0,l.yg)("thead",{parentName:"table"},(0,l.yg)("tr",{parentName:"thead"},(0,l.yg)("th",{parentName:"tr",align:null},(0,l.yg)("inlineCode",{parentName:"th"},"--compress")),(0,l.yg)("th",{parentName:"tr",align:null},"compress file data during the transfer"))),(0,l.yg)("tbody",{parentName:"table"},(0,l.yg)("tr",{parentName:"tbody"},(0,l.yg)("td",{parentName:"tr",align:null},(0,l.yg)("inlineCode",{parentName:"td"},"--delete")),(0,l.yg)("td",{parentName:"tr",align:null},"delete files not present in source")),(0,l.yg)("tr",{parentName:"tbody"},(0,l.yg)("td",{parentName:"tr",align:null},(0,l.yg)("inlineCode",{parentName:"td"},"--watch")),(0,l.yg)("td",{parentName:"tr",align:null},"watch directory for changes and resync automatically")))),(0,l.yg)("h2",{id:"one-liner"},"One-liner"))}u.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/de77a223.3f497115.js b/assets/js/de77a223.d065e143.js similarity index 99% rename from assets/js/de77a223.3f497115.js rename to assets/js/de77a223.d065e143.js index a5610c6f9..7fa71727f 100644 --- a/assets/js/de77a223.3f497115.js +++ b/assets/js/de77a223.d065e143.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5416],{5680:(e,t,o)=>{o.d(t,{xA:()=>u,yg:()=>m});var a=o(6540);function n(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function l(e,t){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),o.push.apply(o,a)}return o}function r(e){for(var t=1;t=0||(n[o]=e[o]);return n}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(n[o]=e[o])}return n}var p=a.createContext({}),c=function(e){var t=a.useContext(p),o=t;return e&&(o="function"==typeof e?e(t):r(r({},t),e)),o},u=function(e){var t=c(e.components);return a.createElement(p.Provider,{value:t},e.children)},s={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var 
o=e.components,n=e.mdxType,l=e.originalType,p=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),d=c(o),m=n,y=d["".concat(p,".").concat(m)]||d[m]||s[m]||l;return o?a.createElement(y,r(r({ref:t},u),{},{components:o})):a.createElement(y,r({ref:t},u))}));function m(e,t){var o=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var l=o.length,r=new Array(l);r[0]=d;var i={};for(var p in t)hasOwnProperty.call(t,p)&&(i[p]=t[p]);i.originalType=e,i.mdxType="string"==typeof e?e:n,r[1]=i;for(var c=2;c{o.r(t),o.d(t,{assets:()=>u,contentTitle:()=>p,default:()=>m,frontMatter:()=>i,metadata:()=>c,toc:()=>s});var a=o(9668),n=o(1367),l=(o(6540),o(5680)),r=["components"],i={id:"guide-dockerfile-to-openshift",title:"Deploy from a Dockerfile"},p=void 0,c={unversionedId:"guide-dockerfile-to-openshift",id:"guide-dockerfile-to-openshift",title:"Deploy from a Dockerfile",description:"Build from local Dockerfile",source:"@site/docs/guide-dockerfile-to-openshift.md",sourceDirName:".",slug:"/guide-dockerfile-to-openshift",permalink:"/docs/guide-dockerfile-to-openshift",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-dockerfile-to-openshift.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-dockerfile-to-openshift",title:"Deploy from a Dockerfile"},sidebar:"docs",previous:{title:"GPU applications",permalink:"/docs/deploy-on-gpu"},next:{title:"Deploy from a Docker image",permalink:"/docs/deploy-from-docker"}},u={},s=[{value:"Build from local Dockerfile",id:"build-from-local-dockerfile",level:2},{value:"Create new build configuration.",id:"create-new-build-configuration",level:3},{value:"Build the image",id:"build-the-image",level:3},{value:"Create your app",id:"create-your-app",level:3},{value:"Expose app",id:"expose-app",level:3},{value:"Delete the created build",id:"delete-the-created-build",level:3},{value:"Deploy from a local docker image",id:"deploy-from-a-local-docker-image",level:2},{value:"Deploy from a Git repository",id:"deploy-from-a-git-repository",level:2}],d={toc:s};function m(e){var t=e.components,o=(0,n.A)(e,r);return(0,l.yg)("wrapper",(0,a.A)({},d,o,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("h2",{id:"build-from-local-dockerfile"},"Build from local Dockerfile"),(0,l.yg)("p",null,"This manual shows you an example of how to convert a dockerfile from your local machine to a running container on DSRI (openshift / okd). Start by cloning the example repository to your local machine."),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"git clone git@gitlab.maastrichtuniversity.nl:dsri-examples/dockerfile-to-okd.git\n")),(0,l.yg)("p",null,"After cloning you now have a local folder containing a Dockerfile and index.html file. 
Inspect both files."),(0,l.yg)("p",null,"Login with the openshift client:\n",(0,l.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"Authenticate to the OpenShift cluster")," using ",(0,l.yg)("inlineCode",{parentName:"p"},"oc login")," ."),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc login --token=\n")),(0,l.yg)("p",null,"Create a new project if you don't have a project yet you can work with (change myproject to a project name of your choice:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc new-project myproject\n")),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"create-new-build-configuration"},"Create new build configuration."),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc new-build --name dockerfile-to-okd --binary\n")),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"build-the-image"},"Build the image"),(0,l.yg)("p",null,"Start a new build on the DSRI with the files provided:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"cd dockerfile-to-okd\noc start-build dockerfile-to-okd --from-dir=. --follow --wait\n")),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"create-your-app"},"Create your app"),(0,l.yg)("p",null,"Create a new app using the build we just created:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc new-app dockerfile-to-okd\n")),(0,l.yg)("p",null,"To properly deploy your app on OpenShift you will need to define a few more parameters:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Enable root user access (with ",(0,l.yg)("inlineCode",{parentName:"li"},"serviceAccountName"),") by running this command:")),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch deployment/dockerfile-to-okd --patch \'{"spec":{"template": {"spec":{"serviceAccountName": "anyuid"}}}}\'\n')),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"You can also add persistent storage (with ",(0,l.yg)("inlineCode",{parentName:"p"},"volumes")," and ",(0,l.yg)("inlineCode",{parentName:"p"},"containers: volumeMounts")," )"),(0,l.yg)("ul",{parentName:"li"},(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("inlineCode",{parentName:"li"},"${STORAGE_NAME}"),": Name of your persistent volume claim in the ",(0,l.yg)("strong",{parentName:"li"},"Storage")," page of your project in the web UI"),(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("inlineCode",{parentName:"li"},"${STORAGE_FOLDER}")," : Name of the folder inside the persistent volume claim to store the application data (so you can store multiple applications on the same persistent volume claim)")))),(0,l.yg)("p",null,"Open the configuration of the started app to fix its configuration:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc edit deployment/dockerfile-to-okd\n")),(0,l.yg)("p",null,"You can mount existing persistent volume this way (replace the variables, such as ",(0,l.yg)("inlineCode",{parentName:"p"},"${STORAGE_NAME}")," by your values):"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-yaml"},' template:\n spec:\n serviceAccountName: anyuid\n volumes:\n - name: data\n persistentVolumeClaim:\n claimName: "${STORAGE_NAME}"\n containers:\n - image: rstudio-root:latest\n volumeMounts:\n - name: data\n mountPath: "/home/rstudio"\n subPath: "${STORAGE_FOLDER}"\n')),(0,l.yg)("admonition",{title:"Generate deployment file in 
YAML",type:"info"},(0,l.yg)("p",{parentName:"admonition"},"You can also generate the app deployment in a YAML file to edit it before start:"),(0,l.yg)("pre",{parentName:"admonition"},(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc new-app dockerfile-to-okd -o yaml > myapp.yml\n# Edit myapp.yml\noc create -f myapp.yml\n"))),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"expose-app"},"Expose app"),(0,l.yg)("p",null,"Expose the application so you can reach it from your browser and check the route that was created"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc expose svc/dockerfile-to-okd\noc get route\n")),(0,l.yg)("p",null,"You can now visit the route shown in the HOST/PORT output of the ",(0,l.yg)("inlineCode",{parentName:"p"},"oc get route")," command and see if you have successfully converted the docker file. "),(0,l.yg)("p",null,"You can edit the created route to enable HTTPS with this command:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch route/dockerfile-to-okd --patch \'{"spec":{"tls": {"termination": "edge", "insecureEdgeTerminationPolicy": "Redirect"}}}\'\n')),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"delete-the-created-build"},"Delete the created build"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete build dockerfile-to-okd\n")),(0,l.yg)("blockquote",null,(0,l.yg)("p",{parentName:"blockquote"},"See ",(0,l.yg)("a",{parentName:"p",href:"https://docs.openshift.com/enterprise/3.0/cli_reference/basic_cli_operations.html#application-modification-cli-operations"},"oc delete documentation"),".")),(0,l.yg)("hr",null),(0,l.yg)("h2",{id:"deploy-from-a-local-docker-image"},"Deploy from a local docker image"),(0,l.yg)("p",null,"You can also deploy a local docker image from your machine. 
"),(0,l.yg)("p",null,"First build the docker image:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"docker build -t my-docker-image:latest .\n")),(0,l.yg)("p",null,"Check you have the image locally on your system:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"docker images ls\n")),(0,l.yg)("p",null,"You should have a docker image for your application:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"REPOSITORY TAG \nmy-docker-image latest\n")),(0,l.yg)("p",null,"You can then deploy providing the docker image name and the name of the application to be deployed:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-abash"},"oc new-app my-docker-image --name app-name-on-openshift\n")),(0,l.yg)("hr",null),(0,l.yg)("h2",{id:"deploy-from-a-git-repository"},"Deploy from a Git repository"),(0,l.yg)("p",null,"Go to ",(0,l.yg)("strong",{parentName:"p"},"+Add")," > ",(0,l.yg)("strong",{parentName:"p"},"From Git"),": ",(0,l.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/import"},"https://console-openshift-console.apps.dsri2.unimaas.nl/import")),(0,l.yg)("p",null,"Follow the instructions given by the web UI: provide the URL to your git repository, the port on which the web interface will be deployed, you can also create a secret for git login if the repository is private."),(0,l.yg)("p",null,"Once the container has started you will need to make a small change to enable it running with any user ID (due to OpenShift security policies)."),(0,l.yg)("p",null,"You can do it with the command line (just change ",(0,l.yg)("inlineCode",{parentName:"p"},"your-app-name")," by your application name)"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch deployment/your-app-name --patch \'{"spec":{"template": {"spec":{"serviceAccountName": "anyuid"}}}}\'\n')),(0,l.yg)("p",null,"Or through the web UI: click on your deployment, then ",(0,l.yg)("strong",{parentName:"p"},"Actions")," > ",(0,l.yg)("strong",{parentName:"p"},"Edit Deployment"),". 
And edit the YAML of your deployment to add ",(0,l.yg)("inlineCode",{parentName:"p"},"serviceAccountName: anyuid")," under ",(0,l.yg)("inlineCode",{parentName:"p"},"template.spec"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-yaml"}," template:\n spec:\n serviceAccountName: anyuid\n containers:\n - [...]\n")))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5416],{5680:(e,t,o)=>{o.d(t,{xA:()=>u,yg:()=>m});var a=o(6540);function n(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function l(e,t){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),o.push.apply(o,a)}return o}function r(e){for(var t=1;t=0||(n[o]=e[o]);return n}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(n[o]=e[o])}return n}var p=a.createContext({}),c=function(e){var t=a.useContext(p),o=t;return e&&(o="function"==typeof e?e(t):r(r({},t),e)),o},u=function(e){var t=c(e.components);return a.createElement(p.Provider,{value:t},e.children)},s={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var o=e.components,n=e.mdxType,l=e.originalType,p=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),d=c(o),m=n,y=d["".concat(p,".").concat(m)]||d[m]||s[m]||l;return o?a.createElement(y,r(r({ref:t},u),{},{components:o})):a.createElement(y,r({ref:t},u))}));function m(e,t){var o=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var l=o.length,r=new Array(l);r[0]=d;var i={};for(var p in t)hasOwnProperty.call(t,p)&&(i[p]=t[p]);i.originalType=e,i.mdxType="string"==typeof e?e:n,r[1]=i;for(var c=2;c{o.r(t),o.d(t,{assets:()=>u,contentTitle:()=>p,default:()=>m,frontMatter:()=>i,metadata:()=>c,toc:()=>s});var a=o(9668),n=o(1367),l=(o(6540),o(5680)),r=["components"],i={id:"guide-dockerfile-to-openshift",title:"Deploy from a Dockerfile"},p=void 0,c={unversionedId:"guide-dockerfile-to-openshift",id:"guide-dockerfile-to-openshift",title:"Deploy from a Dockerfile",description:"Build from local Dockerfile",source:"@site/docs/guide-dockerfile-to-openshift.md",sourceDirName:".",slug:"/guide-dockerfile-to-openshift",permalink:"/docs/guide-dockerfile-to-openshift",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-dockerfile-to-openshift.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-dockerfile-to-openshift",title:"Deploy from a Dockerfile"},sidebar:"docs",previous:{title:"GPU applications",permalink:"/docs/deploy-on-gpu"},next:{title:"Deploy from a Docker image",permalink:"/docs/deploy-from-docker"}},u={},s=[{value:"Build from local Dockerfile",id:"build-from-local-dockerfile",level:2},{value:"Create new build configuration.",id:"create-new-build-configuration",level:3},{value:"Build the image",id:"build-the-image",level:3},{value:"Create your app",id:"create-your-app",level:3},{value:"Expose app",id:"expose-app",level:3},{value:"Delete the created build",id:"delete-the-created-build",level:3},{value:"Deploy from a local docker image",id:"deploy-from-a-local-docker-image",level:2},{value:"Deploy from a Git 
repository",id:"deploy-from-a-git-repository",level:2}],d={toc:s};function m(e){var t=e.components,o=(0,n.A)(e,r);return(0,l.yg)("wrapper",(0,a.A)({},d,o,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("h2",{id:"build-from-local-dockerfile"},"Build from local Dockerfile"),(0,l.yg)("p",null,"This manual shows you an example of how to convert a dockerfile from your local machine to a running container on DSRI (openshift / okd). Start by cloning the example repository to your local machine."),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"git clone git@gitlab.maastrichtuniversity.nl:dsri-examples/dockerfile-to-okd.git\n")),(0,l.yg)("p",null,"After cloning you now have a local folder containing a Dockerfile and index.html file. Inspect both files."),(0,l.yg)("p",null,"Login with the openshift client:\n",(0,l.yg)("a",{parentName:"p",href:"/docs/openshift-install"},"Authenticate to the OpenShift cluster")," using ",(0,l.yg)("inlineCode",{parentName:"p"},"oc login")," ."),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc login --token=\n")),(0,l.yg)("p",null,"Create a new project if you don't have a project yet you can work with (change myproject to a project name of your choice:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc new-project myproject\n")),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"create-new-build-configuration"},"Create new build configuration."),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc new-build --name dockerfile-to-okd --binary\n")),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"build-the-image"},"Build the image"),(0,l.yg)("p",null,"Start a new build on the DSRI with the files provided:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"cd dockerfile-to-okd\noc start-build dockerfile-to-okd --from-dir=. 
(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"build-the-image"},"Build the image"),(0,l.yg)("p",null,"Start a new build on the DSRI with the files provided:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"cd dockerfile-to-okd\noc start-build dockerfile-to-okd --from-dir=. --follow --wait\n")),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"create-your-app"},"Create your app"),(0,l.yg)("p",null,"Create a new app using the build we just created:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc new-app dockerfile-to-okd\n")),(0,l.yg)("p",null,"To properly deploy your app on OpenShift, you will need to define a few more parameters:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Enable root user access (with ",(0,l.yg)("inlineCode",{parentName:"li"},"serviceAccountName"),") by running this command:")),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch deployment/dockerfile-to-okd --patch \'{"spec":{"template": {"spec":{"serviceAccountName": "anyuid"}}}}\'\n')),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("p",{parentName:"li"},"You can also add persistent storage (with ",(0,l.yg)("inlineCode",{parentName:"p"},"volumes")," and ",(0,l.yg)("inlineCode",{parentName:"p"},"containers: volumeMounts"),")"),(0,l.yg)("ul",{parentName:"li"},(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("inlineCode",{parentName:"li"},"${STORAGE_NAME}"),": Name of your persistent volume claim in the ",(0,l.yg)("strong",{parentName:"li"},"Storage")," page of your project in the web UI"),(0,l.yg)("li",{parentName:"ul"},(0,l.yg)("inlineCode",{parentName:"li"},"${STORAGE_FOLDER}"),": Name of the folder inside the persistent volume claim to store the application data (so you can store multiple applications on the same persistent volume claim)")))),(0,l.yg)("p",null,"Edit the deployment of the app you just started to adjust its configuration:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc edit deployment/dockerfile-to-okd\n")),(0,l.yg)("p",null,"You can mount an existing persistent volume this way (replace the variables, such as ",(0,l.yg)("inlineCode",{parentName:"p"},"${STORAGE_NAME}"),", with your values):"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-yaml"},' template:\n   spec:\n     serviceAccountName: anyuid\n     volumes:\n       - name: data\n         persistentVolumeClaim:\n           claimName: "${STORAGE_NAME}"\n     containers:\n       - image: rstudio-root:latest\n         volumeMounts:\n           - name: data\n             mountPath: "/home/rstudio"\n             subPath: "${STORAGE_FOLDER}"\n')),(0,l.yg)("admonition",{title:"Generate deployment file in YAML",type:"info"},(0,l.yg)("p",{parentName:"admonition"},"You can also generate the app deployment in a YAML file to edit it before starting the app:"),(0,l.yg)("pre",{parentName:"admonition"},(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc new-app dockerfile-to-okd -o yaml > myapp.yml\n# Edit myapp.yml\noc create -f myapp.yml\n"))),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"expose-app"},"Expose app"),(0,l.yg)("p",null,"Expose the application so you can reach it from your browser, and check the route that was created:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc expose svc/dockerfile-to-okd\noc get route\n")),(0,l.yg)("p",null,"You can now visit the route shown in the HOST/PORT output of the ",(0,l.yg)("inlineCode",{parentName:"p"},"oc get route")," command and check that your Dockerfile was successfully deployed.
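"),(0,l.yg)("p",null,"For example, you can print only the hostname of the route with a jsonpath query (a quick sketch using standard ",(0,l.yg)("inlineCode",{parentName:"p"},"oc")," output options):"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"# Print only the hostname of the created route\noc get route dockerfile-to-okd -o jsonpath={.spec.host}\n")),(0,l.yg)("p",null,"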
"),(0,l.yg)("p",null,"You can edit the created route to enable HTTPS with this command:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch route/dockerfile-to-okd --patch \'{"spec":{"tls": {"termination": "edge", "insecureEdgeTerminationPolicy": "Redirect"}}}\'\n')),(0,l.yg)("hr",null),(0,l.yg)("h3",{id:"delete-the-created-build"},"Delete the created build"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"oc delete build dockerfile-to-okd\n")),(0,l.yg)("blockquote",null,(0,l.yg)("p",{parentName:"blockquote"},"See ",(0,l.yg)("a",{parentName:"p",href:"https://docs.openshift.com/enterprise/3.0/cli_reference/basic_cli_operations.html#application-modification-cli-operations"},"oc delete documentation"),".")),(0,l.yg)("hr",null),(0,l.yg)("h2",{id:"deploy-from-a-local-docker-image"},"Deploy from a local docker image"),(0,l.yg)("p",null,"You can also deploy a local docker image from your machine. "),(0,l.yg)("p",null,"First build the docker image:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"docker build -t my-docker-image:latest .\n")),(0,l.yg)("p",null,"Check you have the image locally on your system:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-shell"},"docker images ls\n")),(0,l.yg)("p",null,"You should have a docker image for your application:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"REPOSITORY TAG \nmy-docker-image latest\n")),(0,l.yg)("p",null,"You can then deploy providing the docker image name and the name of the application to be deployed:"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-abash"},"oc new-app my-docker-image --name app-name-on-openshift\n")),(0,l.yg)("hr",null),(0,l.yg)("h2",{id:"deploy-from-a-git-repository"},"Deploy from a Git repository"),(0,l.yg)("p",null,"Go to ",(0,l.yg)("strong",{parentName:"p"},"+Add")," > ",(0,l.yg)("strong",{parentName:"p"},"From Git"),": ",(0,l.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/import"},"https://console-openshift-console.apps.dsri2.unimaas.nl/import")),(0,l.yg)("p",null,"Follow the instructions given by the web UI: provide the URL to your git repository, the port on which the web interface will be deployed, you can also create a secret for git login if the repository is private."),(0,l.yg)("p",null,"Once the container has started you will need to make a small change to enable it running with any user ID (due to OpenShift security policies)."),(0,l.yg)("p",null,"You can do it with the command line (just change ",(0,l.yg)("inlineCode",{parentName:"p"},"your-app-name")," by your application name)"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},'oc patch deployment/your-app-name --patch \'{"spec":{"template": {"spec":{"serviceAccountName": "anyuid"}}}}\'\n')),(0,l.yg)("p",null,"Or through the web UI: click on your deployment, then ",(0,l.yg)("strong",{parentName:"p"},"Actions")," > ",(0,l.yg)("strong",{parentName:"p"},"Edit Deployment"),". 
And edit the YAML of your deployment to add ",(0,l.yg)("inlineCode",{parentName:"p"},"serviceAccountName: anyuid")," under ",(0,l.yg)("inlineCode",{parentName:"p"},"template.spec"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-yaml"}," template:\n spec:\n serviceAccountName: anyuid\n containers:\n - [...]\n")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/e53f82ff.cb5ff291.js b/assets/js/e53f82ff.ed442302.js similarity index 99% rename from assets/js/e53f82ff.cb5ff291.js rename to assets/js/e53f82ff.ed442302.js index 9a77a955e..6ca021e2c 100644 --- a/assets/js/e53f82ff.cb5ff291.js +++ b/assets/js/e53f82ff.ed442302.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7692],{5680:(e,t,o)=>{o.d(t,{xA:()=>d,yg:()=>g});var n=o(6540);function r(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function a(e,t){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),o.push.apply(o,n)}return o}function i(e){for(var t=1;t=0||(r[o]=e[o]);return r}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(r[o]=e[o])}return r}var l=n.createContext({}),p=function(e){var t=n.useContext(l),o=t;return e&&(o="function"==typeof e?e(t):i(i({},t),e)),o},d=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},c=n.forwardRef((function(e,t){var o=e.components,r=e.mdxType,a=e.originalType,l=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),c=p(o),g=r,m=c["".concat(l,".").concat(g)]||c[g]||u[g]||a;return o?n.createElement(m,i(i({ref:t},d),{},{components:o})):n.createElement(m,i({ref:t},d))}));function g(e,t){var o=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var a=o.length,i=new Array(a);i[0]=c;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:r,i[1]=s;for(var p=2;p{o.r(t),o.d(t,{assets:()=>d,contentTitle:()=>l,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var n=o(9668),r=o(1367),a=(o(6540),o(5680)),i=["components"],s={id:"deploy-vscode",title:"VisualStudio Code"},l=void 0,p={unversionedId:"deploy-vscode",id:"deploy-vscode",title:"VisualStudio Code",description:"Start VisualStudio Code server",source:"@site/docs/deploy-vscode.md",sourceDirName:".",slug:"/deploy-vscode",permalink:"/docs/deploy-vscode",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-vscode.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-vscode",title:"VisualStudio Code"},sidebar:"docs",previous:{title:"RStudio",permalink:"/docs/deploy-rstudio"},next:{title:"Databases",permalink:"/docs/deploy-database"}},d={},u=[{value:"Start VisualStudio Code server",id:"start-visualstudio-code-server",level:2},{value:"Use Git in VSCode",id:"use-git-in-vscode",level:2},{value:"VSCode for GPU",id:"vscode-for-gpu",level:2}],c={toc:u};function g(e){var t=e.components,o=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,n.A)({},c,o,{components:t,mdxType:"MDXLayout"}),(0,a.yg)("h2",{id:"start-visualstudio-code-server"},"Start VisualStudio Code 
server"),(0,a.yg)("p",null,"Start a VisualStudio Code server with the ",(0,a.yg)("inlineCode",{parentName:"p"},"coder")," user, which has ",(0,a.yg)("inlineCode",{parentName:"p"},"sudo")," privileges."),(0,a.yg)("p",null,"You can deploy it using the ",(0,a.yg)("strong",{parentName:"p"},"VisualStudio Code server")," solution in the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,a.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,a.yg)("p",null,"Provide a few parameters, and instantiate the template. The DSRI will automatically create a persistent volume to store data you will put in the ",(0,a.yg)("inlineCode",{parentName:"p"},"/home/coder/project")," folder. You can find the persistent volumes in the DSRI web UI, go to the ",(0,a.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,a.yg)("strong",{parentName:"p"},"Storage")," > ",(0,a.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,a.yg)("img",{src:"/img/screenshot-deploy-vscode.png",alt:"Deploy VSCode",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,a.yg)("h2",{id:"use-git-in-vscode"},"Use Git in VSCode"),(0,a.yg)("p",null,"The easiest way to login and clone a repository from GitHub is to use the built-in authentication system of VisualStudio Code, to do so click on ",(0,a.yg)("strong",{parentName:"p"},"clone repository...")," in the ",(0,a.yg)("strong",{parentName:"p"},"Welcome")," page, and follow the instructions in the top of the VisualStudio window."),(0,a.yg)("p",null,"If this solution does not work for you, you can use ",(0,a.yg)("inlineCode",{parentName:"p"},"git")," from the terminal to clone the git repository with ",(0,a.yg)("inlineCode",{parentName:"p"},"git clone"),". VisualStudio might ask you to login in the dialog box at the top of the page, enter your username and password when requested. 
For GitHub you might need to generate a token at ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/settings/tokens"},"https://github.com/settings/tokens")," to use as password."),(0,a.yg)("p",null,"Once the repository cloned, you can use git from the VSCode web UI to manage your ",(0,a.yg)("inlineCode",{parentName:"p"},"git")," repositories (add, commit, push changes), or in the terminal."),(0,a.yg)("p",null,"Before committing to GitHub or GitLab, you might need to configure you username and email in VSCode terminal:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'git config --global user.name "Jean Dupont"\ngit config --global user.email jeandupont@gmail.com\n')),(0,a.yg)("admonition",{title:"Save your git password",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You can run this command to ask git to save your password for 15min:"),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"git config credential.helper cache\n")),(0,a.yg)("p",{parentName:"admonition"},"Or store the password in a plain text file:"),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"git config --global credential.helper 'store --file ~/.git-credentials'\n"))),(0,a.yg)("admonition",{title:"Git tip",type:"tip"},(0,a.yg)("p",{parentName:"admonition"},"We recommend to use SSH instead of HTTPS connection when possible, checkout ",(0,a.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent"},"here")," how to generate SSH keys and use them with your GitHub account.")),(0,a.yg)("h2",{id:"vscode-for-gpu"},"VSCode for GPU"),(0,a.yg)("p",null,"See the ",(0,a.yg)("a",{parentName:"p",href:"/docs/deploy-on-gpu"},"Deploy on GPU")," page to deploy a VisualStudio Code server on GPU."))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7692],{5680:(e,t,o)=>{o.d(t,{xA:()=>d,yg:()=>g});var n=o(6540);function r(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function a(e,t){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),o.push.apply(o,n)}return o}function i(e){for(var t=1;t=0||(r[o]=e[o]);return r}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(r[o]=e[o])}return r}var l=n.createContext({}),p=function(e){var t=n.useContext(l),o=t;return e&&(o="function"==typeof e?e(t):i(i({},t),e)),o},d=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},c=n.forwardRef((function(e,t){var o=e.components,r=e.mdxType,a=e.originalType,l=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),c=p(o),g=r,m=c["".concat(l,".").concat(g)]||c[g]||u[g]||a;return o?n.createElement(m,i(i({ref:t},d),{},{components:o})):n.createElement(m,i({ref:t},d))}));function g(e,t){var o=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var a=o.length,i=new Array(a);i[0]=c;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:r,i[1]=s;for(var 
p=2;p{o.r(t),o.d(t,{assets:()=>d,contentTitle:()=>l,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var n=o(9668),r=o(1367),a=(o(6540),o(5680)),i=["components"],s={id:"deploy-vscode",title:"VisualStudio Code"},l=void 0,p={unversionedId:"deploy-vscode",id:"deploy-vscode",title:"VisualStudio Code",description:"Start VisualStudio Code server",source:"@site/docs/deploy-vscode.md",sourceDirName:".",slug:"/deploy-vscode",permalink:"/docs/deploy-vscode",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-vscode.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-vscode",title:"VisualStudio Code"},sidebar:"docs",previous:{title:"RStudio",permalink:"/docs/deploy-rstudio"},next:{title:"Databases",permalink:"/docs/deploy-database"}},d={},u=[{value:"Start VisualStudio Code server",id:"start-visualstudio-code-server",level:2},{value:"Use Git in VSCode",id:"use-git-in-vscode",level:2},{value:"VSCode for GPU",id:"vscode-for-gpu",level:2}],c={toc:u};function g(e){var t=e.components,o=(0,r.A)(e,i);return(0,a.yg)("wrapper",(0,n.A)({},c,o,{components:t,mdxType:"MDXLayout"}),(0,a.yg)("h2",{id:"start-visualstudio-code-server"},"Start VisualStudio Code server"),(0,a.yg)("p",null,"Start a VisualStudio Code server with the ",(0,a.yg)("inlineCode",{parentName:"p"},"coder")," user, which has ",(0,a.yg)("inlineCode",{parentName:"p"},"sudo")," privileges."),(0,a.yg)("p",null,"You can deploy it using the ",(0,a.yg)("strong",{parentName:"p"},"VisualStudio Code server")," solution in the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,a.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)."),(0,a.yg)("p",null,"Provide a few parameters, and instantiate the template. The DSRI will automatically create a persistent volume to store data you will put in the ",(0,a.yg)("inlineCode",{parentName:"p"},"/home/coder/project")," folder. You can find the persistent volumes in the DSRI web UI: go to the ",(0,a.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,a.yg)("strong",{parentName:"p"},"Storage")," > ",(0,a.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,a.yg)("img",{src:"/img/screenshot-deploy-vscode.png",alt:"Deploy VSCode",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,a.yg)("h2",{id:"use-git-in-vscode"},"Use Git in VSCode"),(0,a.yg)("p",null,"The easiest way to log in and clone a repository from GitHub is to use the built-in authentication system of VisualStudio Code: click on ",(0,a.yg)("strong",{parentName:"p"},"clone repository...")," on the ",(0,a.yg)("strong",{parentName:"p"},"Welcome")," page, and follow the instructions at the top of the VisualStudio window."),(0,a.yg)("p",null,"If this solution does not work for you, you can use ",(0,a.yg)("inlineCode",{parentName:"p"},"git")," from the terminal to clone the git repository with ",(0,a.yg)("inlineCode",{parentName:"p"},"git clone"),". VisualStudio might ask you to log in via the dialog box at the top of the page; enter your username and password when requested.
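"),(0,a.yg)("p",null,"For example, cloning over HTTPS looks like this (",(0,a.yg)("inlineCode",{parentName:"p"},"your-username/your-repo")," is just a placeholder for your own repository):"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"# Clone over HTTPS; git will prompt for your username and password\ngit clone https://github.com/your-username/your-repo.git\n")),(0,a.yg)("p",null,"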
For GitHub you might need to generate a token at ",(0,a.yg)("a",{parentName:"p",href:"https://github.com/settings/tokens"},"https://github.com/settings/tokens")," to use as a password."),(0,a.yg)("p",null,"Once the repository is cloned, you can use git from the VSCode web UI to manage your ",(0,a.yg)("inlineCode",{parentName:"p"},"git")," repositories (add, commit, push changes), or work in the terminal."),(0,a.yg)("p",null,"Before committing to GitHub or GitLab, you might need to configure your username and email in the VSCode terminal:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-bash"},'git config --global user.name "Jean Dupont"\ngit config --global user.email jeandupont@gmail.com\n')),(0,a.yg)("admonition",{title:"Save your git password",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You can run this command to ask git to save your password for 15 minutes:"),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"git config credential.helper cache\n")),(0,a.yg)("p",{parentName:"admonition"},"Or store the password in a plain text file:"),(0,a.yg)("pre",{parentName:"admonition"},(0,a.yg)("code",{parentName:"pre",className:"language-bash"},"git config --global credential.helper 'store --file ~/.git-credentials'\n"))),(0,a.yg)("admonition",{title:"Git tip",type:"tip"},(0,a.yg)("p",{parentName:"admonition"},"We recommend using SSH instead of an HTTPS connection when possible; check out ",(0,a.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent"},"here")," how to generate SSH keys and use them with your GitHub account.")),(0,a.yg)("h2",{id:"vscode-for-gpu"},"VSCode for GPU"),(0,a.yg)("p",null,"See the ",(0,a.yg)("a",{parentName:"p",href:"/docs/deploy-on-gpu"},"Deploy on GPU")," page to deploy a VisualStudio Code server on GPU."))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/e747ec83.7fef5581.js b/assets/js/e747ec83.75332ce7.js similarity index 98% rename from assets/js/e747ec83.7fef5581.js rename to assets/js/e747ec83.75332ce7.js index fb8f81067..0b3580b57 100644 --- a/assets/js/e747ec83.7fef5581.js +++ b/assets/js/e747ec83.75332ce7.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7051],{5680:(e,t,r)=>{r.d(t,{xA:()=>c,yg:()=>y});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),p=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),d=p(r),y=a,g=d["".concat(l,".").concat(y)]||d[y]||u[y]||o;return 
r?n.createElement(g,i(i({ref:t},c),{},{components:r})):n.createElement(g,i({ref:t},c))}));function y(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>l,default:()=>y,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],s={id:"glossary",title:"Glossary"},l=void 0,p={unversionedId:"glossary",id:"glossary",title:"Glossary",description:"Docker",source:"@site/docs/glossary.md",sourceDirName:".",slug:"/glossary",permalink:"/docs/glossary",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/glossary.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"glossary",title:"Glossary"},sidebar:"docs",previous:{title:"Libraries for Machine Learning",permalink:"/docs/tools-machine-learning"},next:{title:"Introduction to workflows",permalink:"/docs/workflows-introduction"}},c={},u=[{value:"Docker",id:"docker",level:2},{value:"Kubernetes",id:"kubernetes",level:2},{value:"OpenShift",id:"openshift",level:2},{value:"OKD",id:"okd",level:2}],d={toc:u};function y(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"docker"},"Docker"),(0,o.yg)("h2",{id:"kubernetes"},"Kubernetes"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes")," is a portable, extensible, open-source platform for managing containerized workloads and services, that facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem. "),(0,o.yg)("p",null,"Kubernetes services, support, and tools are widely available."),(0,o.yg)("p",null,"Kubernetes, also known as K8s, is an open-source system for automating deployment, scaling, and management of containerized applications."),(0,o.yg)("img",{src:"/img/glossary_kubernetes.png",alt:"Kubernetes Architecture",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"More Information: ",(0,o.yg)("a",{parentName:"p",href:"https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/"},"https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/")),(0,o.yg)("h2",{id:"openshift"},"OpenShift"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://www.openshift.com/"},"Red Hat OpenShift")," is a hybrid cloud, enterprise Kubernetes application platform, trusted by 2,000+ organizations."),(0,o.yg)("p",null,"It includes "),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Container host and runtime"),(0,o.yg)("li",{parentName:"ul"},"Enterprise Kubernetes"),(0,o.yg)("li",{parentName:"ul"},"Validated integrations"),(0,o.yg)("li",{parentName:"ul"},"Integrated container registry"),(0,o.yg)("li",{parentName:"ul"},"Developer workflows"),(0,o.yg)("li",{parentName:"ul"},"Easy access to services")),(0,o.yg)("img",{src:"/img/glossary_openshift.png",alt:"Red Hat Openshift",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h2",{id:"okd"},"OKD"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://www.okd.io/"},"OKD")," is a ",(0,o.yg)("strong",{parentName:"p"},"distribution of Kubernetes")," optimized for continuous application development and multi-tenant deployment. 
OKD adds ",(0,o.yg)("strong",{parentName:"p"},"developer and operations-centric")," tools on top of Kubernetes to enable rapid application development, easy deployment and scaling, and long-term lifecycle maintenance for small and large teams. OKD is a ",(0,o.yg)("strong",{parentName:"p"},"sibling")," Kubernetes distribution to ",(0,o.yg)("strong",{parentName:"p"},"Red Hat OpenShift")),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/welcome/index.html"},"OKD 4 Documentation")))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7051],{5680:(e,t,r)=>{r.d(t,{xA:()=>c,yg:()=>y});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),p=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),d=p(r),y=a,g=d["".concat(l,".").concat(y)]||d[y]||u[y]||o;return r?n.createElement(g,i(i({ref:t},c),{},{components:r})):n.createElement(g,i({ref:t},c))}));function y(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>l,default:()=>y,frontMatter:()=>s,metadata:()=>p,toc:()=>u});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],s={id:"glossary",title:"Glossary"},l=void 0,p={unversionedId:"glossary",id:"glossary",title:"Glossary",description:"Docker",source:"@site/docs/glossary.md",sourceDirName:".",slug:"/glossary",permalink:"/docs/glossary",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/glossary.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"glossary",title:"Glossary"},sidebar:"docs",previous:{title:"Libraries for Machine Learning",permalink:"/docs/tools-machine-learning"},next:{title:"Introduction to workflows",permalink:"/docs/workflows-introduction"}},c={},u=[{value:"Docker",id:"docker",level:2},{value:"Kubernetes",id:"kubernetes",level:2},{value:"OpenShift",id:"openshift",level:2},{value:"OKD",id:"okd",level:2}],d={toc:u};function y(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"docker"},"Docker"),(0,o.yg)("h2",{id:"kubernetes"},"Kubernetes"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes")," is a portable, extensible, open-source platform for managing containerized workloads and services, that 
facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem. "),(0,o.yg)("p",null,"Kubernetes services, support, and tools are widely available."),(0,o.yg)("p",null,"Kubernetes, also known as K8s, is an open-source system for automating deployment, scaling, and management of containerized applications."),(0,o.yg)("img",{src:"/img/glossary_kubernetes.png",alt:"Kubernetes Architecture",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"More Information: ",(0,o.yg)("a",{parentName:"p",href:"https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/"},"https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/")),(0,o.yg)("h2",{id:"openshift"},"OpenShift"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://www.openshift.com/"},"Red Hat OpenShift")," is a hybrid cloud, enterprise Kubernetes application platform, trusted by 2,000+ organizations."),(0,o.yg)("p",null,"It includes "),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Container host and runtime"),(0,o.yg)("li",{parentName:"ul"},"Enterprise Kubernetes"),(0,o.yg)("li",{parentName:"ul"},"Validated integrations"),(0,o.yg)("li",{parentName:"ul"},"Integrated container registry"),(0,o.yg)("li",{parentName:"ul"},"Developer workflows"),(0,o.yg)("li",{parentName:"ul"},"Easy access to services")),(0,o.yg)("img",{src:"/img/glossary_openshift.png",alt:"Red Hat Openshift",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("h2",{id:"okd"},"OKD"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://www.okd.io/"},"OKD")," is a ",(0,o.yg)("strong",{parentName:"p"},"distribution of Kubernetes")," optimized for continuous application development and multi-tenant deployment. OKD adds ",(0,o.yg)("strong",{parentName:"p"},"developer and operations-centric")," tools on top of Kubernetes to enable rapid application development, easy deployment and scaling, and long-term lifecycle maintenance for small and large teams. 
OKD is a ",(0,o.yg)("strong",{parentName:"p"},"sibling")," Kubernetes distribution to ",(0,o.yg)("strong",{parentName:"p"},"Red Hat OpenShift")),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"https://docs.okd.io/latest/welcome/index.html"},"OKD 4 Documentation")))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/e9a2555b.9a06c52f.js b/assets/js/e9a2555b.9f6e4e96.js similarity index 99% rename from assets/js/e9a2555b.9a06c52f.js rename to assets/js/e9a2555b.9f6e4e96.js index 8980f7be9..f13989f72 100644 --- a/assets/js/e9a2555b.9a06c52f.js +++ b/assets/js/e9a2555b.9f6e4e96.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3807],{5680:(e,t,a)=>{a.d(t,{xA:()=>g,yg:()=>c});var n=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var l=n.createContext({}),p=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},g=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},u=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,r=e.originalType,l=e.parentName,g=s(e,["components","mdxType","originalType","parentName"]),u=p(a),c=o,d=u["".concat(l,".").concat(c)]||u[c]||m[c]||r;return a?n.createElement(d,i(i({ref:t},g),{},{components:a})):n.createElement(d,i({ref:t},g))}));function c(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=a.length,i=new Array(r);i[0]=u;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:o,i[1]=s;for(var p=2;p{a.r(t),a.d(t,{assets:()=>g,contentTitle:()=>l,default:()=>c,frontMatter:()=>s,metadata:()=>p,toc:()=>m});var n=a(9668),o=a(1367),r=(a(6540),a(5680)),i=["components"],s={id:"catalog-utilities",title:"Utilities"},l=void 0,p={unversionedId:"catalog-utilities",id:"catalog-utilities",title:"Utilities",description:"Feel free to propose new services using pull requests, or to request them by creating new issues.",source:"@site/docs/catalog-utilities.md",sourceDirName:".",slug:"/catalog-utilities",permalink:"/docs/catalog-utilities",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-utilities.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-utilities",title:"Utilities"},sidebar:"docs",previous:{title:"Imaging softwares",permalink:"/docs/catalog-imaging"},next:{title:"Access UM servers",permalink:"/docs/access-um-servers"}},g={},m=[{value:"Ubuntu",id:"ubuntu",level:2},{value:"With the terminal",id:"with-the-terminal",level:3},{value:"With a web UI",id:"with-a-web-ui",level:3},{value:"File browser",id:"file-browser",level:2},{value:"Creating or Connecting an Existing Persistent Storage",id:"creating-or-connecting-an-existing-persistent-storage",level:3}],u={toc:m};function c(e){var 
t=e.components,a=(0,o.A)(e,i);return(0,r.yg)("wrapper",(0,n.A)({},u,a,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("p",null,"Feel free to propose new services using ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/pulls"},"pull requests"),", or to request them by creating ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"new issues"),"."),(0,r.yg)("h2",{id:"ubuntu"},"Ubuntu"),(0,r.yg)("h3",{id:"with-the-terminal"},"With the terminal"),(0,r.yg)("p",null,"Start Ubuntu with the ",(0,r.yg)("inlineCode",{parentName:"p"},"root")," user which has ",(0,r.yg)("inlineCode",{parentName:"p"},"sudo")," permissions to install anything."),(0,r.yg)("p",null,"You can start the application using the ",(0,r.yg)("strong",{parentName:"p"},"Ubuntu")," template in the ",(0,r.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,r.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,r.yg)("admonition",{title:"Login Credentials",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Username: ",(0,r.yg)("strong",{parentName:"p"},"root")),(0,r.yg)("p",{parentName:"admonition"},"Password: ",(0,r.yg)("strong",{parentName:"p"},"Template creation password"))),(0,r.yg)("p",null,"This template uses the Ubuntu image hosted on DockerHub, see its documentation at ",(0,r.yg)("a",{parentName:"p",href:"https://hub.docker.com/r/ubuntu"},"https://hub.docker.com/r/ubuntu")),(0,r.yg)("admonition",{title:"Persistent data folder",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"\ud83d\udcc2 Use the ",(0,r.yg)("inlineCode",{parentName:"p"},"/root")," folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the ",(0,r.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,r.yg)("strong",{parentName:"p"},"Storage")," > ",(0,r.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),".")),(0,r.yg)("p",null,"We enabled the port ",(0,r.yg)("inlineCode",{parentName:"p"},"8080")," in the Ubuntu container if you need to deploy applications."),(0,r.yg)("p",null,"To quickly access it from the terminal you can use the ",(0,r.yg)("strong",{parentName:"p"},"Terminal")," tab in the pod page, or via your local terminal:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Get the Ubuntu pod ID:"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pods\n"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Connect to it:"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc rsh POD_ID\n"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Enable Bash in the Ubuntu container (if it starts with the Shell)"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"bash\n")))),(0,r.yg)("img",{src:"/img/screenshot-deploy-ubuntu.png",alt:"Deploy Ubuntu",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("h3",{id:"with-a-web-ui"},"With a web UI"),(0,r.yg)("p",null,"Start Ubuntu with a web UI accessible via a URL (using VNC). You will be the ",(0,r.yg)("inlineCode",{parentName:"p"},"root")," user which has elevated permissions to install anything via ",(0,r.yg)("inlineCode",{parentName:"p"},"apt install "),". 
Before you install a package run ",(0,r.yg)("inlineCode",{parentName:"p"},"apt update"),". This also solves ",(0,r.yg)("inlineCode",{parentName:"p"},"E: unable to locate package")," and ",(0,r.yg)("inlineCode",{parentName:"p"},"E: no installation candidate")," errors."),(0,r.yg)("p",null,"You can start the application using the ",(0,r.yg)("strong",{parentName:"p"},"Ubuntu with web UI")," template in the ",(0,r.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,r.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,r.yg)("admonition",{title:"Login Credentials",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Username: ",(0,r.yg)("strong",{parentName:"p"},"root")),(0,r.yg)("p",{parentName:"admonition"},"Password: ",(0,r.yg)("strong",{parentName:"p"},"Template creation password"))),(0,r.yg)("p",null,"This template uses the Docker image defined at ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/fcwu/docker-ubuntu-vnc-desktop"},"https://github.com/fcwu/docker-ubuntu-vnc-desktop")),(0,r.yg)("admonition",{title:"Less stable than the official image",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"This image might be less stable than the original Ubuntu image. Let us know on Slack if you have any problem!")),(0,r.yg)("h2",{id:"file-browser"},"File browser"),(0,r.yg)("p",null,"Deploy a file browser on your persistent volume. This will provide a web UI to upload and download data to your DSRI persistent volume in case you need it (JupyterLab, RStudio and VisualStudio Code server already include a file browser)"),(0,r.yg)("p",null,"You can start a container using the ",(0,r.yg)("strong",{parentName:"p"},"File Browser for existing storage")," template in the ",(0,r.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,r.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,r.yg)("img",{src:"/img/screenshot-deploy-filebrowser.png",alt:"Deploy File browser",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You can only deploy file browser on an existing Persistent Volume Claim, this enables you to add a web UI to access this storage."),(0,r.yg)("p",null,"The following parameters can be provided:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Provide a unique ",(0,r.yg)("strong",{parentName:"li"},"Application name"),". It will be used to generate the application URL."),(0,r.yg)("li",{parentName:"ol"},"Provide a ",(0,r.yg)("strong",{parentName:"li"},"Password"),", you will need to hash the password first for extra security, use this quick docker command to do it: ",(0,r.yg)("inlineCode",{parentName:"li"},"docker run filebrowser/filebrowser hash mypassword")),(0,r.yg)("li",{parentName:"ol"},"The ",(0,r.yg)("strong",{parentName:"li"},"Storage name")," of the Persistent Volume Claim (PVC) that will be exposed by the filebrowser."),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("strong",{parentName:"li"},"Storage subpath")," in the the Persistent Volume Claim that will be exposed by the filebrowser. 
Let it empty to use the Root folder of the persistent volume.")),(0,r.yg)("p",null,"You can find the Storage name if you Go to the deployments page > Storage panel."),(0,r.yg)("h3",{id:"creating-or-connecting-an-existing-persistent-storage"},"Creating or Connecting an Existing Persistent Storage"),(0,r.yg)("p",null,"Find more details about the how to ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-storage/#create-the-persistent-storage"},"create persistent storage")),(0,r.yg)("img",{src:"/img/screenshot_pvc_storage.png",alt:"Create Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/screenshot_pvc_storage_create.png",alt:"Create Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{type:"info"},(0,r.yg)("p",{parentName:"admonition"},"The DSRI using the ",(0,r.yg)("a",{parentName:"p",href:"https://www.openshift.com/products/container-storage/"},(0,r.yg)("strong",{parentName:"a"},"Openshift Container Stroage"))," (",(0,r.yg)("inlineCode",{parentName:"p"},"OCS"),") which is based on ",(0,r.yg)("a",{parentName:"p",href:"https://ceph.io/ceph-storage/"},(0,r.yg)("strong",{parentName:"a"},"CEPH"))," offers ",(0,r.yg)("inlineCode",{parentName:"p"},"ReadWriteOnce")," and ",(0,r.yg)("inlineCode",{parentName:"p"},"ReadWriteMany")," access mode. "),(0,r.yg)("ul",{parentName:"admonition"},(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"ReadWriteOnce")," (",(0,r.yg)("a",{parentName:"li",href:"https://docs.openshift.com/container-platform/4.6/storage/understanding-persistent-storage.html"},(0,r.yg)("strong",{parentName:"a"},"RWO")),") volumes cannot be mounted on multiple nodes. Use the ",(0,r.yg)("inlineCode",{parentName:"li"},"ReadWriteMany")," (",(0,r.yg)("a",{parentName:"li",href:"https://docs.openshift.com/container-platform/4.6/storage/understanding-persistent-storage.html"},(0,r.yg)("strong",{parentName:"a"},"RWX")),") access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on a shut down or crashed node. "))),(0,r.yg)("p",null,"Find more details about the how to ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-storage/#connect-the-existing-persistent-storage"},"Connect the Existing persistent storage")),(0,r.yg)("img",{src:"/img/screenshot_existing_storage.png",alt:"Add Existing Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/screenshot_add_storage.png",alt:"Add Existing Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{type:"info"},(0,r.yg)("p",{parentName:"admonition"},"You can try above method if you want to connect ",(0,r.yg)("strong",{parentName:"p"},"more applications to the same storage"))),(0,r.yg)("p",null,"This deployment require to have root user enabled on your project. 
Contact the ",(0,r.yg)("a",{parentName:"p",href:"mailto:dsri-support-l@maastrichtuniversity.nl"},"DSRI support team")," or create a ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"new issues")," to request root access or to create persistent volume for your project if you don't have them ."),(0,r.yg)("admonition",{title:"Credentials",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Default credentials will be username ",(0,r.yg)("inlineCode",{parentName:"p"},"admin")," and password ",(0,r.yg)("inlineCode",{parentName:"p"},"admin"))),(0,r.yg)("admonition",{title:"Change password",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Please ",(0,r.yg)("strong",{parentName:"p"},"change the password in the Filebrowser Web UI")," once it has been created.")))}c.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3807],{5680:(e,t,a)=>{a.d(t,{xA:()=>g,yg:()=>c});var n=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var l=n.createContext({}),p=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},g=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},u=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,r=e.originalType,l=e.parentName,g=s(e,["components","mdxType","originalType","parentName"]),u=p(a),c=o,d=u["".concat(l,".").concat(c)]||u[c]||m[c]||r;return a?n.createElement(d,i(i({ref:t},g),{},{components:a})):n.createElement(d,i({ref:t},g))}));function c(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var r=a.length,i=new Array(r);i[0]=u;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:o,i[1]=s;for(var p=2;p{a.r(t),a.d(t,{assets:()=>g,contentTitle:()=>l,default:()=>c,frontMatter:()=>s,metadata:()=>p,toc:()=>m});var n=a(9668),o=a(1367),r=(a(6540),a(5680)),i=["components"],s={id:"catalog-utilities",title:"Utilities"},l=void 0,p={unversionedId:"catalog-utilities",id:"catalog-utilities",title:"Utilities",description:"Feel free to propose new services using pull requests, or to request them by creating new issues.",source:"@site/docs/catalog-utilities.md",sourceDirName:".",slug:"/catalog-utilities",permalink:"/docs/catalog-utilities",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/catalog-utilities.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"catalog-utilities",title:"Utilities"},sidebar:"docs",previous:{title:"Imaging softwares",permalink:"/docs/catalog-imaging"},next:{title:"Access UM servers",permalink:"/docs/access-um-servers"}},g={},m=[{value:"Ubuntu",id:"ubuntu",level:2},{value:"With the terminal",id:"with-the-terminal",level:3},{value:"With a web 
UI",id:"with-a-web-ui",level:3},{value:"File browser",id:"file-browser",level:2},{value:"Creating or Connecting an Existing Persistent Storage",id:"creating-or-connecting-an-existing-persistent-storage",level:3}],u={toc:m};function c(e){var t=e.components,a=(0,o.A)(e,i);return(0,r.yg)("wrapper",(0,n.A)({},u,a,{components:t,mdxType:"MDXLayout"}),(0,r.yg)("p",null,"Feel free to propose new services using ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/pulls"},"pull requests"),", or to request them by creating ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"new issues"),"."),(0,r.yg)("h2",{id:"ubuntu"},"Ubuntu"),(0,r.yg)("h3",{id:"with-the-terminal"},"With the terminal"),(0,r.yg)("p",null,"Start Ubuntu with the ",(0,r.yg)("inlineCode",{parentName:"p"},"root")," user which has ",(0,r.yg)("inlineCode",{parentName:"p"},"sudo")," permissions to install anything."),(0,r.yg)("p",null,"You can start the application using the ",(0,r.yg)("strong",{parentName:"p"},"Ubuntu")," template in the ",(0,r.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,r.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,r.yg)("admonition",{title:"Login Credentials",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Username: ",(0,r.yg)("strong",{parentName:"p"},"root")),(0,r.yg)("p",{parentName:"admonition"},"Password: ",(0,r.yg)("strong",{parentName:"p"},"Template creation password"))),(0,r.yg)("p",null,"This template uses the Ubuntu image hosted on DockerHub, see its documentation at ",(0,r.yg)("a",{parentName:"p",href:"https://hub.docker.com/r/ubuntu"},"https://hub.docker.com/r/ubuntu")),(0,r.yg)("admonition",{title:"Persistent data folder",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"\ud83d\udcc2 Use the ",(0,r.yg)("inlineCode",{parentName:"p"},"/root")," folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the ",(0,r.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,r.yg)("strong",{parentName:"p"},"Storage")," > ",(0,r.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),".")),(0,r.yg)("p",null,"We enabled the port ",(0,r.yg)("inlineCode",{parentName:"p"},"8080")," in the Ubuntu container if you need to deploy applications."),(0,r.yg)("p",null,"To quickly access it from the terminal you can use the ",(0,r.yg)("strong",{parentName:"p"},"Terminal")," tab in the pod page, or via your local terminal:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Get the Ubuntu pod ID:"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc get pods\n"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Connect to it:"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"oc rsh POD_ID\n"))),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("p",{parentName:"li"},"Enable Bash in the Ubuntu container (if it starts with the Shell)"),(0,r.yg)("pre",{parentName:"li"},(0,r.yg)("code",{parentName:"pre",className:"language-bash"},"bash\n")))),(0,r.yg)("img",{src:"/img/screenshot-deploy-ubuntu.png",alt:"Deploy Ubuntu",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("h3",{id:"with-a-web-ui"},"With a web UI"),(0,r.yg)("p",null,"Start Ubuntu with a web UI accessible via a URL (using VNC). 
(0,r.yg)("h3",{id:"with-a-web-ui"},"With a web UI"),(0,r.yg)("p",null,"Start Ubuntu with a web UI accessible via a URL (using VNC). You will be the ",(0,r.yg)("inlineCode",{parentName:"p"},"root")," user, which has elevated permissions to install anything via ",(0,r.yg)("inlineCode",{parentName:"p"},"apt install "),". Before you install a package, run ",(0,r.yg)("inlineCode",{parentName:"p"},"apt update"),". This also solves ",(0,r.yg)("inlineCode",{parentName:"p"},"E: unable to locate package")," and ",(0,r.yg)("inlineCode",{parentName:"p"},"E: no installation candidate")," errors."),(0,r.yg)("p",null,"You can start the application using the ",(0,r.yg)("strong",{parentName:"p"},"Ubuntu with web UI")," template in the ",(0,r.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,r.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,r.yg)("admonition",{title:"Login Credentials",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Username: ",(0,r.yg)("strong",{parentName:"p"},"root")),(0,r.yg)("p",{parentName:"admonition"},"Password: ",(0,r.yg)("strong",{parentName:"p"},"Template creation password"))),(0,r.yg)("p",null,"This template uses the Docker image defined at ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/fcwu/docker-ubuntu-vnc-desktop"},"https://github.com/fcwu/docker-ubuntu-vnc-desktop")),(0,r.yg)("admonition",{title:"Less stable than the official image",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"This image might be less stable than the original Ubuntu image. Let us know on Slack if you have any problems!")),(0,r.yg)("h2",{id:"file-browser"},"File browser"),(0,r.yg)("p",null,"Deploy a file browser on your persistent volume. This will provide a web UI to upload and download data to your DSRI persistent volume in case you need it (JupyterLab, RStudio and VisualStudio Code server already include a file browser)."),(0,r.yg)("p",null,"You can start a container using the ",(0,r.yg)("strong",{parentName:"p"},"File Browser for existing storage")," template in the ",(0,r.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,r.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,r.yg)("img",{src:"/img/screenshot-deploy-filebrowser.png",alt:"Deploy File browser",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("p",null,"You can only deploy the file browser on an existing Persistent Volume Claim; this enables you to add a web UI to access this storage."),(0,r.yg)("p",null,"The following parameters can be provided:"),(0,r.yg)("ol",null,(0,r.yg)("li",{parentName:"ol"},"Provide a unique ",(0,r.yg)("strong",{parentName:"li"},"Application name"),". It will be used to generate the application URL."),(0,r.yg)("li",{parentName:"ol"},"Provide a ",(0,r.yg)("strong",{parentName:"li"},"Password"),". You will need to hash the password first for extra security; use this quick docker command to do it: ",(0,r.yg)("inlineCode",{parentName:"li"},"docker run filebrowser/filebrowser hash mypassword")),(0,r.yg)("li",{parentName:"ol"},"The ",(0,r.yg)("strong",{parentName:"li"},"Storage name")," of the Persistent Volume Claim (PVC) that will be exposed by the filebrowser."),(0,r.yg)("li",{parentName:"ol"},(0,r.yg)("strong",{parentName:"li"},"Storage subpath")," in the Persistent Volume Claim that will be exposed by the filebrowser. 
Leave it empty to use the root folder of the persistent volume.")),(0,r.yg)("p",null,"You can find the Storage name if you go to the Deployments page > Storage panel."),(0,r.yg)("h3",{id:"creating-or-connecting-an-existing-persistent-storage"},"Creating or Connecting an Existing Persistent Storage"),(0,r.yg)("p",null,"Find more details about how to ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-storage/#create-the-persistent-storage"},"create persistent storage")),(0,r.yg)("img",{src:"/img/screenshot_pvc_storage.png",alt:"Create Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/screenshot_pvc_storage_create.png",alt:"Create Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{type:"info"},(0,r.yg)("p",{parentName:"admonition"},"The DSRI uses ",(0,r.yg)("a",{parentName:"p",href:"https://www.openshift.com/products/container-storage/"},(0,r.yg)("strong",{parentName:"a"},"OpenShift Container Storage"))," (",(0,r.yg)("inlineCode",{parentName:"p"},"OCS"),"), which is based on ",(0,r.yg)("a",{parentName:"p",href:"https://ceph.io/ceph-storage/"},(0,r.yg)("strong",{parentName:"a"},"Ceph"))," and offers the ",(0,r.yg)("inlineCode",{parentName:"p"},"ReadWriteOnce")," and ",(0,r.yg)("inlineCode",{parentName:"p"},"ReadWriteMany")," access modes."),(0,r.yg)("ul",{parentName:"admonition"},(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"ReadWriteOnce")," (",(0,r.yg)("a",{parentName:"li",href:"https://docs.openshift.com/container-platform/4.6/storage/understanding-persistent-storage.html"},(0,r.yg)("strong",{parentName:"a"},"RWO")),") volumes cannot be mounted on multiple nodes. Use the ",(0,r.yg)("inlineCode",{parentName:"li"},"ReadWriteMany")," (",(0,r.yg)("a",{parentName:"li",href:"https://docs.openshift.com/container-platform/4.6/storage/understanding-persistent-storage.html"},(0,r.yg)("strong",{parentName:"a"},"RWX")),") access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on the shut down or crashed node."))),(0,r.yg)("p",null,"Find more details about how to ",(0,r.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/openshift-storage/#connect-the-existing-persistent-storage"},"connect the existing persistent storage")),(0,r.yg)("img",{src:"/img/screenshot_existing_storage.png",alt:"Add Existing Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("img",{src:"/img/screenshot_add_storage.png",alt:"Add Existing Persistent Storage",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,r.yg)("admonition",{type:"info"},(0,r.yg)("p",{parentName:"admonition"},"You can try the above method if you want to connect ",(0,r.yg)("strong",{parentName:"p"},"more applications to the same storage"))),(0,r.yg)("p",null,"This deployment requires the root user to be enabled on your project. 
Contact the ",(0,r.yg)("a",{parentName:"p",href:"mailto:dsri-support-l@maastrichtuniversity.nl"},"DSRI support team")," or create a ",(0,r.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-documentation/issues"},"new issues")," to request root access or to create persistent volume for your project if you don't have them ."),(0,r.yg)("admonition",{title:"Credentials",type:"info"},(0,r.yg)("p",{parentName:"admonition"},"Default credentials will be username ",(0,r.yg)("inlineCode",{parentName:"p"},"admin")," and password ",(0,r.yg)("inlineCode",{parentName:"p"},"admin"))),(0,r.yg)("admonition",{title:"Change password",type:"caution"},(0,r.yg)("p",{parentName:"admonition"},"Please ",(0,r.yg)("strong",{parentName:"p"},"change the password in the Filebrowser Web UI")," once it has been created.")))}c.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/e9bae93a.e2db0242.js b/assets/js/e9bae93a.48910b99.js similarity index 98% rename from assets/js/e9bae93a.e2db0242.js rename to assets/js/e9bae93a.48910b99.js index 365e1e3d2..7a70b3c12 100644 --- a/assets/js/e9bae93a.e2db0242.js +++ b/assets/js/e9bae93a.48910b99.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7968],{5680:(e,t,r)=>{r.d(t,{xA:()=>d,yg:()=>m});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function o(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),c=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):o(o({},t),e)),r},d=function(e){var t=c(e.components);return n.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},p=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,i=e.originalType,l=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),p=c(r),m=a,y=p["".concat(l,".").concat(m)]||p[m]||u[m]||i;return r?n.createElement(y,o(o({ref:t},d),{},{components:r})):n.createElement(y,o({ref:t},d))}));function m(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var i=r.length,o=new Array(i);o[0]=p;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,o[1]=s;for(var c=2;c{r.r(t),r.d(t,{assets:()=>d,contentTitle:()=>l,default:()=>m,frontMatter:()=>s,metadata:()=>c,toc:()=>u});var n=r(9668),a=r(1367),i=(r(6540),r(5680)),o=["components"],s={id:"sensible-data",title:"Working with sensible data"},l=void 0,c={unversionedId:"sensible-data",id:"sensible-data",title:"Working with sensible data",description:"Reminder: DSRI restrictions",source:"@site/docs/sensible-data.md",sourceDirName:".",slug:"/sensible-data",permalink:"/docs/sensible-data",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/sensible-data.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"sensible-data",title:"Working with sensible data"}},d={},u=[{value:"Reminder: DSRI 
restrictions",id:"reminder-dsri-restrictions",level:3},{value:"Disclaimer",id:"disclaimer",level:2}],p={toc:u};function m(e){var t=e.components,r=(0,a.A)(e,o);return(0,i.yg)("wrapper",(0,n.A)({},p,r,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("h3",{id:"reminder-dsri-restrictions"},"Reminder: DSRI restrictions"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"Since DSRI can only be accessed when on the physical UM network or using the ",(0,i.yg)("a",{parentName:"li",href:"https://vpn.maastrichtuniversity.nl/"},"UM VPN"),", deployed services will not be available on the public Internet \ud83d\udd12"),(0,i.yg)("li",{parentName:"ul"},"All activities must be legal in basis. You must closely examine and abide by the terms and conditions of any data, software, or web service that you use as part of your work \ud83d\udcdc")),(0,i.yg)("h2",{id:"disclaimer"},"Disclaimer"),(0,i.yg)("p",null,"The DSRI administration disclaims all responsibility in the misuse of sensible data processed on the DSRI "),(0,i.yg)("p",null,"We can guarantee you that only you, and 4 administrators are able to access the data (you might need to see with the data owner if that is not a problem)"),(0,i.yg)("p",null,"Feel to ask us more details"))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7968],{5680:(e,t,r)=>{r.d(t,{xA:()=>d,yg:()=>m});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function o(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),c=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):o(o({},t),e)),r},d=function(e){var t=c(e.components);return n.createElement(l.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},p=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,i=e.originalType,l=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),p=c(r),m=a,y=p["".concat(l,".").concat(m)]||p[m]||u[m]||i;return r?n.createElement(y,o(o({ref:t},d),{},{components:r})):n.createElement(y,o({ref:t},d))}));function m(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var i=r.length,o=new Array(i);o[0]=p;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,o[1]=s;for(var c=2;c{r.r(t),r.d(t,{assets:()=>d,contentTitle:()=>l,default:()=>m,frontMatter:()=>s,metadata:()=>c,toc:()=>u});var n=r(9668),a=r(1367),i=(r(6540),r(5680)),o=["components"],s={id:"sensible-data",title:"Working with sensible data"},l=void 0,c={unversionedId:"sensible-data",id:"sensible-data",title:"Working with sensible data",description:"Reminder: DSRI restrictions",source:"@site/docs/sensible-data.md",sourceDirName:".",slug:"/sensible-data",permalink:"/docs/sensible-data",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/sensible-data.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 
2024",frontMatter:{id:"sensible-data",title:"Working with sensible data"}},d={},u=[{value:"Reminder: DSRI restrictions",id:"reminder-dsri-restrictions",level:3},{value:"Disclaimer",id:"disclaimer",level:2}],p={toc:u};function m(e){var t=e.components,r=(0,a.A)(e,o);return(0,i.yg)("wrapper",(0,n.A)({},p,r,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("h3",{id:"reminder-dsri-restrictions"},"Reminder: DSRI restrictions"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"Since DSRI can only be accessed when on the physical UM network or using the ",(0,i.yg)("a",{parentName:"li",href:"https://vpn.maastrichtuniversity.nl/"},"UM VPN"),", deployed services will not be available on the public Internet \ud83d\udd12"),(0,i.yg)("li",{parentName:"ul"},"All activities must be legal in basis. You must closely examine and abide by the terms and conditions of any data, software, or web service that you use as part of your work \ud83d\udcdc")),(0,i.yg)("h2",{id:"disclaimer"},"Disclaimer"),(0,i.yg)("p",null,"The DSRI administration disclaims all responsibility in the misuse of sensible data processed on the DSRI "),(0,i.yg)("p",null,"We can guarantee you that only you, and 4 administrators are able to access the data (you might need to see with the data owner if that is not a problem)"),(0,i.yg)("p",null,"Feel to ask us more details"))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/ecdc8e44.a365e563.js b/assets/js/ecdc8e44.6e432d61.js similarity index 99% rename from assets/js/ecdc8e44.a365e563.js rename to assets/js/ecdc8e44.6e432d61.js index 8a7f6fb00..094e5691d 100644 --- a/assets/js/ecdc8e44.a365e563.js +++ b/assets/js/ecdc8e44.6e432d61.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7953],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>g});var r=a(6540);function n(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function p(e){for(var t=1;t=0||(n[a]=e[a]);return n}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(n[a]=e[a])}return n}var s=r.createContext({}),i=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):p(p({},t),e)),a},c=function(e){var t=i(e.components);return r.createElement(s.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},y=r.forwardRef((function(e,t){var a=e.components,n=e.mdxType,o=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),y=i(a),g=n,d=y["".concat(s,".").concat(g)]||y[g]||u[g]||o;return a?r.createElement(d,p(p({ref:t},c),{},{components:a})):r.createElement(d,p({ref:t},c))}));function g(e,t){var a=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var o=a.length,p=new Array(o);p[0]=y;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,p[1]=l;for(var i=2;i{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>g,frontMatter:()=>l,metadata:()=>i,toc:()=>u});var r=a(9668),n=a(1367),o=(a(6540),a(5680)),p=["components"],l={id:"deploy-spark",title:"Spark cluster"},s=void 0,i={unversionedId:"deploy-spark",id:"deploy-spark",title:"Spark cluster",description:"To be able 
to deploy Spark you will need to ask the DSRI admins to enable the Spark Operator in your project. It will be done quickly; once enabled, you will be able to start a Spark cluster in a few clicks.",source:"@site/docs/deploy-spark.md",sourceDirName:".",slug:"/deploy-spark",permalink:"/docs/deploy-spark",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-spark.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-spark",title:"Spark cluster"},sidebar:"docs",previous:{title:"Deploy Dask Cluster",permalink:"/docs/dask-cluster"},next:{title:"Run MPI jobs",permalink:"/docs/mpi-jobs"}},c={},u=[{value:"Deploy a Spark cluster",id:"deploy-a-spark-cluster",level:2},{value:"Deploy the cluster from the catalog",id:"deploy-the-cluster-from-the-catalog",level:3},{value:"Create a route to the Spark dashboard",id:"create-a-route-to-the-spark-dashboard",level:3},{value:"Run on Spark",id:"run-on-spark",level:2},{value:"Using PySpark",id:"using-pyspark",level:3},{value:"RDF analytics with SANSA and Zeppelin notebooks",id:"rdf-analytics-with-sansa-and-zeppelin-notebooks",level:3},{value:"Connect Spark to the persistent storage",id:"connect-spark-to-the-persistent-storage",level:2},{value:"Delete a running Spark cluster",id:"delete-a-running-spark-cluster",level:2}],y={toc:u};function g(e){var t=e.components,a=(0,n.A)(e,p);return(0,o.yg)("wrapper",(0,r.A)({},y,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("admonition",{title:"Request access to the Spark Operator",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"To be able to deploy Spark you will need to ",(0,o.yg)("a",{parentName:"p",href:"/help"},"ask the DSRI admins")," to enable the Spark Operator in your project. It will be done quickly; once enabled, you will be able to start a Spark cluster in a few clicks.")),(0,o.yg)("h2",{id:"deploy-a-spark-cluster"},"Deploy a Spark cluster"),(0,o.yg)("p",null,"Once the DSRI admins have enabled the Spark Operator in your project, you should find a ",(0,o.yg)("strong",{parentName:"p"},"Spark Cluster")," entry in the Catalog (in the ",(0,o.yg)("strong",{parentName:"p"},"Operator Backed")," category)"),(0,o.yg)("h3",{id:"deploy-the-cluster-from-the-catalog"},"Deploy the cluster from the catalog"),(0,o.yg)("img",{src:"/img/screenshot-spark-operator1.png",alt:"Apache Spark in the Catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"Click on the ",(0,o.yg)("strong",{parentName:"p"},"Spark Cluster")," entry to deploy a Spark cluster."),(0,o.yg)("p",null,"You will be presented with a form where you can provide the number of Spark workers in your cluster."),
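(0,o.yg)("p",null,"If you prefer the terminal, a sketch like the following can help you find and edit the resource created by the operator, for example to change the worker count later. The exact resource kind registered by the Spark Operator is installation-specific (an assumption you can verify with the first command), and ",(0,o.yg)("inlineCode",{parentName:"p"},"my-spark-cluster")," is an example name:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"# List the resource kinds registered by the Spark Operator (installation-specific)\noc api-resources | grep -i spark\n# Edit the cluster definition, using the kind found above, to change the number of workers\noc edit KIND_FOUND_ABOVE my-spark-cluster\n")),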
"),(0,o.yg)("p",null,"Additionally you can provide a label which can be helpful later to manage or delete the cluster, use the name of your application and the label ",(0,o.yg)("inlineCode",{parentName:"p"},"app"),", e.g.: ",(0,o.yg)("inlineCode",{parentName:"p"},"app=my-spark-cluster")),(0,o.yg)("img",{src:"/img/screenshot-spark-operator2.png",alt:"Deploy a Apache Spark cluster",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("admonition",{title:"Change ",type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"The number of Spark workers can be easily updated later in the Spark deployment YAML file.")),(0,o.yg)("h3",{id:"create-a-route-to-the-spark-dashboard"},"Create a route to the Spark dashboard"),(0,o.yg)("p",null,"Once the cluster has been started you can create a route to access the Spark web UI:"),(0,o.yg)("p",null,"Go to ",(0,o.yg)("strong",{parentName:"p"},"Search")," > Click on ",(0,o.yg)("strong",{parentName:"p"},"Resources")," and search for ",(0,o.yg)("strong",{parentName:"p"},"Route")," > Click on ",(0,o.yg)("strong",{parentName:"p"},"Route")),(0,o.yg)("p",null,"You should now see the routes deployed in your project. Click on the button ",(0,o.yg)("strong",{parentName:"p"},"Create Route")),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Give a short meaningful name to your route, e.g. ",(0,o.yg)("inlineCode",{parentName:"li"},"my-spark-ui")),(0,o.yg)("li",{parentName:"ul"},"Keep Hostname and Path as it is"),(0,o.yg)("li",{parentName:"ul"},"Select the ",(0,o.yg)("strong",{parentName:"li"},"Service")," corresponding your Spark cluster suffixed with ",(0,o.yg)("inlineCode",{parentName:"li"},"-ui"),", e.g. ",(0,o.yg)("inlineCode",{parentName:"li"},"my-spark-cluster-ui")),(0,o.yg)("li",{parentName:"ul"},"Select the ",(0,o.yg)("strong",{parentName:"li"},"Target Port")," of the route, it should be 8080")),(0,o.yg)("p",null,"You can now access the Spark web UI at the generated URL to see which jobs are running and the nodes in your cluster."),(0,o.yg)("h2",{id:"run-on-spark"},"Run on Spark"),(0,o.yg)("p",null,"You can now start a spark-enabled JupyterLab, or any other spark-enabled applications, to use the Spark cluster deployed."),(0,o.yg)("h3",{id:"using-pyspark"},"Using PySpark"),(0,o.yg)("p",null,"The easiest is to use a Spark-enabled JupyterLab image, such as ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/jupyter/docker-stacks/tree/master/pyspark-notebook"},"jupyter/pyspark-notebook")),(0,o.yg)("p",null,"But you can also use any image as long as you download the jar file, install all requirements, such as ",(0,o.yg)("inlineCode",{parentName:"p"},"pyspark"),", and set the right environment variable, such as ",(0,o.yg)("inlineCode",{parentName:"p"},"SPARK_HOME")),(0,o.yg)("p",null,"Connect to a Spark cluster deployed in the same project, replace ",(0,o.yg)("inlineCode",{parentName:"p"},"spark-cluster")," by your Spark cluster name:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"from pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession\n# Stop existing Spark Context\nspark = SparkSession.builder.master(\"spark://spark-cluster:7077\").getOrCreate()\nspark.sparkContext.stop()\n# Connect to the Spark cluster\nconf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077') \nsc = SparkContext(conf=conf)\n\n# Run basic Spark test\nx = ['spark', 'rdd', 'example', 'sample', 'example'] \ny = sc.parallelize(x)\ny.collect()\n")),(0,o.yg)("h3",{id:"rdf-analytics-with-sansa-and-zeppelin-notebooks"},"RDF 
analytics with SANSA and Zeppelin notebooks"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"http://sansa-stack.net"},"SANSA")," is a big data engine for scalable processing of large-scale RDF data. SANSA uses Spark, or Flink, which offer fault-tolerant, highly available and scalable approaches to efficiently process massive-sized datasets. SANSA provides the facilities for Semantic data representation, Querying, Inference, and Analytics."),(0,o.yg)("p",null,"Use the ",(0,o.yg)("strong",{parentName:"p"},"Zeppelin notebook for Spark")," template in the catalog to start a Spark-enabled Zeppelin notebook. You can find more information on the Zeppelin image at ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rimolive/zeppelin-openshift"},"https://github.com/rimolive/zeppelin-openshift")),(0,o.yg)("p",null,"Connect and test Spark in a Zeppelin notebook; replace ",(0,o.yg)("inlineCode",{parentName:"p"},"spark-cluster")," with your Spark cluster name:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"%pyspark\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession\n# Stop existing Spark Context\nspark = SparkSession.builder.master(\"spark://spark-cluster:7077\").getOrCreate()\nspark.sparkContext.stop()\n# Connect to the Spark cluster\nconf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077') \nsc = SparkContext(conf=conf)\n\n# Run basic Spark test\nx = [1, 2, 3, 4, 5] \ny = sc.parallelize(x)\ny.collect()\n")),(0,o.yg)("p",null,"You should see the job running in the Spark web UI; kill the job with the ",(0,o.yg)("strong",{parentName:"p"},"kill")," button in the Spark dashboard."),(0,o.yg)("p",null,"You can now start to run your workload on the Spark cluster."),(0,o.yg)("admonition",{title:"Reset a Zeppelin notebook",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Click on the gear wheel in the top right of the note: ",(0,o.yg)("strong",{parentName:"p"},"Interpreter binding"),", and reset the interpreter")),(0,o.yg)("p",null,"Use the official ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/SANSA-Stack/SANSA-Notebooks/tree/stack-merge/sansa-notebooks"},"SANSA notebooks examples")),(0,o.yg)("p",null,"See more examples:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/rimolive/zeppelin-openshift"},"https://github.com/rimolive/zeppelin-openshift"))),(0,o.yg)("h2",{id:"connect-spark-to-the-persistent-storage"},"Connect Spark to the persistent storage"),(0,o.yg)("p",null,"Instructions available at ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rimolive/ceph-spark-integration"},"https://github.com/rimolive/ceph-spark-integration")),(0,o.yg)("p",null,"Requirements:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"pip install boto\n")),(0,o.yg)("p",null,"Check the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rimolive/ceph-spark-integration/blob/master/notebooks/ceph-example.ipynb"},"example notebook for Ceph storage")),(0,o.yg)("h2",{id:"delete-a-running-spark-cluster"},"Delete a running Spark cluster"),(0,o.yg)("p",null,"Get all objects that are part of the Spark cluster; change ",(0,o.yg)("inlineCode",{parentName:"p"},"app=spark-cluster")," to match your Spark cluster name:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc get all,secret,configmaps --selector app=spark-cluster\n")),
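(0,o.yg)("p",null,"Once you have reviewed that list, the same selector can be used to delete these objects from the terminal. This is a sketch assuming the ",(0,o.yg)("inlineCode",{parentName:"p"},"app")," label you set when creating the cluster:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"# Delete all objects labeled with your Spark cluster name (double-check the selector first)\noc delete all,secret,configmaps --selector app=spark-cluster\n")),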
(0,o.yg)("p",null,"Then delete the Operator deployment from the OpenShift web UI overview."))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[7953],{5680:(e,t,a)=>{a.d(t,{xA:()=>c,yg:()=>g});var r=a(6540);function n(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function p(e){for(var t=1;t=0||(n[a]=e[a]);return n}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(n[a]=e[a])}return n}var s=r.createContext({}),i=function(e){var t=r.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):p(p({},t),e)),a},c=function(e){var t=i(e.components);return r.createElement(s.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},y=r.forwardRef((function(e,t){var a=e.components,n=e.mdxType,o=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),y=i(a),g=n,d=y["".concat(s,".").concat(g)]||y[g]||u[g]||o;return a?r.createElement(d,p(p({ref:t},c),{},{components:a})):r.createElement(d,p({ref:t},c))}));function g(e,t){var a=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var o=a.length,p=new Array(o);p[0]=y;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,p[1]=l;for(var i=2;i{a.r(t),a.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>g,frontMatter:()=>l,metadata:()=>i,toc:()=>u});var r=a(9668),n=a(1367),o=(a(6540),a(5680)),p=["components"],l={id:"deploy-spark",title:"Spark cluster"},s=void 0,i={unversionedId:"deploy-spark",id:"deploy-spark",title:"Spark cluster",description:"To be able to deploy Spark you will need to ask the DSRI admins to enable the Spark Operator in your project. 
It will be done quickly; once enabled, you will be able to start a Spark cluster in a few clicks.",source:"@site/docs/deploy-spark.md",sourceDirName:".",slug:"/deploy-spark",permalink:"/docs/deploy-spark",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-spark.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-spark",title:"Spark cluster"},sidebar:"docs",previous:{title:"Deploy Dask Cluster",permalink:"/docs/dask-cluster"},next:{title:"Run MPI jobs",permalink:"/docs/mpi-jobs"}},c={},u=[{value:"Deploy a Spark cluster",id:"deploy-a-spark-cluster",level:2},{value:"Deploy the cluster from the catalog",id:"deploy-the-cluster-from-the-catalog",level:3},{value:"Create a route to the Spark dashboard",id:"create-a-route-to-the-spark-dashboard",level:3},{value:"Run on Spark",id:"run-on-spark",level:2},{value:"Using PySpark",id:"using-pyspark",level:3},{value:"RDF analytics with SANSA and Zeppelin notebooks",id:"rdf-analytics-with-sansa-and-zeppelin-notebooks",level:3},{value:"Connect Spark to the persistent storage",id:"connect-spark-to-the-persistent-storage",level:2},{value:"Delete a running Spark cluster",id:"delete-a-running-spark-cluster",level:2}],y={toc:u};function g(e){var t=e.components,a=(0,n.A)(e,p);return(0,o.yg)("wrapper",(0,r.A)({},y,a,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("admonition",{title:"Request access to the Spark Operator",type:"warning"},(0,o.yg)("p",{parentName:"admonition"},"To be able to deploy Spark you will need to ",(0,o.yg)("a",{parentName:"p",href:"/help"},"ask the DSRI admins")," to enable the Spark Operator in your project. It will be done quickly; once enabled, you will be able to start a Spark cluster in a few clicks.")),(0,o.yg)("h2",{id:"deploy-a-spark-cluster"},"Deploy a Spark cluster"),(0,o.yg)("p",null,"Once the DSRI admins have enabled the Spark Operator in your project, you should find a ",(0,o.yg)("strong",{parentName:"p"},"Spark Cluster")," entry in the Catalog (in the ",(0,o.yg)("strong",{parentName:"p"},"Operator Backed")," category)"),(0,o.yg)("h3",{id:"deploy-the-cluster-from-the-catalog"},"Deploy the cluster from the catalog"),(0,o.yg)("img",{src:"/img/screenshot-spark-operator1.png",alt:"Apache Spark in the Catalog",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("p",null,"Click on the ",(0,o.yg)("strong",{parentName:"p"},"Spark Cluster")," entry to deploy a Spark cluster."),(0,o.yg)("p",null,"You will be presented with a form where you can provide the number of Spark workers in your cluster."),
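(0,o.yg)("p",null,"If you prefer the terminal, a sketch like the following can help you find and edit the resource created by the operator, for example to change the worker count later. The exact resource kind registered by the Spark Operator is installation-specific (an assumption you can verify with the first command), and ",(0,o.yg)("inlineCode",{parentName:"p"},"my-spark-cluster")," is an example name:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"# List the resource kinds registered by the Spark Operator (installation-specific)\noc api-resources | grep -i spark\n# Edit the cluster definition, using the kind found above, to change the number of workers\noc edit KIND_FOUND_ABOVE my-spark-cluster\n")),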
"),(0,o.yg)("p",null,"Additionally you can provide a label which can be helpful later to manage or delete the cluster, use the name of your application and the label ",(0,o.yg)("inlineCode",{parentName:"p"},"app"),", e.g.: ",(0,o.yg)("inlineCode",{parentName:"p"},"app=my-spark-cluster")),(0,o.yg)("img",{src:"/img/screenshot-spark-operator2.png",alt:"Deploy a Apache Spark cluster",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("admonition",{title:"Change ",type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"The number of Spark workers can be easily updated later in the Spark deployment YAML file.")),(0,o.yg)("h3",{id:"create-a-route-to-the-spark-dashboard"},"Create a route to the Spark dashboard"),(0,o.yg)("p",null,"Once the cluster has been started you can create a route to access the Spark web UI:"),(0,o.yg)("p",null,"Go to ",(0,o.yg)("strong",{parentName:"p"},"Search")," > Click on ",(0,o.yg)("strong",{parentName:"p"},"Resources")," and search for ",(0,o.yg)("strong",{parentName:"p"},"Route")," > Click on ",(0,o.yg)("strong",{parentName:"p"},"Route")),(0,o.yg)("p",null,"You should now see the routes deployed in your project. Click on the button ",(0,o.yg)("strong",{parentName:"p"},"Create Route")),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},"Give a short meaningful name to your route, e.g. ",(0,o.yg)("inlineCode",{parentName:"li"},"my-spark-ui")),(0,o.yg)("li",{parentName:"ul"},"Keep Hostname and Path as it is"),(0,o.yg)("li",{parentName:"ul"},"Select the ",(0,o.yg)("strong",{parentName:"li"},"Service")," corresponding your Spark cluster suffixed with ",(0,o.yg)("inlineCode",{parentName:"li"},"-ui"),", e.g. ",(0,o.yg)("inlineCode",{parentName:"li"},"my-spark-cluster-ui")),(0,o.yg)("li",{parentName:"ul"},"Select the ",(0,o.yg)("strong",{parentName:"li"},"Target Port")," of the route, it should be 8080")),(0,o.yg)("p",null,"You can now access the Spark web UI at the generated URL to see which jobs are running and the nodes in your cluster."),(0,o.yg)("h2",{id:"run-on-spark"},"Run on Spark"),(0,o.yg)("p",null,"You can now start a spark-enabled JupyterLab, or any other spark-enabled applications, to use the Spark cluster deployed."),(0,o.yg)("h3",{id:"using-pyspark"},"Using PySpark"),(0,o.yg)("p",null,"The easiest is to use a Spark-enabled JupyterLab image, such as ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/jupyter/docker-stacks/tree/master/pyspark-notebook"},"jupyter/pyspark-notebook")),(0,o.yg)("p",null,"But you can also use any image as long as you download the jar file, install all requirements, such as ",(0,o.yg)("inlineCode",{parentName:"p"},"pyspark"),", and set the right environment variable, such as ",(0,o.yg)("inlineCode",{parentName:"p"},"SPARK_HOME")),(0,o.yg)("p",null,"Connect to a Spark cluster deployed in the same project, replace ",(0,o.yg)("inlineCode",{parentName:"p"},"spark-cluster")," by your Spark cluster name:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"from pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession\n# Stop existing Spark Context\nspark = SparkSession.builder.master(\"spark://spark-cluster:7077\").getOrCreate()\nspark.sparkContext.stop()\n# Connect to the Spark cluster\nconf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077') \nsc = SparkContext(conf=conf)\n\n# Run basic Spark test\nx = ['spark', 'rdd', 'example', 'sample', 'example'] \ny = sc.parallelize(x)\ny.collect()\n")),(0,o.yg)("h3",{id:"rdf-analytics-with-sansa-and-zeppelin-notebooks"},"RDF 
analytics with SANSA and Zeppelin notebooks"),(0,o.yg)("p",null,(0,o.yg)("a",{parentName:"p",href:"http://sansa-stack.net"},"SANSA")," is a big data engine for scalable processing of large-scale RDF data. SANSA uses Spark, or Flink, which offer fault-tolerant, highly available and scalable approaches to efficiently process massive-sized datasets. SANSA provides the facilities for Semantic data representation, Querying, Inference, and Analytics."),(0,o.yg)("p",null,"Use the ",(0,o.yg)("strong",{parentName:"p"},"Zeppelin notebook for Spark")," template in the catalog to start a Spark-enabled Zeppelin notebook. You can find more information on the Zeppelin image at ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rimolive/zeppelin-openshift"},"https://github.com/rimolive/zeppelin-openshift")),(0,o.yg)("p",null,"Connect and test Spark in a Zeppelin notebook; replace ",(0,o.yg)("inlineCode",{parentName:"p"},"spark-cluster")," with your Spark cluster name:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-python"},"%pyspark\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession\n# Stop existing Spark Context\nspark = SparkSession.builder.master(\"spark://spark-cluster:7077\").getOrCreate()\nspark.sparkContext.stop()\n# Connect to the Spark cluster\nconf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077') \nsc = SparkContext(conf=conf)\n\n# Run basic Spark test\nx = [1, 2, 3, 4, 5] \ny = sc.parallelize(x)\ny.collect()\n")),(0,o.yg)("p",null,"You should see the job running in the Spark web UI; kill the job with the ",(0,o.yg)("strong",{parentName:"p"},"kill")," button in the Spark dashboard."),(0,o.yg)("p",null,"You can now start to run your workload on the Spark cluster."),(0,o.yg)("admonition",{title:"Reset a Zeppelin notebook",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"Click on the gear wheel in the top right of the note: ",(0,o.yg)("strong",{parentName:"p"},"Interpreter binding"),", and reset the interpreter")),(0,o.yg)("p",null,"Use the official ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/SANSA-Stack/SANSA-Notebooks/tree/stack-merge/sansa-notebooks"},"SANSA notebooks examples")),(0,o.yg)("p",null,"See more examples:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("a",{parentName:"li",href:"https://github.com/rimolive/zeppelin-openshift"},"https://github.com/rimolive/zeppelin-openshift"))),(0,o.yg)("h2",{id:"connect-spark-to-the-persistent-storage"},"Connect Spark to the persistent storage"),(0,o.yg)("p",null,"Instructions available at ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rimolive/ceph-spark-integration"},"https://github.com/rimolive/ceph-spark-integration")),(0,o.yg)("p",null,"Requirements:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"pip install boto\n")),(0,o.yg)("p",null,"Check the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rimolive/ceph-spark-integration/blob/master/notebooks/ceph-example.ipynb"},"example notebook for Ceph storage")),(0,o.yg)("h2",{id:"delete-a-running-spark-cluster"},"Delete a running Spark cluster"),(0,o.yg)("p",null,"Get all objects that are part of the Spark cluster; change ",(0,o.yg)("inlineCode",{parentName:"p"},"app=spark-cluster")," to match your Spark cluster name:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc get all,secret,configmaps --selector app=spark-cluster\n")),
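(0,o.yg)("p",null,"Once you have reviewed that list, the same selector can be used to delete these objects from the terminal. This is a sketch assuming the ",(0,o.yg)("inlineCode",{parentName:"p"},"app")," label you set when creating the cluster:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"# Delete all objects labeled with your Spark cluster name (double-check the selector first)\noc delete all,secret,configmaps --selector app=spark-cluster\n")),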
(0,o.yg)("p",null,"Then delete the Operator deployment from the OpenShift web UI overview."))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/ed5ef82a.2bf7b169.js b/assets/js/ed5ef82a.9e89411d.js similarity index 99% rename from assets/js/ed5ef82a.2bf7b169.js rename to assets/js/ed5ef82a.9e89411d.js index 0732fe1aa..1f6a72b5c 100644 --- a/assets/js/ed5ef82a.2bf7b169.js +++ b/assets/js/ed5ef82a.9e89411d.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6683],{5680:(e,t,r)=>{r.d(t,{xA:()=>u,yg:()=>g});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),p=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},u=function(e){var t=p(e.components);return n.createElement(l.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},c=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,l=e.parentName,u=s(e,["components","mdxType","originalType","parentName"]),c=p(r),g=a,m=c["".concat(l,".").concat(g)]||c[g]||d[g]||o;return r?n.createElement(m,i(i({ref:t},u),{},{components:r})):n.createElement(m,i({ref:t},u))}));function g(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=c;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>u,contentTitle:()=>l,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>d});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],s={id:"deploy-rstudio",title:"RStudio"},l=void 0,p={unversionedId:"deploy-rstudio",id:"deploy-rstudio",title:"RStudio",description:"Start RStudio",source:"@site/docs/deploy-rstudio.md",sourceDirName:".",slug:"/deploy-rstudio",permalink:"/docs/deploy-rstudio",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-rstudio.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-rstudio",title:"RStudio"},sidebar:"docs",previous:{title:"Jupyter Notebooks",permalink:"/docs/deploy-jupyter"},next:{title:"VisualStudio Code",permalink:"/docs/deploy-vscode"}},u={},d=[{value:"Start RStudio",id:"start-rstudio",level:2},{value:"Restricted RStudio with Shiny server",id:"restricted-rstudio-with-shiny-server",level:2},{value:"Use Git in RStudio",id:"use-git-in-rstudio",level:2},{value:"Run R jobs",id:"run-r-jobs",level:2}],c={toc:d};function g(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},c,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"start-rstudio"},"Start RStudio"),(0,o.yg)("p",null,"Start an RStudio container based on ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rocker-org/rocker/wiki/Using-the-RStudio-image"},"Rocker RStudio tidyverse images")," (debian), with ",(0,o.yg)("inlineCode",{parentName:"p"},"sudo")," privileges to install anything you need (e.g. 
pip or apt packages)"),(0,o.yg)("p",null,"You can start a container using the ",(0,o.yg)("strong",{parentName:"p"},"RStudio")," template in the ",(0,o.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,o.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,o.yg)("p",null,"Provide a few parameters, and Instantiate the template. The ",(0,o.yg)("strong",{parentName:"p"},"username")," will be ",(0,o.yg)("inlineCode",{parentName:"p"},"rstudio")," and the ",(0,o.yg)("strong",{parentName:"p"},"password")," will be what you configure yourself; the DSRI will automatically create a persistent volume to store data you will put in the ",(0,o.yg)("inlineCode",{parentName:"p"},"/home/rstudio")," folder. You can find the persistent volumes in the DSRI web UI: go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,o.yg)("img",{src:"/img/screenshot-deploy-rstudio.png",alt:"Deploy RStudio",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("admonition",{title:"Official image documentation",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"See the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rocker-org/rocker/wiki/Using-the-RStudio-image"},"official Docker image documentation")," for more details about the container deployed.")),(0,o.yg)("h2",{id:"restricted-rstudio-with-shiny-server"},"Restricted RStudio with Shiny server"),(0,o.yg)("p",null,"Start an RStudio application, with a complementary Shiny server, using a regular ",(0,o.yg)("inlineCode",{parentName:"p"},"rstudio")," user, ",(0,o.yg)("strong",{parentName:"p"},"without ",(0,o.yg)("inlineCode",{parentName:"strong"},"sudo")," privileges"),"."),(0,o.yg)("p",null,"Create the template in your project:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"In the DSRI web UI, go to ",(0,o.yg)("strong",{parentName:"p"},"+ Add"),", then click on ",(0,o.yg)("strong",{parentName:"p"},"YAML"),", add the content of the ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/templates/restricted/template-rstudio-shiny-restricted.yml"},"template-rstudio-shiny-restricted.yml")," file, and validate.")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"You can also do it using the terminal:"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/templates/restricted/template-rstudio-shiny-restricted.yml\n")))),(0,o.yg)("p",null,"Once the template has been created in your project, use the ",(0,o.yg)("strong",{parentName:"p"},"RStudio with Shiny server")," template in the OpenShift web UI catalog. 
It will automatically create a persistent storage for the data."),(0,o.yg)("admonition",{title:"No sudo privileges",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"You will not have ",(0,o.yg)("inlineCode",{parentName:"p"},"sudo")," privileges in the application.")),(0,o.yg)("h2",{id:"use-git-in-rstudio"},"Use Git in RStudio"),(0,o.yg)("p",null,"The fastest way to get started is to use ",(0,o.yg)("inlineCode",{parentName:"p"},"git")," from the terminal; for example, to clone a git repository use ",(0,o.yg)("inlineCode",{parentName:"p"},"git clone")),(0,o.yg)("p",null,"You can also check how to enable Git integration in RStudio at ",(0,o.yg)("a",{parentName:"p",href:"https://support.rstudio.com/hc/en-us/articles/200532077"},"https://support.rstudio.com/hc/en-us/articles/200532077")),(0,o.yg)("p",null,"You can run this command to ask git to save your password for 15 minutes:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"git config credential.helper cache\n")),(0,o.yg)("p",null,"Or store the password/token in a plain text file:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"git config --global credential.helper 'store --file ~/.git-credentials'\n")),(0,o.yg)("p",null,"Before pushing back to GitHub or GitLab, you will need to ",(0,o.yg)("strong",{parentName:"p"},"configure your username and email")," in the terminal:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},'git config --global user.name "Jean Dupont"\ngit config --global user.email jeandupont@gmail.com\n')),(0,o.yg)("admonition",{title:"Git tip",type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"We recommend using SSH instead of an HTTPS connection when possible; check out ",(0,o.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent"},"here")," how to generate SSH keys and use them with your GitHub account.")),(0,o.yg)("h2",{id:"run-r-jobs"},"Run R jobs"),(0,o.yg)("p",null,"You can visit this folder, which gives all the resources and instructions explaining how to run a standalone R job on the DSRI: ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-demo/tree/main/r-job"},"https://github.com/MaastrichtU-IDS/dsri-demo/tree/main/r-job")),(0,o.yg)("p",null,"If you want to run jobs directly from RStudio, check out this package to run chunks of R code as jobs directly through RStudio: ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/lindeloev/job"},"https://github.com/lindeloev/job")))}g.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6683],{5680:(e,t,r)=>{r.d(t,{xA:()=>u,yg:()=>g});var n=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),p=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},u=function(e){var t=p(e.components);return 
n.createElement(l.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},c=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,l=e.parentName,u=s(e,["components","mdxType","originalType","parentName"]),c=p(r),g=a,m=c["".concat(l,".").concat(g)]||c[g]||d[g]||o;return r?n.createElement(m,i(i({ref:t},u),{},{components:r})):n.createElement(m,i({ref:t},u))}));function g(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=c;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>u,contentTitle:()=>l,default:()=>g,frontMatter:()=>s,metadata:()=>p,toc:()=>d});var n=r(9668),a=r(1367),o=(r(6540),r(5680)),i=["components"],s={id:"deploy-rstudio",title:"RStudio"},l=void 0,p={unversionedId:"deploy-rstudio",id:"deploy-rstudio",title:"RStudio",description:"Start RStudio",source:"@site/docs/deploy-rstudio.md",sourceDirName:".",slug:"/deploy-rstudio",permalink:"/docs/deploy-rstudio",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-rstudio.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-rstudio",title:"RStudio"},sidebar:"docs",previous:{title:"Jupyter Notebooks",permalink:"/docs/deploy-jupyter"},next:{title:"VisualStudio Code",permalink:"/docs/deploy-vscode"}},u={},d=[{value:"Start RStudio",id:"start-rstudio",level:2},{value:"Restricted RStudio with Shiny server",id:"restricted-rstudio-with-shiny-server",level:2},{value:"Use Git in RStudio",id:"use-git-in-rstudio",level:2},{value:"Run R jobs",id:"run-r-jobs",level:2}],c={toc:d};function g(e){var t=e.components,r=(0,a.A)(e,i);return(0,o.yg)("wrapper",(0,n.A)({},c,r,{components:t,mdxType:"MDXLayout"}),(0,o.yg)("h2",{id:"start-rstudio"},"Start RStudio"),(0,o.yg)("p",null,"Start an RStudio container based on ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rocker-org/rocker/wiki/Using-the-RStudio-image"},"Rocker RStudio tidyverse images")," (debian), with ",(0,o.yg)("inlineCode",{parentName:"p"},"sudo")," privileges to install anything you need (e.g. pip or apt packages)"),(0,o.yg)("p",null,"You can start a container using the ",(0,o.yg)("strong",{parentName:"p"},"RStudio")," template in the ",(0,o.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/catalog"},"Catalog web UI")," (make sure the ",(0,o.yg)("strong",{parentName:"p"},"Templates")," checkbox is checked)"),(0,o.yg)("p",null,"Provide a few parameters, and Instantiate the template. The ",(0,o.yg)("strong",{parentName:"p"},"username")," will be ",(0,o.yg)("inlineCode",{parentName:"p"},"rstudio")," and the ",(0,o.yg)("strong",{parentName:"p"},"password")," will be what you configure yourself; the DSRI will automatically create a persistent volume to store data you will put in the ",(0,o.yg)("inlineCode",{parentName:"p"},"/home/rstudio")," folder."),
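(0,o.yg)("p",null,"You can also quickly check from a terminal that the volume was created; a minimal sketch with the ",(0,o.yg)("inlineCode",{parentName:"p"},"oc")," client:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"# List the persistent volume claims in your current project\noc get pvc\n")),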
(0,o.yg)("p",null,"You can find the persistent volumes in the DSRI web UI: go to the ",(0,o.yg)("strong",{parentName:"p"},"Administrator")," view > ",(0,o.yg)("strong",{parentName:"p"},"Storage")," > ",(0,o.yg)("strong",{parentName:"p"},"Persistent Volume Claims"),"."),(0,o.yg)("img",{src:"/img/screenshot-deploy-rstudio.png",alt:"Deploy RStudio",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,o.yg)("admonition",{title:"Official image documentation",type:"info"},(0,o.yg)("p",{parentName:"admonition"},"See the ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/rocker-org/rocker/wiki/Using-the-RStudio-image"},"official Docker image documentation")," for more details about the container deployed.")),(0,o.yg)("h2",{id:"restricted-rstudio-with-shiny-server"},"Restricted RStudio with Shiny server"),(0,o.yg)("p",null,"Start an RStudio application, with a complementary Shiny server, using a regular ",(0,o.yg)("inlineCode",{parentName:"p"},"rstudio")," user, ",(0,o.yg)("strong",{parentName:"p"},"without ",(0,o.yg)("inlineCode",{parentName:"strong"},"sudo")," privileges"),"."),(0,o.yg)("p",null,"Create the template in your project:"),(0,o.yg)("ul",null,(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"In the DSRI web UI, go to ",(0,o.yg)("strong",{parentName:"p"},"+ Add"),", then click on ",(0,o.yg)("strong",{parentName:"p"},"YAML"),", add the content of the ",(0,o.yg)("a",{parentName:"p",href:"https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/templates/restricted/template-rstudio-shiny-restricted.yml"},"template-rstudio-shiny-restricted.yml")," file, and validate.")),(0,o.yg)("li",{parentName:"ul"},(0,o.yg)("p",{parentName:"li"},"You can also do it using the terminal:"),(0,o.yg)("pre",{parentName:"li"},(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"oc apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/templates/restricted/template-rstudio-shiny-restricted.yml\n")))),(0,o.yg)("p",null,"Once the template has been created in your project, use the ",(0,o.yg)("strong",{parentName:"p"},"RStudio with Shiny server")," template in the OpenShift web UI catalog. 
It will automatically create persistent storage for the data."),(0,o.yg)("admonition",{title:"No sudo privileges",type:"caution"},(0,o.yg)("p",{parentName:"admonition"},"You will not have ",(0,o.yg)("inlineCode",{parentName:"p"},"sudo")," privileges in the application.")),(0,o.yg)("h2",{id:"use-git-in-rstudio"},"Use Git in RStudio"),(0,o.yg)("p",null,"The fastest way to get started is to use ",(0,o.yg)("inlineCode",{parentName:"p"},"git")," from the terminal, for example, to clone a git repository use ",(0,o.yg)("inlineCode",{parentName:"p"},"git clone")),(0,o.yg)("p",null,"You can also check how to enable Git integration in RStudio at ",(0,o.yg)("a",{parentName:"p",href:"https://support.rstudio.com/hc/en-us/articles/200532077"},"https://support.rstudio.com/hc/en-us/articles/200532077")),(0,o.yg)("p",null,"You can run this command to ask git to save your password for 15min:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"git config credential.helper cache\n")),(0,o.yg)("p",null,"Or store the password/token in a plain text file:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},"git config --global credential.helper 'store --file ~/.git-credentials'\n")),(0,o.yg)("p",null,"Before pushing back to GitHub or GitLab, you will need to ",(0,o.yg)("strong",{parentName:"p"},"configure your username and email")," in the terminal:"),(0,o.yg)("pre",null,(0,o.yg)("code",{parentName:"pre",className:"language-bash"},'git config --global user.name "Jean Dupont"\ngit config --global user.email jeandupont@gmail.com\n')),(0,o.yg)("admonition",{title:"Git tip",type:"tip"},(0,o.yg)("p",{parentName:"admonition"},"We recommend using SSH instead of HTTPS connections when possible, check out ",(0,o.yg)("a",{parentName:"p",href:"https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent"},"here")," how to generate SSH keys and use them with your GitHub account.")),(0,o.yg)("h2",{id:"run-r-jobs"},"Run R jobs"),(0,o.yg)("p",null,"You can visit this folder, which gives all the resources and instructions you need to run a standalone R job on the DSRI: ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-demo/tree/main/r-job"},"https://github.com/MaastrichtU-IDS/dsri-demo/tree/main/r-job")),(0,o.yg)("p",null,"If you want to run jobs directly from RStudio, check out this package to run chunks of R code as jobs directly through RStudio: ",(0,o.yg)("a",{parentName:"p",href:"https://github.com/lindeloev/job"},"https://github.com/lindeloev/job")))}g.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/f704770b.9a92e5fb.js b/assets/js/f704770b.3ea04af8.js similarity index 99% rename from assets/js/f704770b.9a92e5fb.js rename to assets/js/f704770b.3ea04af8.js index baf98ae73..9dd059b72 100644 --- a/assets/js/f704770b.9a92e5fb.js +++ b/assets/js/f704770b.3ea04af8.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3870],{5680:(e,t,a)=>{a.d(t,{xA:()=>u,yg:()=>y});var n=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function l(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function r(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var 
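Following the Git tip above, a minimal sketch for switching from HTTPS to SSH; the email address and the repository URL are placeholders.

```bash
# Generate an SSH key in the RStudio terminal, then add the public
# key to your GitHub or GitLab account settings
ssh-keygen -t ed25519 -C "jeandupont@gmail.com"
cat ~/.ssh/id_ed25519.pub

# Switch an existing clone from HTTPS to SSH (placeholder repository URL)
git remote set-url origin git@github.com:username/my-repo.git
```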
l=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var s=n.createContext({}),p=function(e){var t=n.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):r(r({},t),e)),a},u=function(e){var t=p(e.components);return n.createElement(s.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,l=e.originalType,s=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),m=p(a),y=o,d=m["".concat(s,".").concat(y)]||m[y]||c[y]||l;return a?n.createElement(d,r(r({ref:t},u),{},{components:a})):n.createElement(d,r({ref:t},u))}));function y(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var l=a.length,r=new Array(l);r[0]=m;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:o,r[1]=i;for(var p=2;p{a.r(t),a.d(t,{assets:()=>u,contentTitle:()=>s,default:()=>y,frontMatter:()=>i,metadata:()=>p,toc:()=>c});var n=a(9668),o=a(1367),l=(a(6540),a(5680)),r=["components"],i={id:"deploy-matlab",title:"Matlab"},s=void 0,p={unversionedId:"deploy-matlab",id:"deploy-matlab",title:"Matlab",description:"Note that we are not expert in Matlab: feel free to contact Mathworks support directly if you are having any issues with their official Docker image. Because since it's closed source we cannot fix it ourselves.",source:"@site/docs/deploy-matlab.md",sourceDirName:".",slug:"/deploy-matlab",permalink:"/docs/deploy-matlab",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-matlab.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-matlab",title:"Matlab"},sidebar:"docs",previous:{title:"Databases",permalink:"/docs/deploy-database"},next:{title:"JupyterHub",permalink:"/docs/deploy-jupyterhub"}},u={},c=[{value:"Use the official Matlab image",id:"use-the-official-matlab-image",level:2},{value:"Use a stable Matlab image",id:"use-a-stable-matlab-image",level:2},{value:"Use Matlab in Jupyter",id:"use-matlab-in-jupyter",level:2},{value:"Deploy Matlab on GPU",id:"deploy-matlab-on-gpu",level:2},{value:"Build your own Matlab image",id:"build-your-own-matlab-image",level:2}],m={toc:c};function y(e){var t=e.components,a=(0,o.A)(e,r);return(0,l.yg)("wrapper",(0,n.A)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("p",null,"Note that we are not expert in Matlab: feel free to contact Mathworks support directly if you are having any issues with their official Docker image. Because since it's closed source we cannot fix it ourselves."),(0,l.yg)("p",null,"You can request official support from Matlab at this address after login and connecting your account to the UM license: ",(0,l.yg)("a",{parentName:"p",href:"https://nl.mathworks.com/academia/tah-portal/maastricht-university-31574866.html#get"},"https://nl.mathworks.com/academia/tah-portal/maastricht-university-31574866.html#get")),(0,l.yg)("h2",{id:"use-the-official-matlab-image"},"Use the official Matlab image"),(0,l.yg)("p",null,"Start Matlab with a desktop UI accessible directly using your web browser at a URL automatically generated. 
"),(0,l.yg)("p",null,"Go to the ",(0,l.yg)("strong",{parentName:"p"},"Catalog"),", make sure ",(0,l.yg)("strong",{parentName:"p"},"Templates")," are displayed (box checked), and search for ",(0,l.yg)("strong",{parentName:"p"},"Matlab"),", and provide the right parameters:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"You will need to provide the password you will use to access the Matlab UI when filling the template."),(0,l.yg)("li",{parentName:"ul"},"You can also change the Matlab image version, see the latest version released in the ",(0,l.yg)("a",{parentName:"li",href:"https://hub.docker.com/r/mathworks/matlab"},"official Matlab Docker image documentation"))),(0,l.yg)("p",null,"Once Matlab start you can access it through 2 routes (URL), which can be accessed when clicking on the Matlab node in the ",(0,l.yg)("strong",{parentName:"p"},"Topology"),":"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"The main ",(0,l.yg)("inlineCode",{parentName:"li"},"matlab")," route to access Matlab desktop UI directly in your web browser. It is recommended to use this route."),(0,l.yg)("li",{parentName:"ul"},"The ",(0,l.yg)("inlineCode",{parentName:"li"},"matlab-vnc")," route can be used to access Matlab using a VNC client (you will need to use the full URL to your Matlab VNC route). Only use it if you know what you're doing.")),(0,l.yg)("h2",{id:"use-a-stable-matlab-image"},"Use a stable Matlab image"),(0,l.yg)("p",null,"The official Matlab image is infamous for showing a black screening after a few hours of use, making it a bit cumbersome to be used trustfully."),(0,l.yg)("p",null,"We have a solution if you need to have a more stable Matlab image, that will require a bit more manual operations:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Use the ",(0,l.yg)("strong",{parentName:"li"},"Ubuntu with GUI")," template to setup a Ubuntu pod on the DSRI with the image ",(0,l.yg)("a",{parentName:"li",href:"https://github.com/vemonet/docker-ubuntu-vnc-desktop"},(0,l.yg)("inlineCode",{parentName:"a"},"ghcr.io/vemonet/docker-ubuntu-vnc-desktop:latest"))),(0,l.yg)("li",{parentName:"ul"},"Start firefox and browse to ",(0,l.yg)("a",{parentName:"li",href:"https://nl.mathworks.com"},"https://nl.mathworks.com")),(0,l.yg)("li",{parentName:"ul"},"Login with your personal Matlab account, create one if you don\u2019t have it"),(0,l.yg)("li",{parentName:"ul"},"Choose ",(0,l.yg)("strong",{parentName:"li"},"get matlab")," and download, the linux matlab version"),(0,l.yg)("li",{parentName:"ul"},"Open a terminal window and run the following commands:")),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"sudo apt-get update\nsudo apt-get install unzip\n# Unzip the previous downloaded matlab installation file\n# start the matlab installation with:\nsudo .\\install\n")),(0,l.yg)("p",null,"You will then be prompted the Matlab installation process:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Fill in your personal matlab account credentials"),(0,l.yg)("li",{parentName:"ul"},"\u26a0\ufe0f Fill in the username as used in the Ubuntu environment, in your case it will most probably be ",(0,l.yg)("strong",{parentName:"li"},"root")," (Matlab gives a license error if this is not correct, check with ",(0,l.yg)("inlineCode",{parentName:"li"},"whoami")," in the terminal when in doubt)"),(0,l.yg)("li",{parentName:"ul"},"Select every Matlab modules you want to be installed"),(0,l.yg)("li",{parentName:"ul"},'Check "symbolic link" and 
"Improve\u2026\u2026"')),(0,l.yg)("h2",{id:"use-matlab-in-jupyter"},"Use Matlab in Jupyter"),(0,l.yg)("p",null,"You can also use ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/mathworks/jupyter-matlab-proxy"},"mathworks/jupyter-matlab-proxy"),". You can easily install it in a JupyterLab image with ",(0,l.yg)("inlineCode",{parentName:"p"},"pip"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"pip install jupyter-matlab-proxy\n")),(0,l.yg)("p",null,"Follow the instructions on the ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/mathworks/jupyter-matlab-proxy"},"mathworks/jupyter-matlab-proxy repository")," to access it."),(0,l.yg)("h2",{id:"deploy-matlab-on-gpu"},"Deploy Matlab on GPU"),(0,l.yg)("p",null,"We use the Matlab template in the DSRI catalog to deploy a pre-built ",(0,l.yg)("strong",{parentName:"p"},"Nvidia Matlab Deep Learning Container")," on CPU or GPU nodes. See the ",(0,l.yg)("a",{parentName:"p",href:"https://nl.mathworks.com/help/cloudcenter/ug/matlab-deep-learning-container-on-dgx.html"},"official documentation from MathWorks")," for more details about this image."),(0,l.yg)("admonition",{title:"Request access to Matlab",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"To be able to access the Matlab on GPU template you will need to ",(0,l.yg)("a",{parentName:"p",href:"/help"},"ask the DSRI admins")," to enable it in your project.")),(0,l.yg)("p",null,"2 options are available to connect to your running Matlab pod terminal:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Go to the matlab pod page on the DSRI web UI "),(0,l.yg)("li",{parentName:"ul"},"Or connect from your terminal with ",(0,l.yg)("inlineCode",{parentName:"li"},"oc rsh MATLAB_POD_ID"))),(0,l.yg)("p",null,"Type ",(0,l.yg)("inlineCode",{parentName:"p"},"bash")," when first accessing to the terminal to have a better experience."),(0,l.yg)("p",null,"Type ",(0,l.yg)("inlineCode",{parentName:"p"},"cd /ContainerDeepLearningData")," to go in the persistent volume, and use this volume to store all data that should be preserved."),(0,l.yg)("p",null,"Type ",(0,l.yg)("inlineCode",{parentName:"p"},"matlab")," to access Matlab from the terminal"),(0,l.yg)("p",null,"It is possible to access the Matlab desktop UI through VNC and a web UI, but the script to start it in ",(0,l.yg)("inlineCode",{parentName:"p"},"/bin/run.sh")," seems to face some errors, let us know if you have any luck with this."),(0,l.yg)("p",null,"By default the image run with the ",(0,l.yg)("inlineCode",{parentName:"p"},"matlab")," user which does not have ",(0,l.yg)("inlineCode",{parentName:"p"},"sudo")," privilege, you can run the container as root if you need to install packages which require admin privileges. 
"),(0,l.yg)("h2",{id:"build-your-own-matlab-image"},"Build your own Matlab image"),(0,l.yg)("p",null,"Follow the instructions at: ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/mathworks-ref-arch/matlab-dockerfile"},"https://github.com/mathworks-ref-arch/matlab-dockerfile")),(0,l.yg)("p",null,"This will require you to retrieve Matlab installation files to build your own container"),(0,l.yg)("p",null,"Once all the files have been properly placed in the folder and the license server URL has been set, you can start the build on DSRI by following the ",(0,l.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/guide-dockerfile-to-openshift#create-new-build-configuration"},"documentation to deploy from a ",(0,l.yg)("inlineCode",{parentName:"a"},"Dockerfile"))),(0,l.yg)("admonition",{title:"License server not available on your laptop",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"If you try to build Matlab directly on your laptop it will most probably fail as your machine might not have access to the license server. You will need to build the Matlab container directly on DSRI with ",(0,l.yg)("inlineCode",{parentName:"p"},"oc start-build"))),(0,l.yg)("p",null,"Once Matlab deployed, you will need to edit the matlab deployment YAML before it works."),(0,l.yg)("p",null,"Go to ",(0,l.yg)("strong",{parentName:"p"},"Topology"),", click on the Matlab node, click on the ",(0,l.yg)("strong",{parentName:"p"},"Actions")," button of the matlab details, and ",(0,l.yg)("strong",{parentName:"p"},"Edit deployment"),". In the deployment YAML search for ",(0,l.yg)("inlineCode",{parentName:"p"},"spec:")," which has a ",(0,l.yg)("inlineCode",{parentName:"p"},"containers:")," as child, and add the following under ",(0,l.yg)("inlineCode",{parentName:"p"},"spec:")),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-yaml"},"spec:\n serviceAccountName: anyuid\n containers: ...\n")),(0,l.yg)("p",null,"Your Matlab container should now be running!"),(0,l.yg)("p",null,"2 options are available to connect to your running Matlab pod terminal:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Go to the matlab pod page on the DSRI web UI "),(0,l.yg)("li",{parentName:"ul"},"Or connect from your terminal with ",(0,l.yg)("inlineCode",{parentName:"li"},"oc rsh MATLAB_POD_ID"))),(0,l.yg)("p",null,"You can access Matlab from the terminal by running ",(0,l.yg)("inlineCode",{parentName:"p"},"matlab")),(0,l.yg)("p",null,"Unfortunately Matlab did not expected their users to need the graphical UI when using Matlab in containers. So only the command line is available by default. 
You can find more information to ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/mathworks-ref-arch/matlab-dockerfile/issues/18"},"enable the Matlab UI in this issue"),"."))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[3870],{5680:(e,t,a)=>{a.d(t,{xA:()=>u,yg:()=>y});var n=a(6540);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function l(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function r(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var s=n.createContext({}),p=function(e){var t=n.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):r(r({},t),e)),a},u=function(e){var t=p(e.components);return n.createElement(s.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,l=e.originalType,s=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),m=p(a),y=o,d=m["".concat(s,".").concat(y)]||m[y]||c[y]||l;return a?n.createElement(d,r(r({ref:t},u),{},{components:a})):n.createElement(d,r({ref:t},u))}));function y(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var l=a.length,r=new Array(l);r[0]=m;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i.mdxType="string"==typeof e?e:o,r[1]=i;for(var p=2;p{a.r(t),a.d(t,{assets:()=>u,contentTitle:()=>s,default:()=>y,frontMatter:()=>i,metadata:()=>p,toc:()=>c});var n=a(9668),o=a(1367),l=(a(6540),a(5680)),r=["components"],i={id:"deploy-matlab",title:"Matlab"},s=void 0,p={unversionedId:"deploy-matlab",id:"deploy-matlab",title:"Matlab",description:"Note that we are not expert in Matlab: feel free to contact Mathworks support directly if you are having any issues with their official Docker image. Because since it's closed source we cannot fix it ourselves.",source:"@site/docs/deploy-matlab.md",sourceDirName:".",slug:"/deploy-matlab",permalink:"/docs/deploy-matlab",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-matlab.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-matlab",title:"Matlab"},sidebar:"docs",previous:{title:"Databases",permalink:"/docs/deploy-database"},next:{title:"JupyterHub",permalink:"/docs/deploy-jupyterhub"}},u={},c=[{value:"Use the official Matlab image",id:"use-the-official-matlab-image",level:2},{value:"Use a stable Matlab image",id:"use-a-stable-matlab-image",level:2},{value:"Use Matlab in Jupyter",id:"use-matlab-in-jupyter",level:2},{value:"Deploy Matlab on GPU",id:"deploy-matlab-on-gpu",level:2},{value:"Build your own Matlab image",id:"build-your-own-matlab-image",level:2}],m={toc:c};function y(e){var t=e.components,a=(0,o.A)(e,r);return(0,l.yg)("wrapper",(0,n.A)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,l.yg)("p",null,"Note that we are not expert in Matlab: feel free to contact Mathworks support directly if you are having any issues with their official Docker image. 
Since it's closed source, we cannot fix it ourselves."),(0,l.yg)("p",null,"You can request official support from Matlab at this address after logging in and connecting your account to the UM license: ",(0,l.yg)("a",{parentName:"p",href:"https://nl.mathworks.com/academia/tah-portal/maastricht-university-31574866.html#get"},"https://nl.mathworks.com/academia/tah-portal/maastricht-university-31574866.html#get")),(0,l.yg)("h2",{id:"use-the-official-matlab-image"},"Use the official Matlab image"),(0,l.yg)("p",null,"Start Matlab with a desktop UI accessible directly in your web browser at an automatically generated URL. "),(0,l.yg)("p",null,"Go to the ",(0,l.yg)("strong",{parentName:"p"},"Catalog"),", make sure ",(0,l.yg)("strong",{parentName:"p"},"Templates")," are displayed (box checked), search for ",(0,l.yg)("strong",{parentName:"p"},"Matlab"),", and provide the right parameters:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"You will need to provide the password you will use to access the Matlab UI when filling the template."),(0,l.yg)("li",{parentName:"ul"},"You can also change the Matlab image version; see the latest version released in the ",(0,l.yg)("a",{parentName:"li",href:"https://hub.docker.com/r/mathworks/matlab"},"official Matlab Docker image documentation"))),(0,l.yg)("p",null,"Once Matlab starts you can access it through 2 routes (URLs), which can be found by clicking on the Matlab node in the ",(0,l.yg)("strong",{parentName:"p"},"Topology"),":"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"The main ",(0,l.yg)("inlineCode",{parentName:"li"},"matlab")," route to access the Matlab desktop UI directly in your web browser. It is recommended to use this route."),(0,l.yg)("li",{parentName:"ul"},"The ",(0,l.yg)("inlineCode",{parentName:"li"},"matlab-vnc")," route can be used to access Matlab using a VNC client (you will need to use the full URL to your Matlab VNC route). 
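To find those generated URLs without clicking through the Topology, a small sketch with the oc CLI; it assumes the routes are named matlab and matlab-vnc as described above.

```bash
# List the routes created by the Matlab template; the HOST/PORT
# column shows the automatically generated URLs
oc get routes

# Print only the hostname of the main matlab route
oc get route matlab -o jsonpath='{.spec.host}'
```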
Only use it if you know what you're doing.")),(0,l.yg)("h2",{id:"use-a-stable-matlab-image"},"Use a stable Matlab image"),(0,l.yg)("p",null,"The official Matlab image is infamous for showing a black screen after a few hours of use, making it hard to rely on."),(0,l.yg)("p",null,"If you need a more stable Matlab image, we have a solution that requires a few more manual operations:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Use the ",(0,l.yg)("strong",{parentName:"li"},"Ubuntu with GUI")," template to set up an Ubuntu pod on the DSRI with the image ",(0,l.yg)("a",{parentName:"li",href:"https://github.com/vemonet/docker-ubuntu-vnc-desktop"},(0,l.yg)("inlineCode",{parentName:"a"},"ghcr.io/vemonet/docker-ubuntu-vnc-desktop:latest"))),(0,l.yg)("li",{parentName:"ul"},"Start Firefox and browse to ",(0,l.yg)("a",{parentName:"li",href:"https://nl.mathworks.com"},"https://nl.mathworks.com")),(0,l.yg)("li",{parentName:"ul"},"Log in with your personal Matlab account (create one if you don\u2019t have it)"),(0,l.yg)("li",{parentName:"ul"},"Choose ",(0,l.yg)("strong",{parentName:"li"},"get matlab")," and download the Linux Matlab version"),(0,l.yg)("li",{parentName:"ul"},"Open a terminal window and run the following commands:")),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"sudo apt-get update\nsudo apt-get install unzip\n# Unzip the previously downloaded Matlab installation file\n# Start the Matlab installation with:\nsudo ./install\n")),(0,l.yg)("p",null,"You will then be guided through the Matlab installation process:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Fill in your personal Matlab account credentials"),(0,l.yg)("li",{parentName:"ul"},"\u26a0\ufe0f Fill in the username as used in the Ubuntu environment, in your case it will most probably be ",(0,l.yg)("strong",{parentName:"li"},"root")," (Matlab gives a license error if this is not correct, check with ",(0,l.yg)("inlineCode",{parentName:"li"},"whoami")," in the terminal when in doubt)"),(0,l.yg)("li",{parentName:"ul"},"Select all the Matlab modules you want to install"),(0,l.yg)("li",{parentName:"ul"},'Check "symbolic link" and "Improve\u2026\u2026"')),(0,l.yg)("h2",{id:"use-matlab-in-jupyter"},"Use Matlab in Jupyter"),(0,l.yg)("p",null,"You can also use ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/mathworks/jupyter-matlab-proxy"},"mathworks/jupyter-matlab-proxy"),". You can easily install it in a JupyterLab image with ",(0,l.yg)("inlineCode",{parentName:"p"},"pip"),":"),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-bash"},"pip install jupyter-matlab-proxy\n")),(0,l.yg)("p",null,"Follow the instructions on the ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/mathworks/jupyter-matlab-proxy"},"mathworks/jupyter-matlab-proxy repository")," to access it."),(0,l.yg)("h2",{id:"deploy-matlab-on-gpu"},"Deploy Matlab on GPU"),(0,l.yg)("p",null,"We use the Matlab template in the DSRI catalog to deploy a pre-built ",(0,l.yg)("strong",{parentName:"p"},"Nvidia Matlab Deep Learning Container")," on CPU or GPU nodes. 
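To make the unzip step above concrete, a short sketch; matlab_*.zip is a placeholder for whatever installer file you downloaded from the Mathworks website.

```bash
# matlab_*.zip is a placeholder for the installer downloaded above
unzip matlab_*.zip -d matlab-installer
cd matlab-installer
sudo ./install
```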
See the ",(0,l.yg)("a",{parentName:"p",href:"https://nl.mathworks.com/help/cloudcenter/ug/matlab-deep-learning-container-on-dgx.html"},"official documentation from MathWorks")," for more details about this image."),(0,l.yg)("admonition",{title:"Request access to Matlab",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"To be able to access the Matlab on GPU template you will need to ",(0,l.yg)("a",{parentName:"p",href:"/help"},"ask the DSRI admins")," to enable it in your project.")),(0,l.yg)("p",null,"2 options are available to connect to your running Matlab pod terminal:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Go to the matlab pod page on the DSRI web UI "),(0,l.yg)("li",{parentName:"ul"},"Or connect from your terminal with ",(0,l.yg)("inlineCode",{parentName:"li"},"oc rsh MATLAB_POD_ID"))),(0,l.yg)("p",null,"Type ",(0,l.yg)("inlineCode",{parentName:"p"},"bash")," when first accessing to the terminal to have a better experience."),(0,l.yg)("p",null,"Type ",(0,l.yg)("inlineCode",{parentName:"p"},"cd /ContainerDeepLearningData")," to go in the persistent volume, and use this volume to store all data that should be preserved."),(0,l.yg)("p",null,"Type ",(0,l.yg)("inlineCode",{parentName:"p"},"matlab")," to access Matlab from the terminal"),(0,l.yg)("p",null,"It is possible to access the Matlab desktop UI through VNC and a web UI, but the script to start it in ",(0,l.yg)("inlineCode",{parentName:"p"},"/bin/run.sh")," seems to face some errors, let us know if you have any luck with this."),(0,l.yg)("p",null,"By default the image run with the ",(0,l.yg)("inlineCode",{parentName:"p"},"matlab")," user which does not have ",(0,l.yg)("inlineCode",{parentName:"p"},"sudo")," privilege, you can run the container as root if you need to install packages which require admin privileges. "),(0,l.yg)("h2",{id:"build-your-own-matlab-image"},"Build your own Matlab image"),(0,l.yg)("p",null,"Follow the instructions at: ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/mathworks-ref-arch/matlab-dockerfile"},"https://github.com/mathworks-ref-arch/matlab-dockerfile")),(0,l.yg)("p",null,"This will require you to retrieve Matlab installation files to build your own container"),(0,l.yg)("p",null,"Once all the files have been properly placed in the folder and the license server URL has been set, you can start the build on DSRI by following the ",(0,l.yg)("a",{parentName:"p",href:"https://maastrichtu-ids.github.io/dsri-documentation/docs/guide-dockerfile-to-openshift#create-new-build-configuration"},"documentation to deploy from a ",(0,l.yg)("inlineCode",{parentName:"a"},"Dockerfile"))),(0,l.yg)("admonition",{title:"License server not available on your laptop",type:"caution"},(0,l.yg)("p",{parentName:"admonition"},"If you try to build Matlab directly on your laptop it will most probably fail as your machine might not have access to the license server. You will need to build the Matlab container directly on DSRI with ",(0,l.yg)("inlineCode",{parentName:"p"},"oc start-build"))),(0,l.yg)("p",null,"Once Matlab deployed, you will need to edit the matlab deployment YAML before it works."),(0,l.yg)("p",null,"Go to ",(0,l.yg)("strong",{parentName:"p"},"Topology"),", click on the Matlab node, click on the ",(0,l.yg)("strong",{parentName:"p"},"Actions")," button of the matlab details, and ",(0,l.yg)("strong",{parentName:"p"},"Edit deployment"),". 
In the deployment YAML search for ",(0,l.yg)("inlineCode",{parentName:"p"},"spec:")," which has a ",(0,l.yg)("inlineCode",{parentName:"p"},"containers:")," as a child, and add the following under ",(0,l.yg)("inlineCode",{parentName:"p"},"spec:")),(0,l.yg)("pre",null,(0,l.yg)("code",{parentName:"pre",className:"language-yaml"},"spec:\n serviceAccountName: anyuid\n containers: ...\n")),(0,l.yg)("p",null,"Your Matlab container should now be running!"),(0,l.yg)("p",null,"Two options are available to connect to your running Matlab pod terminal:"),(0,l.yg)("ul",null,(0,l.yg)("li",{parentName:"ul"},"Go to the matlab pod page on the DSRI web UI "),(0,l.yg)("li",{parentName:"ul"},"Or connect from your terminal with ",(0,l.yg)("inlineCode",{parentName:"li"},"oc rsh MATLAB_POD_ID"))),(0,l.yg)("p",null,"You can access Matlab from the terminal by running ",(0,l.yg)("inlineCode",{parentName:"p"},"matlab")),(0,l.yg)("p",null,"Unfortunately Matlab did not expect its users to need the graphical UI when running Matlab in containers, so only the command line is available by default. You can find more information on how to ",(0,l.yg)("a",{parentName:"p",href:"https://github.com/mathworks-ref-arch/matlab-dockerfile/issues/18"},"enable the Matlab UI in this issue"),"."))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/f8d48938.d768df8f.js b/assets/js/f8d48938.a5c11f2a.js similarity index 99% rename from assets/js/f8d48938.d768df8f.js rename to assets/js/f8d48938.a5c11f2a.js index 210e0e62f..33d6c1043 100644 --- a/assets/js/f8d48938.d768df8f.js +++ b/assets/js/f8d48938.a5c11f2a.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2526],{5680:(e,t,r)=>{r.d(t,{xA:()=>p,yg:()=>y});var o=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function n(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var u=o.createContext({}),l=function(e){var t=o.useContext(u),r=t;return e&&(r="function"==typeof e?e(t):n(n({},t),e)),r},p=function(e){var t=l(e.components);return o.createElement(u.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},h=o.forwardRef((function(e,t){var r=e.components,a=e.mdxType,i=e.originalType,u=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),h=l(r),y=a,d=h["".concat(u,".").concat(y)]||h[y]||c[y]||i;return r?o.createElement(d,n(n({ref:t},p),{},{components:r})):o.createElement(d,n({ref:t},p))}));function y(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var i=r.length,n=new Array(i);n[0]=h;var s={};for(var u in t)hasOwnProperty.call(t,u)&&(s[u]=t[u]);s.originalType=e,s.mdxType="string"==typeof e?e:a,n[1]=s;for(var l=2;l{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>u,default:()=>y,frontMatter:()=>s,metadata:()=>l,toc:()=>c});var o=r(9668),a=r(1367),i=(r(6540),r(5680)),n=["components"],s={id:"guide-workshop",title:"Prepare a workshop"},u=void 0,l={unversionedId:"guide-workshop",id:"guide-workshop",title:"Prepare a workshop",description:"The DSRI is a good platform to run a training or class within Maastricht 
University.",source:"@site/docs/guide-workshop.md",sourceDirName:".",slug:"/guide-workshop",permalink:"/docs/guide-workshop",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-workshop.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-workshop",title:"Prepare a workshop"},sidebar:"docs",previous:{title:"Install UM VPN",permalink:"/docs/guide-vpn"},next:{title:"Login to Docker registries",permalink:"/docs/login-docker-registry"}},p={},c=[{value:"Request VPN accounts for users",id:"request-vpn-accounts-for-users",level:2},{value:"Fill a form",id:"fill-a-form",level:2},{value:"Prepare you workshop",id:"prepare-you-workshop",level:2},{value:"Publish an image for your training",id:"publish-an-image-for-your-training",level:3},{value:"Show your users how to start a workspace",id:"show-your-users-how-to-start-a-workspace",level:3}],h={toc:c};function y(e){var t=e.components,r=(0,a.A)(e,n);return(0,i.yg)("wrapper",(0,o.A)({},h,r,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("p",null,"The DSRI is a good platform to run a training or class within Maastricht University."),(0,i.yg)("h2",{id:"request-vpn-accounts-for-users"},"Request VPN accounts for users"),(0,i.yg)("p",null,"If the users are ",(0,i.yg)("strong",{parentName:"p"},"students")," from Maastricht University, ",(0,i.yg)("strong",{parentName:"p"},"or not")," from Maastricht University (without an email @maastrichtuniversity.nl, or @maastro.nl), you will need to contact the ICT support of your department to request the creation of accounts so that your users can connect to the UM VPN."),(0,i.yg)("p",null,"At FSE, you will need to send an email to ",(0,i.yg)("a",{parentName:"p",href:"mailto:lo-fse@maastrichtuniversity.nl"},"lo-fse@maastrichtuniversity.nl")," and ",(0,i.yg)("a",{parentName:"p",href:"mailto:DSRI-SUPPORT-L@maastrichtuniversity.nl"},"DSRI-SUPPORT-L@maastrichtuniversity.nl")," with the following information:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"Emails of the users"),(0,i.yg)("li",{parentName:"ul"},"Why they need access to the DSRI (provide the ID of the course at Maastricht University if it is for a course)"),(0,i.yg)("li",{parentName:"ul"},"Until which date the users will need those VPN accounts")),(0,i.yg)("h2",{id:"fill-a-form"},"Fill a form"),(0,i.yg)("p",null,"Fill this ",(0,i.yg)("a",{parentName:"p",href:"/register"},"form \ud83d\udcec")," to give us more details on your project (you don't need to do it if you have already filled it in the past)."),(0,i.yg)("h2",{id:"prepare-you-workshop"},"Prepare you workshop"),(0,i.yg)("p",null,"Use the ",(0,i.yg)("a",{parentName:"p",href:"/docs/access-dsri"},"DSRI documentation")," to explain to your users how to access the DSRI."),(0,i.yg)("h3",{id:"publish-an-image-for-your-training"},"Publish an image for your training"),(0,i.yg)("p",null,"Feel free to use the existing templates for JupyterLab, RStudio, or Visual Studio Code in the DSRI catalog."),(0,i.yg)("p",null,"You can easily reuse our images to adapt it to your training need and install all required 
dependencies:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/jupyterlab"},"https://github.com/MaastrichtU-IDS/jupyterlab")),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/rstudio"},"https://github.com/MaastrichtU-IDS/rstudio")),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/code-server"},"https://github.com/MaastrichtU-IDS/code-server"))),(0,i.yg)("p",null,"Then you will just need to instruct your users to start an existing templates with your newly published image."),(0,i.yg)("p",null,"With the JupyterLab template you can also prepare a git repository to be cloned in the workspace as soon as they start it."),(0,i.yg)("p",null,"You can find some examples of python scripts with database to run on the DSRI in this repository: ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-demo"},"https://github.com/MaastrichtU-IDS/dsri-demo")),(0,i.yg)("h3",{id:"show-your-users-how-to-start-a-workspace"},"Show your users how to start a workspace"),(0,i.yg)("p",null,"You can use this video showing how to start a RStudio workspace, the process is similar for JupyterLab and VisualStudio Code: ",(0,i.yg)("a",{parentName:"p",href:"https://www.youtube.com/watch?v=Y0BjotH1LiE"},"https://www.youtube.com/watch?v=Y0BjotH1LiE")),(0,i.yg)("p",null,"Otherwise just do it directly with them."))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2526],{5680:(e,t,r)=>{r.d(t,{xA:()=>p,yg:()=>y});var o=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function n(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var u=o.createContext({}),l=function(e){var t=o.useContext(u),r=t;return e&&(r="function"==typeof e?e(t):n(n({},t),e)),r},p=function(e){var t=l(e.components);return o.createElement(u.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},h=o.forwardRef((function(e,t){var r=e.components,a=e.mdxType,i=e.originalType,u=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),h=l(r),y=a,d=h["".concat(u,".").concat(y)]||h[y]||c[y]||i;return r?o.createElement(d,n(n({ref:t},p),{},{components:r})):o.createElement(d,n({ref:t},p))}));function y(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var i=r.length,n=new Array(i);n[0]=h;var s={};for(var u in t)hasOwnProperty.call(t,u)&&(s[u]=t[u]);s.originalType=e,s.mdxType="string"==typeof e?e:a,n[1]=s;for(var l=2;l{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>u,default:()=>y,frontMatter:()=>s,metadata:()=>l,toc:()=>c});var o=r(9668),a=r(1367),i=(r(6540),r(5680)),n=["components"],s={id:"guide-workshop",title:"Prepare a workshop"},u=void 0,l={unversionedId:"guide-workshop",id:"guide-workshop",title:"Prepare a workshop",description:"The DSRI is a good platform to run a training or class within Maastricht 
University.",source:"@site/docs/guide-workshop.md",sourceDirName:".",slug:"/guide-workshop",permalink:"/docs/guide-workshop",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/guide-workshop.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"guide-workshop",title:"Prepare a workshop"},sidebar:"docs",previous:{title:"Install UM VPN",permalink:"/docs/guide-vpn"},next:{title:"Login to Docker registries",permalink:"/docs/login-docker-registry"}},p={},c=[{value:"Request VPN accounts for users",id:"request-vpn-accounts-for-users",level:2},{value:"Fill a form",id:"fill-a-form",level:2},{value:"Prepare you workshop",id:"prepare-you-workshop",level:2},{value:"Publish an image for your training",id:"publish-an-image-for-your-training",level:3},{value:"Show your users how to start a workspace",id:"show-your-users-how-to-start-a-workspace",level:3}],h={toc:c};function y(e){var t=e.components,r=(0,a.A)(e,n);return(0,i.yg)("wrapper",(0,o.A)({},h,r,{components:t,mdxType:"MDXLayout"}),(0,i.yg)("p",null,"The DSRI is a good platform to run a training or class within Maastricht University."),(0,i.yg)("h2",{id:"request-vpn-accounts-for-users"},"Request VPN accounts for users"),(0,i.yg)("p",null,"If the users are ",(0,i.yg)("strong",{parentName:"p"},"students")," from Maastricht University, ",(0,i.yg)("strong",{parentName:"p"},"or not")," from Maastricht University (without an email @maastrichtuniversity.nl, or @maastro.nl), you will need to contact the ICT support of your department to request the creation of accounts so that your users can connect to the UM VPN."),(0,i.yg)("p",null,"At FSE, you will need to send an email to ",(0,i.yg)("a",{parentName:"p",href:"mailto:lo-fse@maastrichtuniversity.nl"},"lo-fse@maastrichtuniversity.nl")," and ",(0,i.yg)("a",{parentName:"p",href:"mailto:DSRI-SUPPORT-L@maastrichtuniversity.nl"},"DSRI-SUPPORT-L@maastrichtuniversity.nl")," with the following information:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},"Emails of the users"),(0,i.yg)("li",{parentName:"ul"},"Why they need access to the DSRI (provide the ID of the course at Maastricht University if it is for a course)"),(0,i.yg)("li",{parentName:"ul"},"Until which date the users will need those VPN accounts")),(0,i.yg)("h2",{id:"fill-a-form"},"Fill a form"),(0,i.yg)("p",null,"Fill this ",(0,i.yg)("a",{parentName:"p",href:"/register"},"form \ud83d\udcec")," to give us more details on your project (you don't need to do it if you have already filled it in the past)."),(0,i.yg)("h2",{id:"prepare-you-workshop"},"Prepare you workshop"),(0,i.yg)("p",null,"Use the ",(0,i.yg)("a",{parentName:"p",href:"/docs/access-dsri"},"DSRI documentation")," to explain to your users how to access the DSRI."),(0,i.yg)("h3",{id:"publish-an-image-for-your-training"},"Publish an image for your training"),(0,i.yg)("p",null,"Feel free to use the existing templates for JupyterLab, RStudio, or Visual Studio Code in the DSRI catalog."),(0,i.yg)("p",null,"You can easily reuse our images to adapt it to your training need and install all required 
dependencies:"),(0,i.yg)("ul",null,(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/jupyterlab"},"https://github.com/MaastrichtU-IDS/jupyterlab")),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/rstudio"},"https://github.com/MaastrichtU-IDS/rstudio")),(0,i.yg)("li",{parentName:"ul"},(0,i.yg)("a",{parentName:"li",href:"https://github.com/MaastrichtU-IDS/code-server"},"https://github.com/MaastrichtU-IDS/code-server"))),(0,i.yg)("p",null,"Then you will just need to instruct your users to start an existing templates with your newly published image."),(0,i.yg)("p",null,"With the JupyterLab template you can also prepare a git repository to be cloned in the workspace as soon as they start it."),(0,i.yg)("p",null,"You can find some examples of python scripts with database to run on the DSRI in this repository: ",(0,i.yg)("a",{parentName:"p",href:"https://github.com/MaastrichtU-IDS/dsri-demo"},"https://github.com/MaastrichtU-IDS/dsri-demo")),(0,i.yg)("h3",{id:"show-your-users-how-to-start-a-workspace"},"Show your users how to start a workspace"),(0,i.yg)("p",null,"You can use this video showing how to start a RStudio workspace, the process is similar for JupyterLab and VisualStudio Code: ",(0,i.yg)("a",{parentName:"p",href:"https://www.youtube.com/watch?v=Y0BjotH1LiE"},"https://www.youtube.com/watch?v=Y0BjotH1LiE")),(0,i.yg)("p",null,"Otherwise just do it directly with them."))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/f931684d.db345f4e.js b/assets/js/f931684d.35ed4819.js similarity index 99% rename from assets/js/f931684d.db345f4e.js rename to assets/js/f931684d.35ed4819.js index da46a892c..dd42239b7 100644 --- a/assets/js/f931684d.db345f4e.js +++ b/assets/js/f931684d.35ed4819.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8850],{5680:(e,o,t)=>{t.d(o,{xA:()=>s,yg:()=>y});var r=t(6540);function n(e,o,t){return o in e?Object.defineProperty(e,o,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[o]=t,e}function a(e,o){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);o&&(r=r.filter((function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable}))),t.push.apply(t,r)}return t}function i(e){for(var o=1;o=0||(n[t]=e[t]);return n}(e,o);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(n[t]=e[t])}return n}var p=r.createContext({}),c=function(e){var o=r.useContext(p),t=o;return e&&(t="function"==typeof e?e(o):i(i({},o),e)),t},s=function(e){var o=c(e.components);return r.createElement(p.Provider,{value:o},e.children)},u={inlineCode:"code",wrapper:function(e){var o=e.children;return r.createElement(r.Fragment,{},o)}},d=r.forwardRef((function(e,o){var t=e.components,n=e.mdxType,a=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),d=c(t),y=n,m=d["".concat(p,".").concat(y)]||d[y]||u[y]||a;return t?r.createElement(m,i(i({ref:o},s),{},{components:t})):r.createElement(m,i({ref:o},s))}));function y(e,o){var t=arguments,n=o&&o.mdxType;if("string"==typeof e||n){var a=t.length,i=new Array(a);i[0]=d;var l={};for(var p in o)hasOwnProperty.call(o,p)&&(l[p]=o[p]);l.originalType=e,l.mdxType="string"==typeof e?e:n,i[1]=l;for(var c=2;c{t.r(o),t.d(o,{assets:()=>s,contentTitle:()=>p,default:()=>y,frontMatter:()=>l,metadata:()=>c,toc:()=>u});var 
r=t(9668),n=t(1367),a=(t(6540),t(5680)),i=["components"],l={id:"deploy-from-docker",title:"Deploy from a Docker image"},p=void 0,c={unversionedId:"deploy-from-docker",id:"deploy-from-docker",title:"Deploy from a Docker image",description:"The DSRI is an OpenShift OKD cluster, based on Kubernetes. It uses Docker containers to deploy services and applications in pods.",source:"@site/docs/deploy-from-docker.md",sourceDirName:".",slug:"/deploy-from-docker",permalink:"/docs/deploy-from-docker",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-from-docker.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-from-docker",title:"Deploy from a Docker image"},sidebar:"docs",previous:{title:"Deploy from a Dockerfile",permalink:"/docs/guide-dockerfile-to-openshift"},next:{title:"Anatomy of a DSRI application",permalink:"/docs/anatomy-of-an-application"}},s={},u=[{value:"Find an image for your service",id:"find-an-image-for-your-service",level:2},{value:"Deploy the image on DSRI",id:"deploy-the-image-on-dsri",level:2},{value:"Build and push a new Docker image",id:"build-and-push-a-new-docker-image",level:2},{value:"Define a Dockerfile",id:"define-a-dockerfile",level:3},{value:"Build the image",id:"build-the-image",level:3},{value:"Push to DockerHub",id:"push-to-dockerhub",level:3}],d={toc:u};function y(e){var o=e.components,t=(0,n.A)(e,i);return(0,a.yg)("wrapper",(0,r.A)({},d,t,{components:o,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"The DSRI is an ",(0,a.yg)("a",{parentName:"p",href:"https://www.okd.io/"},"OpenShift OKD")," cluster, based on ",(0,a.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes"),". It uses ",(0,a.yg)("a",{parentName:"p",href:"https://www.docker.com"},"Docker containers")," to deploy services and applications in ",(0,a.yg)("strong",{parentName:"p"},"pods"),"."),(0,a.yg)("p",null,"Any service or job can be run in a Docker container. If you want to run a service in Python for example, you will find Docker images for Python. "),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"You can find already existing images for the service you want to run on DockerHub"),(0,a.yg)("li",{parentName:"ul"},"or create a custom Docker image in a few minutes. ")),(0,a.yg)("h2",{id:"find-an-image-for-your-service"},"Find an image for your service"),(0,a.yg)("p",null,"The easiest way to deploy a service on the DSRI is to use a Docker image from ",(0,a.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"DockerHub \ud83d\udc33"),"."),(0,a.yg)("p",null,"Search for an image for your service published on ",(0,a.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"DockerHub")),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("a",{parentName:"li",href:"https://www.google.com/search?q=dockerhub+python"},'Google "dockerhub my_service_name"')),(0,a.yg)("li",{parentName:"ul"},"Sometimes multiple images can be found for your service. Take the official image when possible, or the one most relevant to your use-case.")),(0,a.yg)("admonition",{title:"Deploy from a Dockerfile",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"If no suitable image can be found on ",(0,a.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"DockerHub"),", it can be ",(0,a.yg)("strong",{parentName:"p"},"deployed from a Dockerfile"),". 
See above to do so.")),(0,a.yg)("hr",null),(0,a.yg)("h2",{id:"deploy-the-image-on-dsri"},"Deploy the image on DSRI"),(0,a.yg)("p",null,"Once you have a Docker image for your application you can deploy it using the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console/projects"},"DSRI web UI"),"."),(0,a.yg)("p",null,"Go to the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console/projects"},"Overview page")," of your project."),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Click the ",(0,a.yg)("strong",{parentName:"li"},"Add to Project")," button in top right corner > ",(0,a.yg)("strong",{parentName:"li"},"Deploy Image")),(0,a.yg)("li",{parentName:"ul"},"Select to deploy from ",(0,a.yg)("strong",{parentName:"li"},"Image Name"),(0,a.yg)("ul",{parentName:"li"},(0,a.yg)("li",{parentName:"ul"},"Provide your image name, e.g. ",(0,a.yg)("inlineCode",{parentName:"li"},"umdsri/freesurfer")),(0,a.yg)("li",{parentName:"ul"},"Eventually change the ",(0,a.yg)("strong",{parentName:"li"},"Name"),", it needs to be unique by project."),(0,a.yg)("li",{parentName:"ul"},"Click ",(0,a.yg)("strong",{parentName:"li"},"Deploy"),".")))),(0,a.yg)("img",{src:"/img/screenshot-deploy_image_from_ui.png",alt:"Deploy image from UI",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,a.yg)("admonition",{title:"Fix a common problem",type:"caution"},(0,a.yg)("p",{parentName:"admonition"},"Once the application is deployed it will most probably fail because it has not been optimized to work with OpenShift random user ID. You will need to add an entry to the deployment to enable your image to run using any user ID.")),(0,a.yg)("p",null,"Go to ",(0,a.yg)("strong",{parentName:"p"},"Topology"),", click on your application node, click on the ",(0,a.yg)("strong",{parentName:"p"},"Actions")," button of your application details, and ",(0,a.yg)("strong",{parentName:"p"},"Edit deployment"),". 
In the deployment YAML search for ",(0,a.yg)("inlineCode",{parentName:"p"},"spec:")," which has a ",(0,a.yg)("inlineCode",{parentName:"p"},"containers:")," as child, and add the following under ",(0,a.yg)("inlineCode",{parentName:"p"},"spec:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},"spec:\n serviceAccountName: anyuid\n containers: ...\n")),(0,a.yg)("admonition",{title:"Access the application",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You should now see your pod deployed on the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console/projects"},"Overview")," page of your project."),(0,a.yg)("p",{parentName:"admonition"},"You can expose routes to this pod in the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console/projects"},"Overview")," page: ",(0,a.yg)("strong",{parentName:"p"},"Create route"))),(0,a.yg)("hr",null),(0,a.yg)("h2",{id:"build-and-push-a-new-docker-image"},"Build and push a new Docker image"),(0,a.yg)("p",null,"In case you there is no Docker image for your application you can build and push one."),(0,a.yg)("p",null,"To build and push a Docker image you will need to have ",(0,a.yg)("a",{parentName:"p",href:"https://docs.docker.com/get-docker/"},"Docker installed"),"."),(0,a.yg)("admonition",{title:"Install Docker",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"See the ",(0,a.yg)("a",{parentName:"p",href:"https://docs.docker.com/get-docker/"},"official documentation to install Docker"),".")),(0,a.yg)("h3",{id:"define-a-dockerfile"},"Define a Dockerfile"),(0,a.yg)("p",null,"If no images are available on DockerHub, it is still possible that the developers created the ",(0,a.yg)("a",{parentName:"p",href:"https://docs.docker.com/engine/reference/builder/"},"Dockerfile to build the image")," without pushing it to DockerHub. Go to the GitHub/GitLab source code repository and search for a ",(0,a.yg)("inlineCode",{parentName:"p"},"Dockerfile"),", it can usually be found in"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"the source code repository root folder"),(0,a.yg)("li",{parentName:"ul"},"a ",(0,a.yg)("inlineCode",{parentName:"li"},"docker")," subfolder"),(0,a.yg)("li",{parentName:"ul"},"as instructions in the ",(0,a.yg)("inlineCode",{parentName:"li"},"README.md"))),(0,a.yg)("p",null,"If no ",(0,a.yg)("inlineCode",{parentName:"p"},"Dockerfile")," are available we will need to define one. 
"),(0,a.yg)("admonition",{title:"Contact us",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"Feel free to ",(0,a.yg)("a",{parentName:"p",href:"/help"},"contact us")," to get help with this, especially if you are unfamiliar with ",(0,a.yg)("a",{parentName:"p",href:"https://docs.docker.com/get-started/"},"Docker"),".")),(0,a.yg)("h3",{id:"build-the-image"},"Build the image"),(0,a.yg)("p",null,"Once a Dockerfile has been defined for the service you can build it by running the following command from the source code root folder, where the Dockerfile is:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"docker build -t username/my-service .\n")),(0,a.yg)("p",null,"Arguments can be provided when starting the build, they need to be defined in the Dockerfile to be used."),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"docker build -t username/my-service --build-args MY_ARG=my_value .\n")),(0,a.yg)("h3",{id:"push-to-dockerhub"},"Push to DockerHub"),(0,a.yg)("p",null,"Before pushing it to DockerHub you will need to create a repository. To do so, click on ",(0,a.yg)("strong",{parentName:"p"},(0,a.yg)("a",{parentName:"strong",href:"https://hub.docker.com/repository/create"},"Create Repository")),"."),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"DockerHub is free for public repositories"),(0,a.yg)("li",{parentName:"ul"},"Images can be published under your DockerHub user or an organization you belong to")),(0,a.yg)("p",null,"Login to DockerHub, if not already done:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"docker login\n")),(0,a.yg)("p",null,"Push the image previously built to DockerHub:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"docker push username/my-service\n")),(0,a.yg)("p",null,"You can link DockerHub to your source code repository and ask it to build the Docker image automatically (from the Dockerfile in the root folder). It should take between 10 and 30min for DockerHub to build your image"),(0,a.yg)("admonition",{title:"Deploy from a local Dockerfile",type:"tip"},(0,a.yg)("p",{parentName:"admonition"},"You can also deploy a service on the DSRI directly from a local ",(0,a.yg)("inlineCode",{parentName:"p"},"Dockerfile"),", to avoid using DockerHub. 
See ",(0,a.yg)("a",{parentName:"p",href:"/docs/guide-dockerfile-to-openshift"},"this page to deploy a service from a local Dockerfile")," for more instructions")))}y.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8850],{5680:(e,o,t)=>{t.d(o,{xA:()=>s,yg:()=>y});var r=t(6540);function n(e,o,t){return o in e?Object.defineProperty(e,o,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[o]=t,e}function a(e,o){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);o&&(r=r.filter((function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable}))),t.push.apply(t,r)}return t}function i(e){for(var o=1;o=0||(n[t]=e[t]);return n}(e,o);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(n[t]=e[t])}return n}var p=r.createContext({}),c=function(e){var o=r.useContext(p),t=o;return e&&(t="function"==typeof e?e(o):i(i({},o),e)),t},s=function(e){var o=c(e.components);return r.createElement(p.Provider,{value:o},e.children)},u={inlineCode:"code",wrapper:function(e){var o=e.children;return r.createElement(r.Fragment,{},o)}},d=r.forwardRef((function(e,o){var t=e.components,n=e.mdxType,a=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),d=c(t),y=n,m=d["".concat(p,".").concat(y)]||d[y]||u[y]||a;return t?r.createElement(m,i(i({ref:o},s),{},{components:t})):r.createElement(m,i({ref:o},s))}));function y(e,o){var t=arguments,n=o&&o.mdxType;if("string"==typeof e||n){var a=t.length,i=new Array(a);i[0]=d;var l={};for(var p in o)hasOwnProperty.call(o,p)&&(l[p]=o[p]);l.originalType=e,l.mdxType="string"==typeof e?e:n,i[1]=l;for(var c=2;c{t.r(o),t.d(o,{assets:()=>s,contentTitle:()=>p,default:()=>y,frontMatter:()=>l,metadata:()=>c,toc:()=>u});var r=t(9668),n=t(1367),a=(t(6540),t(5680)),i=["components"],l={id:"deploy-from-docker",title:"Deploy from a Docker image"},p=void 0,c={unversionedId:"deploy-from-docker",id:"deploy-from-docker",title:"Deploy from a Docker image",description:"The DSRI is an OpenShift OKD cluster, based on Kubernetes. 
It uses Docker containers to deploy services and applications in pods.",source:"@site/docs/deploy-from-docker.md",sourceDirName:".",slug:"/deploy-from-docker",permalink:"/docs/deploy-from-docker",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/deploy-from-docker.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"deploy-from-docker",title:"Deploy from a Docker image"},sidebar:"docs",previous:{title:"Deploy from a Dockerfile",permalink:"/docs/guide-dockerfile-to-openshift"},next:{title:"Anatomy of a DSRI application",permalink:"/docs/anatomy-of-an-application"}},s={},u=[{value:"Find an image for your service",id:"find-an-image-for-your-service",level:2},{value:"Deploy the image on DSRI",id:"deploy-the-image-on-dsri",level:2},{value:"Build and push a new Docker image",id:"build-and-push-a-new-docker-image",level:2},{value:"Define a Dockerfile",id:"define-a-dockerfile",level:3},{value:"Build the image",id:"build-the-image",level:3},{value:"Push to DockerHub",id:"push-to-dockerhub",level:3}],d={toc:u};function y(e){var o=e.components,t=(0,n.A)(e,i);return(0,a.yg)("wrapper",(0,r.A)({},d,t,{components:o,mdxType:"MDXLayout"}),(0,a.yg)("p",null,"The DSRI is an ",(0,a.yg)("a",{parentName:"p",href:"https://www.okd.io/"},"OpenShift OKD")," cluster, based on ",(0,a.yg)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes"),". It uses ",(0,a.yg)("a",{parentName:"p",href:"https://www.docker.com"},"Docker containers")," to deploy services and applications in ",(0,a.yg)("strong",{parentName:"p"},"pods"),"."),(0,a.yg)("p",null,"Any service or job can be run in a Docker container. If you want to run a service in Python for example, you will find Docker images for Python. "),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"You can find already existing images for the service you want to run on DockerHub"),(0,a.yg)("li",{parentName:"ul"},"or create a custom Docker image in a few minutes. ")),(0,a.yg)("h2",{id:"find-an-image-for-your-service"},"Find an image for your service"),(0,a.yg)("p",null,"The easiest way to deploy a service on the DSRI is to use a Docker image from ",(0,a.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"DockerHub \ud83d\udc33"),"."),(0,a.yg)("p",null,"Search for an image for your service published on ",(0,a.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"DockerHub")),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("a",{parentName:"li",href:"https://www.google.com/search?q=dockerhub+python"},'Google "dockerhub my_service_name"')),(0,a.yg)("li",{parentName:"ul"},"Sometimes multiple images can be found for your service. Take the official image when possible, or the one most relevant to your use-case.")),(0,a.yg)("admonition",{title:"Deploy from a Dockerfile",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"If no suitable image can be found on ",(0,a.yg)("a",{parentName:"p",href:"https://hub.docker.com/"},"DockerHub"),", it can be ",(0,a.yg)("strong",{parentName:"p"},"deployed from a Dockerfile"),". 
See the instructions below to do so.")),(0,a.yg)("hr",null),(0,a.yg)("h2",{id:"deploy-the-image-on-dsri"},"Deploy the image on DSRI"),(0,a.yg)("p",null,"Once you have a Docker image for your application, you can deploy it using the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console/projects"},"DSRI web UI"),"."),(0,a.yg)("p",null,"Go to the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console/projects"},"Overview page")," of your project."),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"Click the ",(0,a.yg)("strong",{parentName:"li"},"Add to Project")," button in the top right corner > ",(0,a.yg)("strong",{parentName:"li"},"Deploy Image")),(0,a.yg)("li",{parentName:"ul"},"Select to deploy from ",(0,a.yg)("strong",{parentName:"li"},"Image Name"),(0,a.yg)("ul",{parentName:"li"},(0,a.yg)("li",{parentName:"ul"},"Provide your image name, e.g. ",(0,a.yg)("inlineCode",{parentName:"li"},"umdsri/freesurfer")),(0,a.yg)("li",{parentName:"ul"},"Optionally change the ",(0,a.yg)("strong",{parentName:"li"},"Name"),"; it needs to be unique within a project."),(0,a.yg)("li",{parentName:"ul"},"Click ",(0,a.yg)("strong",{parentName:"li"},"Deploy"),".")))),(0,a.yg)("img",{src:"/img/screenshot-deploy_image_from_ui.png",alt:"Deploy image from UI",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,a.yg)("admonition",{title:"Fix a common problem",type:"caution"},(0,a.yg)("p",{parentName:"admonition"},"Once the application is deployed, it will most probably fail, because it has not been optimized to work with OpenShift's random user IDs. You will need to add an entry to the deployment to enable your image to run using any user ID.")),(0,a.yg)("p",null,"Go to ",(0,a.yg)("strong",{parentName:"p"},"Topology"),", click on your application node, click on the ",(0,a.yg)("strong",{parentName:"p"},"Actions")," button of your application details, and ",(0,a.yg)("strong",{parentName:"p"},"Edit deployment"),". 
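If you prefer working from a terminal, the same fix described next can also be applied with the oc command-line client. A minimal sketch, assuming your deployment is named my-service (a placeholder) and the anyuid service account is available in your project:

```shell
# Set the service account used by the pods of this deployment to anyuid,
# so the image can run with an arbitrary user ID.
# "my-service" is a placeholder: replace it with the name of your deployment.
oc patch deployment my-service \
  -p '{"spec": {"template": {"spec": {"serviceAccountName": "anyuid"}}}}'
```

The patch targets the pod template, i.e. the same spec: (the one with containers: as a child) that the next paragraph tells you to edit in the web UI.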
In the deployment YAML, search for the ",(0,a.yg)("inlineCode",{parentName:"p"},"spec:")," which has ",(0,a.yg)("inlineCode",{parentName:"p"},"containers:")," as a child, and add the following under that ",(0,a.yg)("inlineCode",{parentName:"p"},"spec:")),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-yaml"},"spec:\n  serviceAccountName: anyuid\n  containers: ...\n")),(0,a.yg)("admonition",{title:"Access the application",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"You should now see your pod deployed on the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console/projects"},"Overview")," page of your project."),(0,a.yg)("p",{parentName:"admonition"},"You can expose routes to this pod in the ",(0,a.yg)("a",{parentName:"p",href:"https://console-openshift-console.apps.dsri2.unimaas.nl/console/projects"},"Overview")," page: ",(0,a.yg)("strong",{parentName:"p"},"Create route"))),(0,a.yg)("hr",null),(0,a.yg)("h2",{id:"build-and-push-a-new-docker-image"},"Build and push a new Docker image"),(0,a.yg)("p",null,"In case there is no Docker image for your application, you can build and push one yourself."),(0,a.yg)("p",null,"To build and push a Docker image, you will need to have ",(0,a.yg)("a",{parentName:"p",href:"https://docs.docker.com/get-docker/"},"Docker installed"),"."),(0,a.yg)("admonition",{title:"Install Docker",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"See the ",(0,a.yg)("a",{parentName:"p",href:"https://docs.docker.com/get-docker/"},"official documentation to install Docker"),".")),(0,a.yg)("h3",{id:"define-a-dockerfile"},"Define a Dockerfile"),(0,a.yg)("p",null,"If no images are available on DockerHub, it is still possible that the developers created the ",(0,a.yg)("a",{parentName:"p",href:"https://docs.docker.com/engine/reference/builder/"},"Dockerfile to build the image")," without pushing it to DockerHub. Go to the GitHub/GitLab source code repository and search for a ",(0,a.yg)("inlineCode",{parentName:"p"},"Dockerfile"),"; it can usually be found in:"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"the source code repository root folder"),(0,a.yg)("li",{parentName:"ul"},"a ",(0,a.yg)("inlineCode",{parentName:"li"},"docker")," subfolder"),(0,a.yg)("li",{parentName:"ul"},"as instructions in the ",(0,a.yg)("inlineCode",{parentName:"li"},"README.md"))),(0,a.yg)("p",null,"If no ",(0,a.yg)("inlineCode",{parentName:"p"},"Dockerfile")," is available, we will need to define one, as sketched below. 
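For illustration, here is what defining a minimal Dockerfile for a small Python service could look like. This is only a sketch: the python:3.11-slim base image, the requirements.txt file, the main.py entrypoint and the MY_ARG build argument are hypothetical placeholders, not part of the instructions above.

```shell
# Write a minimal Dockerfile for a hypothetical Python service
# (adapt the base image and commands to your own application).
cat > Dockerfile <<'EOF'
FROM python:3.11-slim
# Build arguments must be declared with ARG before they can be set with --build-arg
ARG MY_ARG=default_value
ENV MY_ARG=${MY_ARG}
WORKDIR /app
COPY . /app
RUN pip install --no-cache-dir -r requirements.txt
CMD ["python", "main.py"]
EOF

# Build the image and give it a quick local test run before pushing it:
docker build -t username/my-service .
docker run --rm username/my-service
```

The build and push commands described below then work unchanged with such a Dockerfile.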
"),(0,a.yg)("admonition",{title:"Contact us",type:"info"},(0,a.yg)("p",{parentName:"admonition"},"Feel free to ",(0,a.yg)("a",{parentName:"p",href:"/help"},"contact us")," to get help with this, especially if you are unfamiliar with ",(0,a.yg)("a",{parentName:"p",href:"https://docs.docker.com/get-started/"},"Docker"),".")),(0,a.yg)("h3",{id:"build-the-image"},"Build the image"),(0,a.yg)("p",null,"Once a Dockerfile has been defined for the service you can build it by running the following command from the source code root folder, where the Dockerfile is:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"docker build -t username/my-service .\n")),(0,a.yg)("p",null,"Arguments can be provided when starting the build, they need to be defined in the Dockerfile to be used."),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"docker build -t username/my-service --build-args MY_ARG=my_value .\n")),(0,a.yg)("h3",{id:"push-to-dockerhub"},"Push to DockerHub"),(0,a.yg)("p",null,"Before pushing it to DockerHub you will need to create a repository. To do so, click on ",(0,a.yg)("strong",{parentName:"p"},(0,a.yg)("a",{parentName:"strong",href:"https://hub.docker.com/repository/create"},"Create Repository")),"."),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},"DockerHub is free for public repositories"),(0,a.yg)("li",{parentName:"ul"},"Images can be published under your DockerHub user or an organization you belong to")),(0,a.yg)("p",null,"Login to DockerHub, if not already done:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"docker login\n")),(0,a.yg)("p",null,"Push the image previously built to DockerHub:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-shell"},"docker push username/my-service\n")),(0,a.yg)("p",null,"You can link DockerHub to your source code repository and ask it to build the Docker image automatically (from the Dockerfile in the root folder). It should take between 10 and 30min for DockerHub to build your image"),(0,a.yg)("admonition",{title:"Deploy from a local Dockerfile",type:"tip"},(0,a.yg)("p",{parentName:"admonition"},"You can also deploy a service on the DSRI directly from a local ",(0,a.yg)("inlineCode",{parentName:"p"},"Dockerfile"),", to avoid using DockerHub. 
See ",(0,a.yg)("a",{parentName:"p",href:"/docs/guide-dockerfile-to-openshift"},"this page to deploy a service from a local Dockerfile")," for more instructions")))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/f97cc2c7.2c1cd784.js b/assets/js/f97cc2c7.0cec9eeb.js similarity index 99% rename from assets/js/f97cc2c7.2c1cd784.js rename to assets/js/f97cc2c7.0cec9eeb.js index 0b04fd043..a25299406 100644 --- a/assets/js/f97cc2c7.2c1cd784.js +++ b/assets/js/f97cc2c7.0cec9eeb.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5167],{5680:(e,n,t)=>{t.d(n,{xA:()=>p,yg:()=>d});var o=t(6540);function i(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function a(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function r(e){for(var n=1;n=0||(i[t]=e[t]);return i}(e,n);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(i[t]=e[t])}return i}var c=o.createContext({}),s=function(e){var n=o.useContext(c),t=n;return e&&(t="function"==typeof e?e(n):r(r({},n),e)),t},p=function(e){var n=s(e.components);return o.createElement(c.Provider,{value:n},e.children)},h={inlineCode:"code",wrapper:function(e){var n=e.children;return o.createElement(o.Fragment,{},n)}},m=o.forwardRef((function(e,n){var t=e.components,i=e.mdxType,a=e.originalType,c=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),m=s(t),d=i,u=m["".concat(c,".").concat(d)]||m[d]||h[d]||a;return t?o.createElement(u,r(r({ref:n},p),{},{components:t})):o.createElement(u,r({ref:n},p))}));function d(e,n){var t=arguments,i=n&&n.mdxType;if("string"==typeof e||i){var a=t.length,r=new Array(a);r[0]=m;var l={};for(var c in n)hasOwnProperty.call(n,c)&&(l[c]=n[c]);l.originalType=e,l.mdxType="string"==typeof e?e:i,r[1]=l;for(var s=2;s{t.r(n),t.d(n,{assets:()=>p,contentTitle:()=>c,default:()=>d,frontMatter:()=>l,metadata:()=>s,toc:()=>h});var o=t(9668),i=t(1367),a=(t(6540),t(5680)),r=["components"],l={id:"checkpointing-ml-training",title:"Checkpointing Machine Learning Training"},c=void 0,s={unversionedId:"checkpointing-ml-training",id:"checkpointing-ml-training",title:"Checkpointing Machine Learning Training",description:"What is Checkpointing?",source:"@site/docs/checkpointing-ml-training-models.md",sourceDirName:".",slug:"/checkpointing-ml-training",permalink:"/docs/checkpointing-ml-training",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/checkpointing-ml-training-models.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"checkpointing-ml-training",title:"Checkpointing Machine Learning Training"},sidebar:"docs",previous:{title:"Access UM servers",permalink:"/docs/access-um-servers"},next:{title:"Parallelization using Dask",permalink:"/docs/dask-tutorial"}},p={},h=[{value:"What is Checkpointing?",id:"what-is-checkpointing",level:2},{value:"Checkpointing fequency?",id:"checkpointing-fequency",level:2},{value:"Support for Checkpointing in Tensorflow/Keras and PyTorch ?",id:"support-for-checkpointing-in-tensorflowkeras-and-pytorch-",level:2},{value:"Example of Tensorflow/Keras based 
checkpointing:",id:"example-of-tensorflowkeras-based-checkpointing",level:2},{value:"Example of PyTorch based checkpointing:",id:"example-of-pytorch-based-checkpointing",level:2},{value:"External Resources",id:"external-resources",level:2}],m={toc:h};function d(e){var n=e.components,t=(0,i.A)(e,r);return(0,a.yg)("wrapper",(0,o.A)({},m,t,{components:n,mdxType:"MDXLayout"}),(0,a.yg)("h2",{id:"what-is-checkpointing"},"What is Checkpointing?"),(0,a.yg)("p",null,"Checkpointing is periodically saving the learned model parameters and current hyperparameter values during training. It helps to resume training of a model where you left off, instead of restarting the training from the beginning."),(0,a.yg)("p",null,"On shared DSRI cluster, you might have access to a GPU node for a limited number of time in one stretch, for example, maybe for 24 hours.\nTherefore, whenever the training job fails (due to timelimit expiry or otherwise), many hours of training can be lost. This problem is mitigated by a frequent checkpoint saving. When the training is resumed it'll continue from the last checkpoint saved. If the failure occurred 12 hours after the last checkpoint has been saved, 12 hours of training is lost and needs to be re-done. This can be very expensive."),(0,a.yg)("h2",{id:"checkpointing-fequency"},"Checkpointing fequency?"),(0,a.yg)("p",null,"In theory one could save a checkpoint every 10 minutes and only ever lose 10 minutes of training time, but this too would dramatically delay the reaching of the finish line because large models can't be saved quickly and if the saving time starts to create a bottleneck for the training this approach becomes counterproductive."),(0,a.yg)("p",null,"Depending on your checkpointing methodology and the speed of your IO storage partition the saving of a large model can take from dozens of seconds to several minutes. Therefore, the optimal approach to saving frequency lies somewhere in the middle."),(0,a.yg)("p",null,"The math is quite simple - measure the amount of time it takes to save the checkpoint, multiply it by how many times you'd want to save it and see how much of an additional delay the checkpoint saving will contribute to the total training time."),(0,a.yg)("p",null,"For instance, Let suppose, "),(0,a.yg)("p",null,"1) Training Time (TT), i.e. allocated time on cluster : x days\n2) Time needed to save every checkpoint: y seconds\n3) Checkpoint fequencty: every z hours"),(0,a.yg)("p",null,"=> Then, Total Number of Checkpoints during the complete training time (NCP) = (x *24)/ z"),(0,a.yg)("p",null,"=> Total Time Spent on Checkpointing (TTSC) ","[in hours]"," = NCP * y/3600 "),(0,a.yg)("p",null,"=> % of Training time spent on checkpointing = (TTSC/TT",(0,a.yg)("em",{parentName:"p"},"24) ")," 100"),(0,a.yg)("p",null,"------------------Example calculations------------------------------------"),(0,a.yg)("p",null,"Training Time (TT or x): 7 days"),(0,a.yg)("p",null,"Time needed to save every checkpoint (y): 20 secs"),(0,a.yg)("p",null,"Checkpoint fequency (z): every 30 minutes, i.e., 0.5 hours"),(0,a.yg)("p",null,"Then, "),(0,a.yg)("p",null,"NCP = 7*24/0.5 = 336"),(0,a.yg)("p",null,"TTSC = 336* 20/3600 = 1.87 hours"),(0,a.yg)("p",null,"% of Training time spent on checkpointing = (1.87/7",(0,a.yg)("em",{parentName:"p"},"24)"),"100 ~ 1.2 % "),(0,a.yg)("h2",{id:"support-for-checkpointing-in-tensorflowkeras-and-pytorch-"},"Support for Checkpointing in Tensorflow/Keras and PyTorch ?"),(0,a.yg)("p",null,"Both PyTorch and TensorFlow/Keras support checkpointing. 
The follwoing sections provide an example of how Checkpointing can be done in these libraries."),(0,a.yg)("h2",{id:"example-of-tensorflowkeras-based-checkpointing"},"Example of Tensorflow/Keras based checkpointing:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-python"},"import tensorflow as tf\n\n#Imports the ModelCheckpoint class\nfrom tensorflow.keras.callbacks import ModelCheckpoint\n\n# Create your model as you normally would and compile it:\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Dense(64, activation='relu', input_shape=(32,)),\n tf.keras.layers.Dense(10, activation='softmax')\n])\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n# Create a Checkpoint Callback\ncheckpoint_callback = ModelCheckpoint(\n#filepath should be a path to your persistent volume. Example, /home/jovyan path in your JupyterLab pod.\n filepath='model_checkpoint.h5', # You can use formats like .hdf5 or .ckpt. \n save_best_only=True,\n monitor='val_loss',\n mode='min',\n verbose=1\n)\n\n# Train the Model with the Checkpoint Callback\nhistory = model.fit(\n x_train, y_train,\n validation_data=(x_val, y_val),\n epochs=10,\n callbacks=[checkpoint_callback]\n)\n\n# Loading a Saved Checkpoint\n# Load the model architecture + weights if you saved the full model\nmodel = tf.keras.models.load_model('model_checkpoint.h5')\n\n# If you saved only the weights, you would need to create the model architecture first, then load weights:\nmodel.load_weights('model_checkpoint.h5')\n\n# Optional Parameters for Checkpointing, Example with Custom Save Intervals\ncheckpoint_callback = ModelCheckpoint(\n filepath='model_checkpoint_epoch_{epoch:02d}.h5',\n save_freq='epoch',\n save_weights_only=True,\n verbose=1\n)\n\n\n")),(0,a.yg)("h2",{id:"example-of-pytorch-based-checkpointing"},"Example of PyTorch based checkpointing:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-python"},"import torch\n\n# Example model\nmodel = torch.nn.Linear(10, 2)\n\n# Save the entire model\ntorch.save(model, 'model.pth')\n\n# Loading the Entire Model\nmodel = torch.load('model.pth')\n\n# Saving and Loading Optimizer State, i.e., To continue training exactly as before, you may want to save the optimizer state as well.\n\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n\n# Save model and optimizer state_dicts\ncheckpoint = {\n 'epoch': 5,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': 0.5,\n}\ntorch.save(checkpoint, 'checkpoint.pth')\n\n# Load checkpoint\ncheckpoint = torch.load('checkpoint.pth')\nmodel.load_state_dict(checkpoint['model_state_dict'])\noptimizer.load_state_dict(checkpoint['optimizer_state_dict'])\nepoch = checkpoint['epoch']\nloss = checkpoint['loss']\nmodel.train() # Ensure model is in training mode if needed\n\n\n\n\n\n")),(0,a.yg)("h2",{id:"external-resources"},"External Resources"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"PyTorch Documentation: ",(0,a.yg)("a",{parentName:"p",href:"https://pytorch.org/tutorials/beginner/saving_loading_models.html#save-on-gpu-load-on-cpu"},"https://pytorch.org/tutorials/beginner/saving_loading_models.html#save-on-gpu-load-on-cpu"))),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"Tensorflow/Keras 
Documentation:"),(0,a.yg)("p",{parentName:"li"},(0,a.yg)("a",{parentName:"p",href:"https://www.digitalocean.com/community/tutorials/checkpointing-in-tensorflow"},"https://www.digitalocean.com/community/tutorials/checkpointing-in-tensorflow")),(0,a.yg)("p",{parentName:"li"},(0,a.yg)("a",{parentName:"p",href:"https://keras.io/api/callbacks/model_checkpoint/"},"https://keras.io/api/callbacks/model_checkpoint/"))),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"Machine Learning Engineering by stas bekman:\n",(0,a.yg)("a",{parentName:"p",href:"https://stasosphere.com/machine-learning/"},"https://stasosphere.com/machine-learning/")))))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[5167],{5680:(e,n,t)=>{t.d(n,{xA:()=>p,yg:()=>d});var o=t(6540);function i(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function a(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function r(e){for(var n=1;n=0||(i[t]=e[t]);return i}(e,n);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(i[t]=e[t])}return i}var c=o.createContext({}),s=function(e){var n=o.useContext(c),t=n;return e&&(t="function"==typeof e?e(n):r(r({},n),e)),t},p=function(e){var n=s(e.components);return o.createElement(c.Provider,{value:n},e.children)},h={inlineCode:"code",wrapper:function(e){var n=e.children;return o.createElement(o.Fragment,{},n)}},m=o.forwardRef((function(e,n){var t=e.components,i=e.mdxType,a=e.originalType,c=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),m=s(t),d=i,u=m["".concat(c,".").concat(d)]||m[d]||h[d]||a;return t?o.createElement(u,r(r({ref:n},p),{},{components:t})):o.createElement(u,r({ref:n},p))}));function d(e,n){var t=arguments,i=n&&n.mdxType;if("string"==typeof e||i){var a=t.length,r=new Array(a);r[0]=m;var l={};for(var c in n)hasOwnProperty.call(n,c)&&(l[c]=n[c]);l.originalType=e,l.mdxType="string"==typeof e?e:i,r[1]=l;for(var s=2;s{t.r(n),t.d(n,{assets:()=>p,contentTitle:()=>c,default:()=>d,frontMatter:()=>l,metadata:()=>s,toc:()=>h});var o=t(9668),i=t(1367),a=(t(6540),t(5680)),r=["components"],l={id:"checkpointing-ml-training",title:"Checkpointing Machine Learning Training"},c=void 0,s={unversionedId:"checkpointing-ml-training",id:"checkpointing-ml-training",title:"Checkpointing Machine Learning Training",description:"What is Checkpointing?",source:"@site/docs/checkpointing-ml-training-models.md",sourceDirName:".",slug:"/checkpointing-ml-training",permalink:"/docs/checkpointing-ml-training",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/checkpointing-ml-training-models.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"checkpointing-ml-training",title:"Checkpointing Machine Learning Training"},sidebar:"docs",previous:{title:"Access UM servers",permalink:"/docs/access-um-servers"},next:{title:"Parallelization using Dask",permalink:"/docs/dask-tutorial"}},p={},h=[{value:"What is Checkpointing?",id:"what-is-checkpointing",level:2},{value:"Checkpointing fequency?",id:"checkpointing-fequency",level:2},{value:"Support for Checkpointing in Tensorflow/Keras and PyTorch 
?",id:"support-for-checkpointing-in-tensorflowkeras-and-pytorch-",level:2},{value:"Example of Tensorflow/Keras based checkpointing:",id:"example-of-tensorflowkeras-based-checkpointing",level:2},{value:"Example of PyTorch based checkpointing:",id:"example-of-pytorch-based-checkpointing",level:2},{value:"External Resources",id:"external-resources",level:2}],m={toc:h};function d(e){var n=e.components,t=(0,i.A)(e,r);return(0,a.yg)("wrapper",(0,o.A)({},m,t,{components:n,mdxType:"MDXLayout"}),(0,a.yg)("h2",{id:"what-is-checkpointing"},"What is Checkpointing?"),(0,a.yg)("p",null,"Checkpointing is periodically saving the learned model parameters and current hyperparameter values during training. It helps to resume training of a model where you left off, instead of restarting the training from the beginning."),(0,a.yg)("p",null,"On shared DSRI cluster, you might have access to a GPU node for a limited number of time in one stretch, for example, maybe for 24 hours.\nTherefore, whenever the training job fails (due to timelimit expiry or otherwise), many hours of training can be lost. This problem is mitigated by a frequent checkpoint saving. When the training is resumed it'll continue from the last checkpoint saved. If the failure occurred 12 hours after the last checkpoint has been saved, 12 hours of training is lost and needs to be re-done. This can be very expensive."),(0,a.yg)("h2",{id:"checkpointing-fequency"},"Checkpointing fequency?"),(0,a.yg)("p",null,"In theory one could save a checkpoint every 10 minutes and only ever lose 10 minutes of training time, but this too would dramatically delay the reaching of the finish line because large models can't be saved quickly and if the saving time starts to create a bottleneck for the training this approach becomes counterproductive."),(0,a.yg)("p",null,"Depending on your checkpointing methodology and the speed of your IO storage partition the saving of a large model can take from dozens of seconds to several minutes. Therefore, the optimal approach to saving frequency lies somewhere in the middle."),(0,a.yg)("p",null,"The math is quite simple - measure the amount of time it takes to save the checkpoint, multiply it by how many times you'd want to save it and see how much of an additional delay the checkpoint saving will contribute to the total training time."),(0,a.yg)("p",null,"For instance, Let suppose, "),(0,a.yg)("p",null,"1) Training Time (TT), i.e. 
allocated time on the cluster: x days\n2) Time needed to save every checkpoint: y seconds\n3) Checkpoint frequency: every z hours"),(0,a.yg)("p",null,"=> Then, Total Number of Checkpoints during the complete training time (NCP) = (x * 24) / z"),(0,a.yg)("p",null,"=> Total Time Spent on Checkpointing (TTSC) [in hours] = NCP * y / 3600"),(0,a.yg)("p",null,"=> % of Training time spent on checkpointing = (TTSC / (TT * 24)) * 100"),(0,a.yg)("p",null,"------------------Example calculations------------------------------------"),(0,a.yg)("p",null,"Training Time (TT or x): 7 days"),(0,a.yg)("p",null,"Time needed to save every checkpoint (y): 20 seconds"),(0,a.yg)("p",null,"Checkpoint frequency (z): every 30 minutes, i.e., 0.5 hours"),(0,a.yg)("p",null,"Then, "),(0,a.yg)("p",null,"NCP = 7 * 24 / 0.5 = 336"),(0,a.yg)("p",null,"TTSC = 336 * 20 / 3600 ~ 1.87 hours"),(0,a.yg)("p",null,"% of Training time spent on checkpointing = (1.87 / (7 * 24)) * 100 ~ 1.1 % "),(0,a.yg)("h2",{id:"support-for-checkpointing-in-tensorflowkeras-and-pytorch-"},"Support for Checkpointing in Tensorflow/Keras and PyTorch?"),(0,a.yg)("p",null,"Both PyTorch and TensorFlow/Keras support checkpointing. The following sections provide an example of how checkpointing can be done in these libraries."),(0,a.yg)("h2",{id:"example-of-tensorflowkeras-based-checkpointing"},"Example of Tensorflow/Keras based checkpointing:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-python"},"import tensorflow as tf\n\n# Import the ModelCheckpoint class\nfrom tensorflow.keras.callbacks import ModelCheckpoint\n\n# Create your model as you normally would and compile it:\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Dense(64, activation='relu', input_shape=(32,)),\n tf.keras.layers.Dense(10, activation='softmax')\n])\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n# Create a Checkpoint Callback\ncheckpoint_callback = ModelCheckpoint(\n # filepath should be a path on your persistent volume, e.g. the /home/jovyan path in your JupyterLab pod.\n filepath='model_checkpoint.h5', # You can use formats like .hdf5 or .ckpt. 
\n save_best_only=True,\n monitor='val_loss',\n mode='min',\n verbose=1\n)\n\n# Train the Model with the Checkpoint Callback\nhistory = model.fit(\n x_train, y_train,\n validation_data=(x_val, y_val),\n epochs=10,\n callbacks=[checkpoint_callback]\n)\n\n# Loading a Saved Checkpoint\n# Load the model architecture + weights if you saved the full model\nmodel = tf.keras.models.load_model('model_checkpoint.h5')\n\n# If you saved only the weights, you would need to create the model architecture first, then load weights:\nmodel.load_weights('model_checkpoint.h5')\n\n# Optional Parameters for Checkpointing, Example with Custom Save Intervals\ncheckpoint_callback = ModelCheckpoint(\n filepath='model_checkpoint_epoch_{epoch:02d}.h5',\n save_freq='epoch',\n save_weights_only=True,\n verbose=1\n)\n\n\n")),(0,a.yg)("h2",{id:"example-of-pytorch-based-checkpointing"},"Example of PyTorch based checkpointing:"),(0,a.yg)("pre",null,(0,a.yg)("code",{parentName:"pre",className:"language-python"},"import torch\n\n# Example model\nmodel = torch.nn.Linear(10, 2)\n\n# Save the entire model\ntorch.save(model, 'model.pth')\n\n# Loading the Entire Model\nmodel = torch.load('model.pth')\n\n# Saving and Loading Optimizer State, i.e., To continue training exactly as before, you may want to save the optimizer state as well.\n\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n\n# Save model and optimizer state_dicts\ncheckpoint = {\n 'epoch': 5,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': 0.5,\n}\ntorch.save(checkpoint, 'checkpoint.pth')\n\n# Load checkpoint\ncheckpoint = torch.load('checkpoint.pth')\nmodel.load_state_dict(checkpoint['model_state_dict'])\noptimizer.load_state_dict(checkpoint['optimizer_state_dict'])\nepoch = checkpoint['epoch']\nloss = checkpoint['loss']\nmodel.train() # Ensure model is in training mode if needed\n\n\n\n\n\n")),(0,a.yg)("h2",{id:"external-resources"},"External Resources"),(0,a.yg)("ul",null,(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"PyTorch Documentation: ",(0,a.yg)("a",{parentName:"p",href:"https://pytorch.org/tutorials/beginner/saving_loading_models.html#save-on-gpu-load-on-cpu"},"https://pytorch.org/tutorials/beginner/saving_loading_models.html#save-on-gpu-load-on-cpu"))),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"Tensorflow/Keras Documentation:"),(0,a.yg)("p",{parentName:"li"},(0,a.yg)("a",{parentName:"p",href:"https://www.digitalocean.com/community/tutorials/checkpointing-in-tensorflow"},"https://www.digitalocean.com/community/tutorials/checkpointing-in-tensorflow")),(0,a.yg)("p",{parentName:"li"},(0,a.yg)("a",{parentName:"p",href:"https://keras.io/api/callbacks/model_checkpoint/"},"https://keras.io/api/callbacks/model_checkpoint/"))),(0,a.yg)("li",{parentName:"ul"},(0,a.yg)("p",{parentName:"li"},"Machine Learning Engineering by stas bekman:\n",(0,a.yg)("a",{parentName:"p",href:"https://stasosphere.com/machine-learning/"},"https://stasosphere.com/machine-learning/")))))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/fdea7475.78a5ce30.js b/assets/js/fdea7475.a3583034.js similarity index 99% rename from assets/js/fdea7475.78a5ce30.js rename to assets/js/fdea7475.a3583034.js index ea4e5f719..f837562bd 100644 --- a/assets/js/fdea7475.78a5ce30.js +++ b/assets/js/fdea7475.a3583034.js @@ -1 +1 @@ -"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[918],{5680:(e,t,r)=>{r.d(t,{xA:()=>u,yg:()=>f});var s=r(6540);function a(e,t,r){return t in 
e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function n(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);t&&(s=s.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,s)}return r}function o(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(s=0;s=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=s.createContext({}),c=function(e){var t=s.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):o(o({},t),e)),r},u=function(e){var t=c(e.components);return s.createElement(l.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return s.createElement(s.Fragment,{},t)}},p=s.forwardRef((function(e,t){var r=e.components,a=e.mdxType,n=e.originalType,l=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),p=c(r),f=a,h=p["".concat(l,".").concat(f)]||p[f]||d[f]||n;return r?s.createElement(h,o(o({ref:t},u),{},{components:r})):s.createElement(h,o({ref:t},u))}));function f(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var n=r.length,o=new Array(n);o[0]=p;var i={};for(var l in t)hasOwnProperty.call(t,l)&&(i[l]=t[l]);i.originalType=e,i.mdxType="string"==typeof e?e:a,o[1]=i;for(var c=2;c{r.r(t),r.d(t,{assets:()=>u,contentTitle:()=>l,default:()=>f,frontMatter:()=>i,metadata:()=>c,toc:()=>d});var s=r(9668),a=r(1367),n=(r(6540),r(5680)),o=["components"],i={id:"surf-offerings",title:"SURF Offerings"},l=void 0,c={unversionedId:"surf-offerings",id:"surf-offerings",title:"SURF Offerings",description:"SURF's Digital Services for Research and Development",source:"@site/docs/surf-offerings.md",sourceDirName:".",slug:"/surf-offerings",permalink:"/docs/surf-offerings",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/surf-offerings.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733328677,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"surf-offerings",title:"SURF Offerings"},sidebar:"docs",previous:{title:"Tensorflow Optimization",permalink:"/docs/speeding-tensorflow-dl"}},u={},d=[{value:"SURF's Digital Services for Research and Development",id:"surfs-digital-services-for-research-and-development",level:2},{value:"What is SURF?",id:"what-is-surf",level:2},{value:"What is a cluster computer?",id:"what-is-a-cluster-computer",level:2},{value:"Different types of Services provided by SURF:",id:"different-types-of-services-provided-by-surf",level:2},{value:"How to Get Started with SURF Services?",id:"how-to-get-started-with-surf-services",level:2},{value:"External Resources and references",id:"external-resources-and-references",level:2}],p={toc:d};function f(e){var t=e.components,r=(0,a.A)(e,o);return(0,n.yg)("wrapper",(0,s.A)({},p,r,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("h2",{id:"surfs-digital-services-for-research-and-development"},"SURF's Digital Services for Research and Development"),(0,n.yg)("h2",{id:"what-is-surf"},"What is SURF?"),(0,n.yg)("p",null,"SURF is the ICT cooperative for Dutch education and research institutions. As a collaborative organization, SURF\u2019s members\u2014its owners\u2014work together to deliver top-tier digital services, address complex innovation challenges, and exchange valuable knowledge."),(0,n.yg)("p",null,"Computing and storage infrastructure are essential for cutting-edge research. 
SURF supports researchers with a diverse range of computing and storage services. But before diving into these services, let\u2019s briefly explore what a cluster computer is."),(0,n.yg)("h2",{id:"what-is-a-cluster-computer"},"What is a cluster computer?"),(0,n.yg)("p",null,"A cluster computer is essentially a group of interconnected computers, called nodes, working together as a unified system. Each node has its own CPU, memory, and disk space, along with access to a shared file system. Imagine these nodes connected by network cables, like those in your home or office."),(0,n.yg)("p",null,"Cluster computers are designed for high-performance workloads, allowing users to run hundreds of computational tasks simultaneously."),(0,n.yg)("img",{src:"/img/screenshot_cluster.png",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,n.yg)("h2",{id:"different-types-of-services-provided-by-surf"},"Different types of Services provided by SURF:"),(0,n.yg)("p",null,"Some of the computing and storage solution provided by SURF are:"),(0,n.yg)("p",null,"1) Spider Cluster - High-performance Data Processing (DP) platform:"),(0,n.yg)("p",null," Spider is a versatile DP platform aimed at processing large structured data sets. Spider is an in house compute cluster built on top of SURF\u2019s in-house elastic Cloud. This allows for scalable processing of many terabytes or even petabytes of data, utilizing many hundreds of cores simultaneously, in exceedingly short timespans. Superb network throughput ensures connectivity to external data storage systems. Spider is used for large scale multi-year data intensive projects, for users to actively process their data, such are large static data sets or continuously growing data sets. Examples include genomics data, astronomic telescope data, physics detector data and satellite earth observations."),(0,n.yg)("p",null,"2) Snellius Cluster - the Dutch National supercomputer:"),(0,n.yg)("p",null," Snellius is the Dutch National supercomputer hosted at SURF. The system facilitates scientific research carried out in many Universities, independent research institutes, governmental organizations, and private companies in the Netherlands. Snellius is a cluster of heterogeneous nodes built by Lenovo, containing predominantly AMD technology, with capabilities for high performance computing (parallel, symmetric multiprocessing). The system also has several system-specific storage resources, that are geared towards supporting the various types of computing."),(0,n.yg)("p",null,"3) SURF Research Cloud (SRC):"),(0,n.yg)("p",null,' SURF Research Cloud is a service to facilitate scientists\u2019 collaborative work. The central idea in SRC is collaborative workspace. A workspcae translates directly to a "Virtual Machine".\nThese hosted workspaces aka virtual machines can be used for conducting research and development individually or together with your team/project members. '),(0,n.yg)("p",null,"4) Research Data Storage Services:"),(0,n.yg)("p",null," 4.1) Data Archive : The SURF Data Archive allows users to safely archive up to petabytes of valuable research data to ensure the long term accessibility and reproducibility of their work. 
The Data Archive is also connected to SURF\u2019s compute infrastructure, via a fast network connection, allowing for the seamless depositing and retrieval of data."),(0,n.yg)("p",null," 4.2) Data Repository : The Data Repository service is a web-based data publication and archiving platform that allows researchers to store, annotate and publish research data to ensure long-term preservation and availability of their datasets. All published datasets get their own DOI and Handle, while every file gets its own independent Handle to allow persistent reference on all levels."),(0,n.yg)("p",null," 4.3) dCache : dCache is scalable storage system. It contains more than 50 petabytes of scientific data, accessible through several authentication methods and protocols. It consists of\nmagnetic tape storage and hard disk storage and both are addressed by a common file system."),(0,n.yg)("p",null," 4.4) Object Store : Object storage is ideal for storing unstructured data that can grow without bound. Object storage does not have a directory-type structure like a normal file system has\nbut it organises its data in so-called containers that contain objects. There is no tree-like structure with files and directories. There are only containers with objects in them. SURF Object Store service is based on Ceph RGW and provides access using the S3 protocol, which is the defacto standard for addressing object storage."),(0,n.yg)("h2",{id:"how-to-get-started-with-surf-services"},"How to Get Started with SURF Services?"),(0,n.yg)("p",null,"The DSRI team is here to help you navigate SURF\u2019s services, including:"),(0,n.yg)("p",null,"1) Grant Applications:",(0,n.yg)("br",{parentName:"p"}),"\n","We assist researchers in applying for SURF grants. For instance:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre"},"* Small applications: Up to 1 million System Billing Units (SBU) on Snellius and/or 100 TB of dCache storage.(https://www.surf.nl/en/small-compute-applications-nwo)\n* Large applications: Customized resource allocations based on project needs.\n")),(0,n.yg)("p",null,"2) Resource Estimation:",(0,n.yg)("br",{parentName:"p"}),"\n","Unsure about your computing and storage requirements? 
We help estimate your needs in terms of SURF\u2019s billing units."),(0,n.yg)("p",null,"3) Use Case Analysis:",(0,n.yg)("br",{parentName:"p"}),"\n","We assess whether your research project is a good fit for SURF\u2019s services."),(0,n.yg)("h2",{id:"external-resources-and-references"},"External Resources and references"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"SURF: ",(0,n.yg)("a",{parentName:"li",href:"https://www.surf.nl/en"},"https://www.surf.nl/en")),(0,n.yg)("li",{parentName:"ul"},"Deep Learning Tutorials by UvA: ",(0,n.yg)("a",{parentName:"li",href:"https://uvadlc-notebooks.readthedocs.io/en/latest/index.html"},"https://uvadlc-notebooks.readthedocs.io/en/latest/index.html"))))}f.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[918],{5680:(e,t,r)=>{r.d(t,{xA:()=>u,yg:()=>f});var s=r(6540);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function n(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);t&&(s=s.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,s)}return r}function o(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);for(s=0;s=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=s.createContext({}),c=function(e){var t=s.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):o(o({},t),e)),r},u=function(e){var t=c(e.components);return s.createElement(l.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return s.createElement(s.Fragment,{},t)}},p=s.forwardRef((function(e,t){var r=e.components,a=e.mdxType,n=e.originalType,l=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),p=c(r),f=a,h=p["".concat(l,".").concat(f)]||p[f]||d[f]||n;return r?s.createElement(h,o(o({ref:t},u),{},{components:r})):s.createElement(h,o({ref:t},u))}));function f(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var n=r.length,o=new Array(n);o[0]=p;var i={};for(var l in t)hasOwnProperty.call(t,l)&&(i[l]=t[l]);i.originalType=e,i.mdxType="string"==typeof e?e:a,o[1]=i;for(var c=2;c{r.r(t),r.d(t,{assets:()=>u,contentTitle:()=>l,default:()=>f,frontMatter:()=>i,metadata:()=>c,toc:()=>d});var s=r(9668),a=r(1367),n=(r(6540),r(5680)),o=["components"],i={id:"surf-offerings",title:"SURF Offerings"},l=void 0,c={unversionedId:"surf-offerings",id:"surf-offerings",title:"SURF Offerings",description:"SURF's Digital Services for Research and Development",source:"@site/docs/surf-offerings.md",sourceDirName:".",slug:"/surf-offerings",permalink:"/docs/surf-offerings",draft:!1,editUrl:"https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/surf-offerings.md",tags:[],version:"current",lastUpdatedBy:"Adekunle Onaopepo",lastUpdatedAt:1733329555,formattedLastUpdatedAt:"Dec 4, 2024",frontMatter:{id:"surf-offerings",title:"SURF Offerings"},sidebar:"docs",previous:{title:"Tensorflow Optimization",permalink:"/docs/speeding-tensorflow-dl"}},u={},d=[{value:"SURF's Digital Services for Research and Development",id:"surfs-digital-services-for-research-and-development",level:2},{value:"What is SURF?",id:"what-is-surf",level:2},{value:"What is a cluster computer?",id:"what-is-a-cluster-computer",level:2},{value:"Different types of Services provided by 
SURF:",id:"different-types-of-services-provided-by-surf",level:2},{value:"How to Get Started with SURF Services?",id:"how-to-get-started-with-surf-services",level:2},{value:"External Resources and references",id:"external-resources-and-references",level:2}],p={toc:d};function f(e){var t=e.components,r=(0,a.A)(e,o);return(0,n.yg)("wrapper",(0,s.A)({},p,r,{components:t,mdxType:"MDXLayout"}),(0,n.yg)("h2",{id:"surfs-digital-services-for-research-and-development"},"SURF's Digital Services for Research and Development"),(0,n.yg)("h2",{id:"what-is-surf"},"What is SURF?"),(0,n.yg)("p",null,"SURF is the ICT cooperative for Dutch education and research institutions. As a collaborative organization, SURF\u2019s members\u2014its owners\u2014work together to deliver top-tier digital services, address complex innovation challenges, and exchange valuable knowledge."),(0,n.yg)("p",null,"Computing and storage infrastructure are essential for cutting-edge research. SURF supports researchers with a diverse range of computing and storage services. But before diving into these services, let\u2019s briefly explore what a cluster computer is."),(0,n.yg)("h2",{id:"what-is-a-cluster-computer"},"What is a cluster computer?"),(0,n.yg)("p",null,"A cluster computer is essentially a group of interconnected computers, called nodes, working together as a unified system. Each node has its own CPU, memory, and disk space, along with access to a shared file system. Imagine these nodes connected by network cables, like those in your home or office."),(0,n.yg)("p",null,"Cluster computers are designed for high-performance workloads, allowing users to run hundreds of computational tasks simultaneously."),(0,n.yg)("img",{src:"/img/screenshot_cluster.png",style:{maxWidth:"100%",maxHeight:"100%"}}),(0,n.yg)("h2",{id:"different-types-of-services-provided-by-surf"},"Different types of Services provided by SURF:"),(0,n.yg)("p",null,"Some of the computing and storage solution provided by SURF are:"),(0,n.yg)("p",null,"1) Spider Cluster - High-performance Data Processing (DP) platform:"),(0,n.yg)("p",null," Spider is a versatile DP platform aimed at processing large structured data sets. Spider is an in house compute cluster built on top of SURF\u2019s in-house elastic Cloud. This allows for scalable processing of many terabytes or even petabytes of data, utilizing many hundreds of cores simultaneously, in exceedingly short timespans. Superb network throughput ensures connectivity to external data storage systems. Spider is used for large scale multi-year data intensive projects, for users to actively process their data, such are large static data sets or continuously growing data sets. Examples include genomics data, astronomic telescope data, physics detector data and satellite earth observations."),(0,n.yg)("p",null,"2) Snellius Cluster - the Dutch National supercomputer:"),(0,n.yg)("p",null," Snellius is the Dutch National supercomputer hosted at SURF. The system facilitates scientific research carried out in many Universities, independent research institutes, governmental organizations, and private companies in the Netherlands. Snellius is a cluster of heterogeneous nodes built by Lenovo, containing predominantly AMD technology, with capabilities for high performance computing (parallel, symmetric multiprocessing). 
The system also has several system-specific storage resources that are geared towards supporting the various types of computing."),(0,n.yg)("p",null,"3) SURF Research Cloud (SRC):"),(0,n.yg)("p",null,' SURF Research Cloud is a service to facilitate scientists\u2019 collaborative work. The central idea in SRC is the collaborative workspace. A workspace translates directly to a "Virtual Machine".\nThese hosted workspaces, aka virtual machines, can be used for conducting research and development individually or together with your team/project members. '),(0,n.yg)("p",null,"4) Research Data Storage Services:"),(0,n.yg)("p",null," 4.1) Data Archive: The SURF Data Archive allows users to safely archive up to petabytes of valuable research data to ensure the long-term accessibility and reproducibility of their work. The Data Archive is also connected to SURF\u2019s compute infrastructure, via a fast network connection, allowing for the seamless depositing and retrieval of data."),(0,n.yg)("p",null," 4.2) Data Repository: The Data Repository service is a web-based data publication and archiving platform that allows researchers to store, annotate and publish research data to ensure long-term preservation and availability of their datasets. All published datasets get their own DOI and Handle, while every file gets its own independent Handle to allow persistent reference on all levels."),(0,n.yg)("p",null," 4.3) dCache: dCache is a scalable storage system. It contains more than 50 petabytes of scientific data, accessible through several authentication methods and protocols. It consists of\nmagnetic tape storage and hard disk storage, both of which are addressed by a common file system."),(0,n.yg)("p",null," 4.4) Object Store: Object storage is ideal for storing unstructured data that can grow without bound. Object storage does not have a directory-type structure like a normal file system does;\ninstead, it organises its data in so-called containers that contain objects. There is no tree-like structure with files and directories. There are only containers with objects in them. The SURF Object Store service is based on Ceph RGW and provides access using the S3 protocol, which is the de facto standard for addressing object storage."),(0,n.yg)("h2",{id:"how-to-get-started-with-surf-services"},"How to Get Started with SURF Services?"),(0,n.yg)("p",null,"The DSRI team is here to help you navigate SURF\u2019s services, including:"),(0,n.yg)("p",null,"1) Grant Applications:",(0,n.yg)("br",{parentName:"p"}),"\n","We assist researchers in applying for SURF grants. For instance:"),(0,n.yg)("pre",null,(0,n.yg)("code",{parentName:"pre"},"* Small applications: Up to 1 million System Billing Units (SBU) on Snellius and/or 100 TB of dCache storage. (https://www.surf.nl/en/small-compute-applications-nwo)\n* Large applications: Customized resource allocations based on project needs.\n")),(0,n.yg)("p",null,"2) Resource Estimation:",(0,n.yg)("br",{parentName:"p"}),"\n","Unsure about your computing and storage requirements? 
We help estimate your needs in terms of SURF\u2019s billing units."),(0,n.yg)("p",null,"3) Use Case Analysis:",(0,n.yg)("br",{parentName:"p"}),"\n","We assess whether your research project is a good fit for SURF\u2019s services."),(0,n.yg)("h2",{id:"external-resources-and-references"},"External Resources and references"),(0,n.yg)("ul",null,(0,n.yg)("li",{parentName:"ul"},"SURF: ",(0,n.yg)("a",{parentName:"li",href:"https://www.surf.nl/en"},"https://www.surf.nl/en")),(0,n.yg)("li",{parentName:"ul"},"Deep Learning Tutorials by UvA: ",(0,n.yg)("a",{parentName:"li",href:"https://uvadlc-notebooks.readthedocs.io/en/latest/index.html"},"https://uvadlc-notebooks.readthedocs.io/en/latest/index.html"))))}f.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.50131110.js b/assets/js/runtime~main.97c8b5aa.js similarity index 81% rename from assets/js/runtime~main.50131110.js rename to assets/js/runtime~main.97c8b5aa.js index e5d957fe2..77cf67eac 100644 --- a/assets/js/runtime~main.50131110.js +++ b/assets/js/runtime~main.97c8b5aa.js @@ -1 +1 @@ -(()=>{"use strict";var e,f,d,a,c={},b={};function r(e){var f=b[e];if(void 0!==f)return f.exports;var d=b[e]={exports:{}};return c[e].call(d.exports,d,d.exports,r),d.exports}r.m=c,e=[],r.O=(f,d,a,c)=>{if(!d){var b=1/0;for(i=0;i=c)&&Object.keys(r.O).every(e=>r.O[e](d[o]))?d.splice(o--,1):(t=!1,c0&&e[i-1][2]>c;i--)e[i]=e[i-1];e[i]=[d,a,c]},r.n=e=>{var f=e&&e.__esModule?()=>e.default:()=>e;return r.d(f,{a:f}),f},d=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,a){if(1&a&&(e=this(e)),8&a)return e;if("object"==typeof e&&e){if(4&a&&e.__esModule)return e;if(16&a&&"function"==typeof e.then)return e}var c=Object.create(null);r.r(c);var b={};f=f||[null,d({}),d([]),d(d)];for(var t=2&a&&e;"object"==typeof t&&!~f.indexOf(t);t=d(t))Object.getOwnPropertyNames(t).forEach(f=>b[f]=()=>e[f]);return b.default=()=>e,r.d(c,b),c},r.d=(e,f)=>{for(var d in 

Blog Title

· 3 min read

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien.

Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut.

Nulla facilisi. Maecenas sodales nec purus eget posuere. Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra.

Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum.

Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. Donec dignissim ornare nisl, eget tempor risus malesuada quis.

- + \ No newline at end of file diff --git a/blog/2017/04/10/blog-post-two/index.html b/blog/2017/04/10/blog-post-two/index.html index eee69cad4..09d44d427 100644 --- a/blog/2017/04/10/blog-post-two/index.html +++ b/blog/2017/04/10/blog-post-two/index.html @@ -16,13 +16,13 @@ - +

New Blog Post

· 3 min read

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien.

Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut.

Nulla facilisi. Maecenas sodales nec purus eget posuere. Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra.

Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum.

Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. Donec dignissim ornare nisl, eget tempor risus malesuada quis.

- + \ No newline at end of file diff --git a/blog/2017/09/25/testing-rss/index.html b/blog/2017/09/25/testing-rss/index.html index 956ae4933..1fa5244d1 100644 --- a/blog/2017/09/25/testing-rss/index.html +++ b/blog/2017/09/25/testing-rss/index.html @@ -16,13 +16,13 @@ - +

Adding RSS Support - RSS Truncation Test

· One min read

1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890

This should be truncated.

This line should never render in XML.

- + \ No newline at end of file diff --git a/blog/2017/09/26/adding-rss/index.html b/blog/2017/09/26/adding-rss/index.html index f3b0607d5..a0ded61cc 100644 --- a/blog/2017/09/26/adding-rss/index.html +++ b/blog/2017/09/26/adding-rss/index.html @@ -16,13 +16,13 @@ - + - + \ No newline at end of file diff --git a/blog/2017/10/24/new-version-1.0.0/index.html b/blog/2017/10/24/new-version-1.0.0/index.html index bd8a9b142..e09a85535 100644 --- a/blog/2017/10/24/new-version-1.0.0/index.html +++ b/blog/2017/10/24/new-version-1.0.0/index.html @@ -16,13 +16,13 @@ - +

New Version 1.0.0

· One min read

This blog post will test file name parsing issues when periods are present.

- + \ No newline at end of file diff --git a/blog/archive/index.html b/blog/archive/index.html index dbb21625a..f63dfa87b 100644 --- a/blog/archive/index.html +++ b/blog/archive/index.html @@ -16,13 +16,13 @@ - + - + \ No newline at end of file diff --git a/blog/index.html b/blog/index.html index 1cdfd31cd..0639035a1 100644 --- a/blog/index.html +++ b/blog/index.html @@ -16,13 +16,13 @@ - +

· One min read

1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890

This should be truncated.

· 3 min read

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien.

· 3 min read

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien.

- + \ No newline at end of file diff --git a/contact/index.html b/contact/index.html index f61c95dc8..146f224dd 100644 --- a/contact/index.html +++ b/contact/index.html @@ -16,13 +16,13 @@ - +

📬 Contact us

For any technical questions, please contact us through the ticketing system. Click here to submit a ticket. For non-technical questions, you can contact us at dsri-support-l@maastrichtuniversity.nl. The Research Computing Support team is located at the UM inner city Library location: Grote Looiersstraat 17 (GL17). We will reply during office hours (Mon-Fri: 08.30-17.30), except on public holidays.

The Research Computing Support team members:

  • Chris Kuipers - Coordinator Research Computing Support at UB
  • Laurent Winckers - DevOps Engineer at ICTS
  • Manu Agarwal - Research Software Engineer (HPC) at UB
  • Seun Adekunle - DevOps Engineer at ICTS
  • vacancy - Research Software Engineer (Software Stewardship) at UB
- + \ No newline at end of file diff --git a/docs/access-dsri/index.html b/docs/access-dsri/index.html index 653eff639..564f16f54 100644 --- a/docs/access-dsri/index.html +++ b/docs/access-dsri/index.html @@ -16,13 +16,13 @@ - +
-

Access the DSRI

Request an account

  1. You will need to have an account at Maastricht University with an email ending with @maastrichtuniversity.nl or @student.maastrichtuniversity.nl.

  2. Request access to the DSRI for your account: please fill in this form 📬 to provide us some information on what you plan to do with the DSRI. Once you have filled in the form, you will receive an email with detailed instructions on how to log in.

Connect to the UM network

You need to be connected to the UM network to access the DSRI.

🐧 On Linux: use openconnect to connect to the UM VPN. You can easily install it on Ubuntu and Debian distributions with apt:

sudo apt install openconnect
sudo openconnect -u YOUR.USER --authgroup 01-Employees vpn.maastrichtuniversity.nl

🍎 On MacOS and Windows: download and install the Maastricht University VPN client available at vpn.maastrichtuniversity.nl

⚠️ If you are a student, you will need to request access to the UM VPN first
  • You can try to use the Athena Student Desktop at athenadesktop.maastrichtuniversity.nl, to access the VPN through a virtual desktop
  • Or ask one of your teachers to request VPN access for you. You will need to send an email to the IT helpdesk of your department with the following information:
    • The email address of the student who will get VPN access
    • The course (provide the course ID) or project for which the student needs the VPN
    • The date until which the student will need the VPN

Access the web UI

Access the DSRI web UI at https://console-openshift-console.apps.dsri2.unimaas.nl

Password

Use your general UM password.

If you do not have access to the DSRI, contact us.

You will be able to log in at https://console-openshift-console.apps.dsri2.unimaas.nl using the standard Maastricht portal after clicking the login button:

Login screenLogin screen
Command line interface

We recommend installing the oc command-line interface to perform additional operations on your applications, such as loading large amounts of data using oc cp, or deploying an application from a local Dockerfile. Instructions on installing the client can be found ➡ here
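
As a quick sketch of what this looks like in practice (the login URL and names below are placeholders, check the linked instructions for the exact values):

# Log in to the DSRI cluster (you can copy a valid login command, with token, from the web UI)
oc login https://api.dsri2.unimaas.nl:6443 --token=<your-token>
# Switch to your project
oc project workspace-yourname
# Copy a local folder to a pod, and copy results back
oc cp ./my-data my-pod:/home/jovyan/my-data
oc cp my-pod:/home/jovyan/results ./results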

Access your project

In the DSRI OpenShift web UI, applications are deployed in projects.

  1. Create a new project with a meaningful name describing what you are doing, such as workspace-yourname.

  2. Go to your project (applications are deployed in a project).

Reuse your project

Only create a new project when it is really necessary. You can easily clean up your current project instead of creating a new one every time you want to try something, as shown below.
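
For example, to clean up everything created for an application in your current project, commands along these lines should work (the app label value is a placeholder):

# Delete all standard objects labelled with the application name "my-app"
oc delete all --selector app=my-app
# Persistent volume claims and secrets are not included in "all", delete them explicitly if needed
oc delete pvc,secret --selector app=my-app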

Login screen
Access permissions for developers to your project

You can use the Project view in the Developer perspective to grant or revoke access permissions to your project collaborators. For more info: Access permissions for developers to your project

About the web UI

Developers can use the web console to visualize, browse, and manage the contents of projects in the new version of OKD4.

The OpenShift Container Platform web console provides two perspectives:

  • the Administrator perspective
  • the Developer perspective.

The Developer perspective provides workflows specific to developer use cases, such as the ability to:

  • Create and deploy applications on OpenShift Container Platform by importing existing codebases, images, and dockerfiles.

  • Visually interact with applications, components, and services associated with them within a project and monitor their deployment and build status.

  • Group components within an application and connect the components within and across applications.

Accessing the Developer perspective

You can access the Developer perspective from the web console as follows:

  1. Log in to the OpenShift Container Platform web console using your login credentials.

    • The default view for the OpenShift Container Platform web console is the Administrator perspective.
  2. Use the perspective switcher to switch to the Developer perspective. The Topology view with a list of all the projects in your cluster is displayed.

    Developer Perspective
  3. Select an existing project from the list or use the Project drop-down list to create a new project.

info

If you have no workloads or applications in the project, the Topology view displays the available options to create applications. If you have existing workloads, the Topology view graphically displays your workload nodes.

Topology View
+ \ No newline at end of file diff --git a/docs/access-um-servers/index.html b/docs/access-um-servers/index.html index 22f4d7486..c833a9b36 100644 --- a/docs/access-um-servers/index.html +++ b/docs/access-um-servers/index.html @@ -16,13 +16,13 @@ - +
-

Access UM servers

Request access to internal UM servers

In certain cases, UM servers are not accessible by default from the DSRI. This is even the case for servers that are normally publicly accessible. To be able to access these UM servers from the DSRI, we need to submit a request to open the connection.

Please let us know either the server name and port you would like to access, or the URL (e.g. um-vm0057.unimaas.nl on port 443 or https://gitlab.maastrichtuniversity.nl). You can reach out to us either by email or by Slack.
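
To check whether a server is already reachable, you can for example open a terminal in one of your pods and probe the host and port (reusing the example hostnames above):

# Test if the HTTPS port of a UM server answers from your pod
curl -sv --connect-timeout 5 https://gitlab.maastrichtuniversity.nl > /dev/null
# Or test an arbitrary host and port
curl -sv --connect-timeout 5 telnet://um-vm0057.unimaas.nl:443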

UM services that are not accessible from DSRI right now:

  • central UM fileservices (MFS)

The procedure is described in the diagram below:

Access procedure UM servers
+ \ No newline at end of file diff --git a/docs/anatomy-of-an-application/index.html b/docs/anatomy-of-an-application/index.html index 06550a599..53cba81f9 100644 --- a/docs/anatomy-of-an-application/index.html +++ b/docs/anatomy-of-an-application/index.html @@ -16,13 +16,13 @@ - +
-

Anatomy of a DSRI application

This page presents how an application is typically built using an OpenShift template. It will also help you understand, more generally, the different objects that need to be defined when deploying an application on a Kubernetes cluster. Even if OpenShift templates can only be deployed to OpenShift, the objects they define are the same as in Kubernetes (apart from the Route, which becomes Ingress).

There are other ways to describe applications on an OpenShift cluster (here the DSRI), such as Helm or Operators. But OpenShift templates are the easiest and quickest way to build an application that can be deployed from the DSRI web UI catalog in a few clicks, by providing a few parameters.

It is better to have a basic understanding of what a docker container is to fully understand this walkthrough, but it should already give a good idea of the different objects deployed with each DSRI application.

We will use the template used to deploy JupyterLab as an example, and we will describe the goal, importance and caveats of each part of the application definition. But the same template and instructions can easily be reused for other applications with a web UI to access.

Checkout the complete JupyterLab template here (it is slightly different, with a bit more comments, but they are globally the same)

You will see that deploying on Kubernetes (and by extension, here OpenShift), is just about defining objects in a YAML file, like a complex docker-compose.yml file.

Do you have what it takes?

The number of objects might seem a bit overwhelming at first, but this is what it takes to automatically deploy a complex application on a large cluster, automatically available through a generated URL, with HTTPS encryption to protect your passwords when you log in to a web UI!

Application walkthrough

First, you need to create your Template object: this is the main object we will create here, as all other objects defined will be deployed by this template.

In this part we mainly provide the description and information that will be shown to users when deploying the application from the DSRI web UI catalog.

---
kind: Template
apiVersion: template.openshift.io/v1
labels:
  template: jupyterlab-root
metadata:
  name: jupyterlab-root
  annotations:
    openshift.io/display-name: JupyterLab
    description: |-
      Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need.
      📂 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created
      You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims
      You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks
      - jupyter/tensorflow-notebook
      - jupyter/r-notebook
      - jupyter/all-spark-notebook
      - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels)
      Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image
      Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages.
    iconClass: icon-python
    tags: python,jupyter,notebook
    openshift.io/provider-display-name: Institute of Data Science, UM
    openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter
    openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help

Parameters

Then define the parameters the user will be able to set in the DSRI catalog web UI when instantiating the application. APPLICATION_NAME is the most important one, as it will be used everywhere to create the objects and identify the application.

parameters:
- name: APPLICATION_NAME
  displayName: Name for the application
  description: Must be without spaces (use -), and unique in the project.
  value: jupyterlab
  required: true
- name: PASSWORD
  displayName: JupyterLab UI Password
  description: The password/token to access the JupyterLab web UI
  required: true
- name: APPLICATION_IMAGE
  displayName: Jupyter notebook Docker image
  value: ghcr.io/maastrichtu-ids/jupyterlab:latest
  required: true
  description: You can use any image based on https://github.com/jupyter/docker-stacks
- name: STORAGE_SIZE
  displayName: Storage size
  description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`.
  value: 5Gi
  required: true

We can then refer to those parameter values (filled in by the users of the template) in the rest of the template using this syntax: ${APPLICATION_NAME}
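
This parameter substitution is exactly what happens when you instantiate the template. As a sketch, you can reproduce it from the terminal with oc process (the file name and parameter values here are just examples):

# Fill in the template parameters and create the resulting objects
oc process -f jupyterlab-template.yml \
  -p APPLICATION_NAME=jupyterlab \
  -p PASSWORD=changeme \
  | oc apply -f -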

We will now describe all objects deployed when we instantiate this template (to start an application).

Image

First we define the ImageStream object to import the Docker image(s) of your application(s) on the DSRI cluster.

Setting importPolicy: scheduled to true makes the DSRI automatically check for new versions of this image, which can be useful if you always want the latest published version of an application. Visit the OpenShift ImageStreams documentation for more details. Be careful: enabling this feature without real need will cause the DSRI to query DockerHub more often, which might require you to log in to DockerHub to increase your image pull quota.

objects:
- kind: "ImageStream"
  apiVersion: image.openshift.io/v1
  metadata:
    name: ${APPLICATION_NAME}
    labels:
      app: ${APPLICATION_NAME}
  spec:
    tags:
    - name: latest
      from:
        kind: DockerImage
        name: ${APPLICATION_IMAGE}
      importPolicy:
        scheduled: true
    lookupPolicy:
      local: true

Create storage

Then we define the PersistentVolumeClaim, a persistent storage on which we will mount the /home/jovyan folder to avoid losing data if our application is restarted.

Any file outside of a persistent volume can be lost at any moment if the pod restarts; usually this only concerns temporary files if you are properly working in the persistent volume folder. This can also be useful if your application is crashing: stopping and restarting your pod (application) might fix it.

- kind: "PersistentVolumeClaim"
apiVersion: "v1"
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
accessModes:
- "ReadWriteMany"
resources:
requests:
storage: ${STORAGE_SIZE}

Secret

Then the Secret to store the password:

- kind: "Secret"
apiVersion: v1
metadata:
name: "${APPLICATION_NAME}"
labels:
app: ${APPLICATION_NAME}
stringData:
application-password: "${PASSWORD}"
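
Secrets are stored base64-encoded. If you later forget the password, you should be able to read it back with something like this (assuming the default application name):

# Retrieve and decode the password stored in the secret
oc get secret jupyterlab -o jsonpath='{.data.application-password}' | base64 -d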

Deployment

Then the DeploymentConfig (aka. Deployment) defines how to deploy the JupyterLab image. If you want to deploy another application alongside JupyterLab, you can do so by adding as many deployments as you want (and use the same, or different, persistent volume claims for storage)! Checkout the OpenShift Deployments documentation for more details.

In this first block we define the strategy to update and recreate our application when you change the YAML configuration, or when a new latest Docker image is published, allowing your service to always use the latest up-to-date version of a software without any intervention from you.

We chose the Recreate release option to make sure the container is properly recreated and to avoid unnecessary resource consumption, but you can also use Rolling to have a downtime-free transition between deployments.

- kind: "DeploymentConfig"
apiVersion: v1
metadata:
name: "${APPLICATION_NAME}"
labels:
app: "${APPLICATION_NAME}"
spec:
replicas: 1
strategy:
type: "Recreate"
triggers:
- type: "ConfigChange"
- type: "ImageChange"
imageChangeParams:
automatic: true
containerNames:
- jupyter-notebook
from:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
selector:
app: "${APPLICATION_NAME}"
deploymentconfig: "${APPLICATION_NAME}"

Pod spec

Then we define the spec of the pod that will be deployed by this DeploymentConfig.

Setting serviceAccountName: anyuid is required for most Docker containers, as it allows running a container with any user ID (e.g. root). Otherwise OpenShift expects the container to run with a random user ID, which requires the Docker image to be built specifically to work with random user IDs.

We then create the containers: array, which is where we define the containers deployed in the pod. It is recommended to deploy 1 container per pod, as it enables a better separation and management of the applications, unless you know what you are doing. You can also provide the command to run at the start of the container to overwrite the default one, and define the exposed ports (here 8888).

    template:
      metadata:
        labels:
          app: "${APPLICATION_NAME}"
          deploymentconfig: "${APPLICATION_NAME}"
      spec:
        serviceAccountName: "anyuid"
        containers:
        - name: "jupyter-notebook"
          image: "${APPLICATION_NAME}:latest"
          command:
          - "start-notebook.sh"
          - "--no-browser"
          - "--ip=0.0.0.0"
          ports:
          - containerPort: 8888
            protocol: TCP

Environment variables in the container

Then define the environment variables used in your container; the password and most parameters are usually set here, such as enabling sudo in the container.

          env:
          - name: JUPYTER_TOKEN
            valueFrom:
              secretKeyRef:
                key: "application-password"
                name: "${APPLICATION_NAME}"
          - name: JUPYTER_ENABLE_LAB
            value: "yes"
          - name: GRANT_SUDO
            value: "yes"

Mount storage

Then we need to mount the previously created PersistentVolume on /home/jovyan, the workspace of JupyterLab. Be careful: volumeMounts is in the containers: object, while volumes is defined in the spec: object.

          volumeMounts:
          - name: data
            mountPath: "/home/jovyan"
        volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "${APPLICATION_NAME}"

Security context

Then we define the securityContext to allow JupyterLab to run as root. This is not required for most applications; it is a specificity of the official Jupyter images, which run with root privileges.

        securityContext:
          runAsUser: 0
          supplementalGroups:
          - 100
        automountServiceAccountToken: false

Service

Then we create the Service to expose port 8888 of our JupyterLab container on the project network. This means that the JupyterLab web UI will be reachable by all other applications deployed in your project, using its application name as hostname (e.g. jupyterlab).

- kind: "Service"
apiVersion: v1
metadata:
name: "${APPLICATION_NAME}"
labels:
app: ${APPLICATION_NAME}
spec:
ports:
- name: 8888-tcp
protocol: TCP
port: 8888
targetPort: 8888
selector:
app: ${APPLICATION_NAME}
deploymentconfig: "${APPLICATION_NAME}"
type: ClusterIP
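
To illustrate: from a terminal in any other pod of the same project, the JupyterLab UI should then answer on this hostname and port (assuming the default application name):

# Reach the JupyterLab service from another pod in the same project
curl http://jupyterlab:8888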

Route

Finally, we define the Route, which automatically generates a URL for your application's service following this template: APPLICATION_NAME-PROJECT_ID-DSRI_URL (for a jupyterlab app in a project named workspace-yourname, this would typically give something like jupyterlab-workspace-yourname.apps.dsri2.unimaas.nl).

- kind: "Route"
apiVersion: v1
metadata:
name: "${APPLICATION_NAME}"
labels:
app: ${APPLICATION_NAME}
spec:
host: ''
to:
kind: Service
name: "${APPLICATION_NAME}"
weight: 100
port:
targetPort: 8888-tcp
tls:
termination: edge
insecureEdgeTerminationPolicy: Redirect

The complete application

Here is the complete file describing the JupyterLab deployment template. You can add it to your project catalog by going to +Add in the DSRI web UI, then clicking on the option to add a YAML file content, and copy/pasting the template YAML.

---
kind: Template
apiVersion: template.openshift.io/v1
labels:
  template: jupyterlab-root
metadata:
  name: jupyterlab-root
  annotations:
    openshift.io/display-name: JupyterLab
    description: |-
      Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need.
      📂 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created
      You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims
      You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks
      - jupyter/tensorflow-notebook
      - jupyter/r-notebook
      - jupyter/all-spark-notebook
      - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels)
      Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image
      Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages.
    iconClass: icon-python
    tags: python,jupyter,notebook
    openshift.io/provider-display-name: Institute of Data Science, UM
    openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter
    openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help

parameters:
- name: APPLICATION_NAME
  displayName: Name for the application
  description: Must be without spaces (use -), and unique in the project.
  value: jupyterlab
  required: true
- name: PASSWORD
  displayName: JupyterLab UI Password
  description: The password/token to access the JupyterLab web UI
  required: true
- name: APPLICATION_IMAGE
  displayName: Jupyter notebook Docker image
  value: ghcr.io/maastrichtu-ids/jupyterlab:latest
  required: true
  description: You can use any image based on https://github.com/jupyter/docker-stacks
- name: STORAGE_SIZE
  displayName: Storage size
  description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`.
  value: 5Gi
  required: true

objects:
- kind: "ImageStream"
  apiVersion: image.openshift.io/v1
  metadata:
    name: ${APPLICATION_NAME}
    labels:
      app: ${APPLICATION_NAME}
  spec:
    tags:
    - name: latest
      from:
        kind: DockerImage
        name: ${APPLICATION_IMAGE}
    lookupPolicy:
      local: true

- kind: "PersistentVolumeClaim"
  apiVersion: "v1"
  metadata:
    name: ${APPLICATION_NAME}
    labels:
      app: ${APPLICATION_NAME}
  spec:
    accessModes:
    - "ReadWriteMany"
    resources:
      requests:
        storage: ${STORAGE_SIZE}

- kind: "Secret"
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}"
    labels:
      app: ${APPLICATION_NAME}
  stringData:
    application-password: "${PASSWORD}"

- kind: "DeploymentConfig"
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}"
    labels:
      app: "${APPLICATION_NAME}"
  spec:
    replicas: 1
    strategy:
      type: Recreate
    triggers:
    - type: ConfigChange
    - type: ImageChange
      imageChangeParams:
        automatic: true
        containerNames:
        - jupyter-notebook
        from:
          kind: ImageStreamTag
          name: ${APPLICATION_NAME}:latest
    selector:
      app: "${APPLICATION_NAME}"
      deploymentconfig: "${APPLICATION_NAME}"

    template:
      metadata:
        labels:
          app: "${APPLICATION_NAME}"
          deploymentconfig: "${APPLICATION_NAME}"
      spec:
        serviceAccountName: "anyuid"
        containers:
        - name: jupyter-notebook
          image: "${APPLICATION_NAME}:latest"
          command:
          - "start-notebook.sh"
          - "--no-browser"
          - "--ip=0.0.0.0"
          ports:
          - containerPort: 8888
            protocol: TCP

          env:
          - name: "JUPYTER_TOKEN"
            valueFrom:
              secretKeyRef:
                key: application-password
                name: "${APPLICATION_NAME}"
          - name: JUPYTER_ENABLE_LAB
            value: "yes"
          - name: GRANT_SUDO
            value: "yes"

          volumeMounts:
          - name: data
            mountPath: "/home/jovyan"
        volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "${APPLICATION_NAME}"

        securityContext:
          runAsUser: 0
          supplementalGroups:
          - 100
        automountServiceAccountToken: false

- kind: "Service"
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}"
    labels:
      app: ${APPLICATION_NAME}
  spec:
    ports:
    - name: 8888-tcp
      protocol: TCP
      port: 8888
      targetPort: 8888
    selector:
      app: ${APPLICATION_NAME}
      deploymentconfig: "${APPLICATION_NAME}"
    type: ClusterIP

- kind: "Route"
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}"
    labels:
      app: ${APPLICATION_NAME}
  spec:
    host: ''
    to:
      kind: Service
      name: "${APPLICATION_NAME}"
      weight: 100
    port:
      targetPort: 8888-tcp
    tls:
      termination: edge
      insecureEdgeTerminationPolicy: Redirect
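
If you prefer the terminal, saving this file as e.g. template.yml and adding it to your current project with the oc client should also work:

# Add the template to the catalog of your current project
oc apply -f template.yml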

Add a configuration file

This practice is more advanced and is not required for most deployments, but you can easily create a ConfigMap object to define any file to be provided to the application at runtime.

For example, here we define a Python script that will be run when JupyterLab starts (jupyter_notebook_config.py). It clones the git repository URL, provided by the user when creating the template, into the workspace at the start of JupyterLab. If this repo contains files listing packages in its root folder (requirements.txt and packages.txt), they will be installed at startup.

- kind: ConfigMap
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}-cfg"
    labels:
      app: "${APPLICATION_NAME}"
  data:
    # Clone git repo, then install requirements.txt and packages.txt
    jupyter_notebook_config.py: |
      import os
      git_url = os.environ.get('GIT_URL')
      home_dir = os.environ.get('HOME')
      os.chdir(home_dir)
      if git_url:
          repo_id = git_url.rsplit('/', 1)[-1]
          os.system('git clone --quiet --recursive ' + git_url)
          os.chdir(repo_id)
          if os.path.exists('packages.txt'):
              os.system('sudo apt-get update')
              os.system('cat packages.txt | xargs sudo apt-get install -y')
          if os.path.exists('requirements.txt'):
              os.system('pip install -r requirements.txt')
          os.chdir(home_dir)

We then need to mount this config file, like a persistent volume, at the path where we want it (here /etc/jupyter/openshift). Change the volumes and volumeMounts of your DeploymentConfig:

          volumeMounts:
          - name: data
            mountPath: "/home/jovyan"
          - name: configs
            mountPath: "/etc/jupyter/openshift"
        automountServiceAccountToken: false
        volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "${APPLICATION_NAME}"
        - name: configs
          configMap:
            name: "${APPLICATION_NAME}-cfg"

Then change the jupyter-notebook container start command to include this config file:

          command:
          - "start-notebook.sh"
          - "--no-browser"
          - "--ip=0.0.0.0"
          - "--config=/etc/jupyter/openshift/jupyter_notebook_config.py"

Add the optional parameter to get the git URL to clone when the user creates the template:

parameters:
- name: GIT_URL
  displayName: URL of the git repository to clone (optional)
  required: false
  description: Source code will be automatically cloned, then requirements.txt and packages.txt content will be automatically installed if present

Finally, add the git URL parameter provided by the user as an environment variable of the container, so that it is picked up by the config script running at the start of JupyterLab:

          env:
          - name: GIT_URL
            value: "${GIT_URL}"

Add automated health checks

You can add readiness and liveness probes to a container to automatically check if the web application is up and ready. This makes the deployment wait for the JupyterLab web UI to be accessible before showing the application as ready in the Topology. This is useful if you are cloning a repository and installing packages, which makes JupyterLab take more time to start.

        containers:
        - name: jupyter-notebook
          readinessProbe:
            tcpSocket:
              port: 8888
          livenessProbe:
            initialDelaySeconds: 15
            tcpSocket:
              port: 8888
            failureThreshold: 40
            periodSeconds: 10
            timeoutSeconds: 2
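
The probes above only check that the TCP port is open. If you want the probe to also verify that the web UI answers HTTP requests, an httpGet probe is a possible alternative (the path and port are assumptions for JupyterLab):

          readinessProbe:
            httpGet:
              path: /
              port: 8888
            periodSeconds: 10
            timeoutSeconds: 2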

Check out the OpenShift Application health documentation for more details.

Define resource limits

You can also define resource requests and limits for each DeploymentConfig, in spec:

        spec:
          resources:
            requests:
              cpu: "1"
              memory: "2Gi"
            limits:
              cpu: "128"
              memory: "300Gi"

Build your own application template

The easiest way to build a template for a new application is to start from this JupyterLab template:

  • Replace jupyterlab-root by your application name
  • Replace 8888 by the port used by your application
  • Change the template and parameters descriptions to match your application
  • Remove the securityContext part, and other objects you do not need

If you need to start multiple containers, copy/paste the objects you need to create and edit them.

- +

Anatomy of a DSRI application

This page will present you how an applications is typically built using an OpenShift template. This will also help you understand more in general the different objects that needs to be defined when deploying an application on a Kubernetes cluster. Even if OpenShift templates can only be deployed to OpenShift, the objects they define are the same as in Kubernetes (apart from the Route which becomes Ingress).

There are other ways to describe applications on OpenShift cluster (here the DSRI), such as Helm or Operators. But OpenShift templates are the easiest and quickest way to build an application that can be deployed from the DSRI web UI catalog in a few clicks, and by providing a few parameters.

It is better to have a basic understanding of what a docker container is to fully understand this walkthrough, but it should already gives a good idea of the different objects deployed with each DSRI application.

We will use the template used to deploy JupyterLab as example, and we will describe the goal, importance and caveats of each parts of the application definition. But the same template and instructions can be easily reused for other applications with a web UI to access.

Checkout the complete JupyterLab template here (it will be slightly different with a bit more comments, but there are globally the same)

You will see that deploying on Kubernetes (and by extension, here OpenShift), is just about defining objects in a YAML file, like a complex docker-compose.yml file.

Do you got what it takes?

The amount of objects might seems a bit overwhelming at first, but this is what it takes to automatically deploy a complex application on a large cluster, automatically available through a generated URL, with HTTPS encryption to protect your passwords when you log to a web UI!

Application walkthrough

First, you need to create your Template objects, this will be the main object we will create here as all other objects defined will be deployed by this template.

In this part we mainly just provide the description and information that will be shown to users when deploying the application from the DSRI web UI catalog.

---
kind: Template
apiVersion: template.openshift.io/v1
labels:
template: jupyterlab-root
metadata:
name: jupyterlab-root
annotations:
openshift.io/display-name: JupyterLab
description: |-
Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need.
📂 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created
You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims
You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks
- jupyter/tensorflow-notebook
- jupyter/r-notebook
- jupyter/all-spark-notebook
- ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels)
Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image
Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages.
iconClass: icon-python
tags: python,jupyter,notebook
openshift.io/provider-display-name: Institute of Data Science, UM
openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter
openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help

Parameters

Then define the parameters the user will be able to define in the DSRI catalog web UI when instantiating the application. APPLICATION_NAME is the most important as it will be used everywhere to create the objects and identify the application.

parameters:
- name: APPLICATION_NAME
displayName: Name for the application
description: Must be without spaces (use -), and unique in the project.
value: jupyterlab
required: true
- name: PASSWORD
displayName: JupyterLab UI Password
description: The password/token to access the JupyterLab web UI
required: true
- name: APPLICATION_IMAGE
displayName: Jupyter notebook Docker image
value: ghcr.io/maastrichtu-ids/jupyterlab:latest
required: true
description: You can use any image based on https://github.com/jupyter/docker-stacks
- name: STORAGE_SIZE
displayName: Storage size
description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`.
value: 5Gi
required: true

We can then refer to those parameters value (filled by the users of the template) in the rest of the template using this syntax: ${APPLICATION_NAME}

We will now describe all objects deployed when we instantiate this template (to start an application).

Image

First we define the ImageStream object to import the Docker image(s) of your application(s) on the DSRI cluster

Setting the importPolicy: scheduled to true will have the DSRI to automatically check for new version of this image, which can be useful if you want to always have the latest published version of an applications. Visit the OpenShift ImageStreams documentation for more details. Be careful as enabling this feature without real need will cause the DSRI to query DockerHub more, which might require you to login to DockerHub to increase your pull request quota.

objects:
- kind: "ImageStream"
apiVersion: image.openshift.io/v1
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
tags:
- name: latest
from:
kind: DockerImage
name: ${APPLICATION_IMAGE}
importPolicy:
scheduled: true
lookupPolicy:
local: true

Create storage

Then we define the PersistentVolumeClaim, which is a persistent storage on which we will mount the /home/jovyan folder to avoid loosing data if our application is restarted.

Any file outside of a persistent volume can be lost at any moment if the pod restart, usually it only consists in temporary file if you are properly working in the persistent volume folder. This can be useful also if your application is crashing, stopping and restarting your pod (application) might fix it.

- kind: "PersistentVolumeClaim"
apiVersion: "v1"
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
accessModes:
- "ReadWriteMany"
resources:
requests:
storage: ${STORAGE_SIZE}

Secret

Then the Secret to store the password

- kind: "Secret"
apiVersion: v1
metadata:
name: "${APPLICATION_NAME}"
labels:
app: ${APPLICATION_NAME}
stringData:
application-password: "${PASSWORD}"

Deployment

Then the DeploymentConfig (aka. Deployment) define how to deploy the JupyterLab image, if you want to deploy another application alongside JupyterLab you can do it by adding as many deployments as you want! (and use the same, or different, persistent volume claims for storage). Checkout the OpenShift Deployments documentation for more details.

In this first block we will define the strategy to update and recreate our applications if you change the YAML configuration, or when a new latest docker image is updated, allowing your service to always use the latest up-to-date version of a software without any intervention from you.

We chose the Recreate release option to make sure the container is properly recreated and avoid unnecessary resources consumption, but you can also use Rolling to have a downtime free transition between deployments.

- kind: "DeploymentConfig"
apiVersion: v1
metadata:
name: "${APPLICATION_NAME}"
labels:
app: "${APPLICATION_NAME}"
spec:
replicas: 1
strategy:
type: "Recreate"
triggers:
- type: "ConfigChange"
- type: "ImageChange"
imageChangeParams:
automatic: true
containerNames:
- jupyter-notebook
from:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
selector:
app: "${APPLICATION_NAME}"
deploymentconfig: "${APPLICATION_NAME}"

Pod spec

Then we define the spec of the pod that will be deployed by this DeploymentConfig.

Setting the serviceAccountName: anyuid is required for most Docker containers as it allows to run a container using any user ID (e.g. root). Otherwise OpenShift expect to use a random user ID, which is require to build the Docker image especially to work with random user IDs.

We then create the containers: array which is where we will define the containers deployed in the pod. It is recommended to deploy 1 container per pod, as it enables a better separation and management of the applications, apart if you know what you are doing. You can also provide the command to run at the start of the container to overwrite the default one, and define the exposed ports (here 8080).

    template:
metadata:
labels:
app: "${APPLICATION_NAME}"
deploymentconfig: "${APPLICATION_NAME}"
spec:
serviceAccountName: "anyuid"
containers:
- name: "jupyter-notebook"
image: "${APPLICATION_NAME}:latest"
command:
- "start-notebook.sh"
- "--no-browser"
- "--ip=0.0.0.0"
ports:
- containerPort: 8888
protocol: TCP

Environment variables in the container

Then define the environment variables used in your container, usually the password and most parameters are set here, such as enabling sudo in the container.

          env:
- name: JUPYTER_TOKEN
valueFrom:
secretKeyRef:
key: "application-password"
name: "${APPLICATION_NAME}"
- name: JUPYTER_ENABLE_LAB
value: "yes"
- name: GRANT_SUDO
value: "yes"

Mount storage

Then we need to mount the previously created PersistentVolume on /home/jovyan , the workspace of JupyterLab. Be careful: volumeMounts is in the containers: object, and volumes is defined in the spec: object

          volumeMounts:
          - name: data
            mountPath: "/home/jovyan"
        volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "${APPLICATION_NAME}"

Security context

Then we define the securityContext to allow JupyterLab to run as root. This is not required for most applications; it is a specificity of the official Jupyter images, which run with root privileges.

        securityContext:
          runAsUser: 0
          supplementalGroups:
          - 100
        automountServiceAccountToken: false

Service

Then we create the Service to expose port 8888 of our JupyterLab container on the project network. This means that the JupyterLab web UI will be reachable by all other applications deployed in your project, using its application name as hostname (e.g. jupyterlab).

- kind: "Service"
apiVersion: v1
metadata:
name: "${APPLICATION_NAME}"
labels:
app: ${APPLICATION_NAME}
spec:
ports:
- name: 8888-tcp
protocol: TCP
port: 8888
targetPort: 8888
selector:
app: ${APPLICATION_NAME}
deploymentconfig: "${APPLICATION_NAME}"
type: ClusterIP
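
For example, once this Service exists, another pod in the same project could reach JupyterLab with a plain HTTP request. Here is a minimal Python sketch; the jupyterlab hostname assumes you kept the default application name:

import requests

# From any other pod in the same project, the service is resolvable by its application name
response = requests.get("http://jupyterlab:8888")
print(response.status_code)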

Route

Finally, we define the Route, which will automatically generate a URL for the service of your application following this template: APPLICATION_NAME-PROJECT_ID-DSRI_URL

- kind: "Route"
apiVersion: v1
metadata:
name: "${APPLICATION_NAME}"
labels:
app: ${APPLICATION_NAME}
spec:
host: ''
to:
kind: Service
name: "${APPLICATION_NAME}"
weight: 100
port:
targetPort: 8888-tcp
tls:
termination: edge
insecureEdgeTerminationPolicy: Redirect

The complete application

Here is the complete file describing the JupyterLab deployment template. You can add it to your project catalog by going to +Add in the DSRI web UI, clicking the option to add a YAML file content, and copy/pasting the template YAML.

---
kind: Template
apiVersion: template.openshift.io/v1
labels:
  template: jupyterlab-root
metadata:
  name: jupyterlab-root
  annotations:
    openshift.io/display-name: JupyterLab
    description: |-
      Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need.
      📂 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created
      You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims
      You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks
      - jupyter/tensorflow-notebook
      - jupyter/r-notebook
      - jupyter/all-spark-notebook
      - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels)
      Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image
      Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages.
    iconClass: icon-python
    tags: python,jupyter,notebook
    openshift.io/provider-display-name: Institute of Data Science, UM
    openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter
    openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help

parameters:
- name: APPLICATION_NAME
  displayName: Name for the application
  description: Must be without spaces (use -), and unique in the project.
  value: jupyterlab
  required: true
- name: PASSWORD
  displayName: JupyterLab UI Password
  description: The password/token to access the JupyterLab web UI
  required: true
- name: APPLICATION_IMAGE
  displayName: Jupyter notebook Docker image
  value: ghcr.io/maastrichtu-ids/jupyterlab:latest
  required: true
  description: You can use any image based on https://github.com/jupyter/docker-stacks
- name: STORAGE_SIZE
  displayName: Storage size
  description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`.
  value: 5Gi
  required: true

objects:
- kind: "ImageStream"
  apiVersion: image.openshift.io/v1
  metadata:
    name: ${APPLICATION_NAME}
    labels:
      app: ${APPLICATION_NAME}
  spec:
    tags:
    - name: latest
      from:
        kind: DockerImage
        name: ${APPLICATION_IMAGE}
    lookupPolicy:
      local: true

- kind: "PersistentVolumeClaim"
  apiVersion: "v1"
  metadata:
    name: ${APPLICATION_NAME}
    labels:
      app: ${APPLICATION_NAME}
  spec:
    accessModes:
    - "ReadWriteMany"
    resources:
      requests:
        storage: ${STORAGE_SIZE}

- kind: "Secret"
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}"
    labels:
      app: ${APPLICATION_NAME}
  stringData:
    application-password: "${PASSWORD}"

- kind: "DeploymentConfig"
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}"
    labels:
      app: "${APPLICATION_NAME}"
  spec:
    replicas: 1
    strategy:
      type: Recreate
    triggers:
    - type: ConfigChange
    - type: ImageChange
      imageChangeParams:
        automatic: true
        containerNames:
        - jupyter-notebook
        from:
          kind: ImageStreamTag
          name: ${APPLICATION_NAME}:latest
    selector:
      app: "${APPLICATION_NAME}"
      deploymentconfig: "${APPLICATION_NAME}"

    template:
      metadata:
        labels:
          app: "${APPLICATION_NAME}"
          deploymentconfig: "${APPLICATION_NAME}"
      spec:
        serviceAccountName: "anyuid"
        containers:
        - name: jupyter-notebook
          image: "${APPLICATION_NAME}:latest"
          command:
          - "start-notebook.sh"
          - "--no-browser"
          - "--ip=0.0.0.0"
          ports:
          - containerPort: 8888
            protocol: TCP

          env:
          - name: "JUPYTER_TOKEN"
            valueFrom:
              secretKeyRef:
                key: application-password
                name: "${APPLICATION_NAME}"
          - name: JUPYTER_ENABLE_LAB
            value: "yes"
          - name: GRANT_SUDO
            value: "yes"

          volumeMounts:
          - name: data
            mountPath: "/home/jovyan"
        volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "${APPLICATION_NAME}"

        securityContext:
          runAsUser: 0
          supplementalGroups:
          - 100
        automountServiceAccountToken: false

- kind: "Service"
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}"
    labels:
      app: ${APPLICATION_NAME}
  spec:
    ports:
    - name: 8888-tcp
      protocol: TCP
      port: 8888
      targetPort: 8888
    selector:
      app: ${APPLICATION_NAME}
      deploymentconfig: "${APPLICATION_NAME}"
    type: ClusterIP

- kind: "Route"
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}"
    labels:
      app: ${APPLICATION_NAME}
  spec:
    host: ''
    to:
      kind: Service
      name: "${APPLICATION_NAME}"
      weight: 100
    port:
      targetPort: 8888-tcp
    tls:
      termination: edge
      insecureEdgeTerminationPolicy: Redirect

Add a configuration file

This practice is more advanced, and is not required for most deployments, but you can easily create a ConfigMap object to define any file to be provided at runtime to the application.

For example, here we are going to define a Python script that will be run when JupyterLab starts (jupyter_notebook_config.py). It will clone the git repository URL provided by the user when creating the template into the workspace at the start of JupyterLab. If this repository contains package list files in its root folder (requirements.txt and packages.txt), they will be installed at startup.

- kind: ConfigMap
  apiVersion: v1
  metadata:
    name: "${APPLICATION_NAME}-cfg"
    labels:
      app: "${APPLICATION_NAME}"
  data:
    # Clone git repo, then install requirements.txt and packages.txt
    jupyter_notebook_config.py: |
      import os
      git_url = os.environ.get('GIT_URL')
      home_dir = os.environ.get('HOME')
      os.chdir(home_dir)
      if git_url:
          repo_id = git_url.rsplit('/', 1)[-1]
          os.system('git clone --quiet --recursive ' + git_url)
          os.chdir(repo_id)
          if os.path.exists('packages.txt'):
              os.system('sudo apt-get update')
              os.system('cat packages.txt | xargs sudo apt-get install -y')
          if os.path.exists('requirements.txt'):
              os.system('pip install -r requirements.txt')
          os.chdir(home_dir)

We then need to mount this config file, like a persistent volume, at the path where we want it (here /etc/jupyter/openshift). Change the volumes and volumeMounts of your DeploymentConfig:

          volumeMounts:
          - name: data
            mountPath: "/home/jovyan"
          - name: configs
            mountPath: "/etc/jupyter/openshift"
        automountServiceAccountToken: false
        volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "${APPLICATION_NAME}"
        - name: configs
          configMap:
            name: "${APPLICATION_NAME}-cfg"

Then change the jupyter-notebook container start command to include this config file:

          command:
          - "start-notebook.sh"
          - "--no-browser"
          - "--ip=0.0.0.0"
          - "--config=/etc/jupyter/openshift/jupyter_notebook_config.py"

Add the optional parameter to get the git URL to clone when the user creates the template:

parameters:
- name: GIT_URL
  displayName: URL of the git repository to clone (optional)
  required: false
  description: Source code will be automatically cloned, then requirements.txt and packages.txt content will be automatically installed if present

Finally, add the git URL parameter provided by the user as an environment variable of the container, so that it is picked up by the config script running at the start of JupyterLab:

          env:
          - name: GIT_URL
            value: "${GIT_URL}"

Add automated health checks

You can add readiness and liveness probes to a container to automatically check if the web application is up and ready. This allows OpenShift to wait for the JupyterLab web UI to be accessible before showing the application as ready in the Topology, which is useful if you are cloning a repository and installing packages, as JupyterLab will take more time to start.

        containers:
        - name: jupyter-notebook
          readinessProbe:
            tcpSocket:
              port: 8888
          livenessProbe:
            initialDelaySeconds: 15
            tcpSocket:
              port: 8888
            failureThreshold: 40
            periodSeconds: 10
            timeoutSeconds: 2

Check out the OpenShift Application health documentation for more details.

Define resource limits

You can also define resource requests and limits for each DeploymentConfig, in spec:

        spec:
          resources:
            requests:
              cpu: "1"
              memory: "2Gi"
            limits:
              cpu: "128"
              memory: "300Gi"

Build your own application template

The easiest way to build a template for a new application is to start from this JupyterLab template:

  • Replace jupyterlab-root by your application name
  • Replace 8888 by the port used by your application
  • Change the template and parameters descriptions to match your application
  • Remove the securityContext part, and other objects you do not need

If you need to start multiple containers, copy/paste the objects you need to create and edit them.


Data streaming

Apache Flink enables processing of Data Streams using languages such as Java or Scala.

Root permission required

🔒 You need root containers enabled (aka. anyuid) in your project to start this application.

Create the Apache Flink template in your project using vemonet/flink-on-openshift

oc apply -f https://raw.githubusercontent.com/vemonet/flink-on-openshift/master/template-flink-dsri.yml

Use the template to start the cluster from the catalog.

Use this command to get the Flink Jobmanager pod ID and copy files to the pod.

oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name

# Example creating the workspace folder and copying the RMLStreamer.jar to the pod
oc exec <pod_id> -- mkdir -p /mnt/workspace/resources
oc cp workspace/resources/RMLStreamer.jar <pod_id>:/mnt/

Delete the Apache Flink cluster (change the application name):

oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=flink-cluster

Genomics

Feel free to propose new services using pull requests, or to request them by creating new issues.

Trinity RNA Seq

Trinity assembles transcript sequences from Illumina RNA-Seq data. It represents a novel method for the efficient and robust de novo reconstruction of transcriptomes from RNA-seq data. See their documentation.

You can start a container using the Trinity RNA-Seq template in the Catalog web UI (make sure the Templates checkbox is checked)

Deploy Trinity RNA Seq

This template uses the Trinity RNA-Seq image hosted in the UM IDS GitHub Container Registry

Persistent data folder

📂 Use the /usr/local/src/work folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims.

We enabled port 8787 in the container in case you need to deploy applications.


Imaging software

Feel free to propose new services using pull requests, or to request them by creating new issues.

CellProfiler

Cell image analysis software. See their website.

You can start a container using the CellProfiler template in the Catalog web UI (make sure the Templates checkbox is checked)

This template uses the official CellProfiler image hosted on DockerHub

Persistent data folder

📂 Use the /usr/local/src/work folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims.

Once CellProfiler has started, you can access it through the pod terminal (in the DSRI web UI, or using oc rsh POD_ID)

cellprofiler --help
cellprofiler --run
cellprofiler --run-headless
Getting Started

🐬 For more information on using CellProfiler from the command line, see this post


OpenDataHub

Work in progress

Deploying an OpenDataHub cluster is a work in progress on the DSRI, contact us if you are interested in trying it out.

OpenDataHub is a project to orchestrate the deployment of Data Science applications on OpenShift, based on Kubeflow.

Components available on DSRI

Those components have been tested on the DSRI:

Start Spark with JupyterHub

  1. Check out the official documentation to start an instance of OpenDataHub (note that the Operator has already been installed)

  2. Then visit the documentation to reach the Spark cluster from a Jupyter notebook.

All components

Here are all the components that can be deployed as part of an OpenDataHub:

Let us know if you need help to deploy one of those components on the DSRI.


Utilities

Feel free to propose new services using pull requests, or to request them by creating new issues.

Ubuntu

With the terminal

Start Ubuntu with the root user which has sudo permissions to install anything.

You can start the application using the Ubuntu template in the Catalog web UI (make sure the Templates checkbox is checked)

Login Credentials

Username: root

Password: Template creation password

This template uses the Ubuntu image hosted on DockerHub, see its documentation at https://hub.docker.com/r/ubuntu

Persistent data folder

📂 Use the /root folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims.

We enabled the port 8080 in the Ubuntu container if you need to deploy applications.

To quickly access it from the terminal you can use the Terminal tab in the pod page, or via your local terminal:

  1. Get the Ubuntu pod ID:

    oc get pods
  2. Connect to it:

    oc rsh POD_ID
  3. Enable Bash in the Ubuntu container (if it starts with the Shell)

    bash
Deploy Ubuntu

With a web UI

Start Ubuntu with a web UI accessible via a URL (using VNC). You will be the root user, which has elevated permissions to install anything via apt install <package-name>. Before you install a package, run apt update; this also solves the E: unable to locate package and E: no installation candidate errors.

You can start the application using the Ubuntu with web UI template in the Catalog web UI (make sure the Templates checkbox is checked)

Login Credentials

Username: root

Password: Template creation password

This template uses the Docker image defined at https://github.com/fcwu/docker-ubuntu-vnc-desktop

Less stable than the official image

This image might be less stable than the original Ubuntu image. Let us know on Slack if you have any problem!

File browser

Deploy a file browser on your persistent volume. This will provide a web UI to upload and download data to your DSRI persistent volume in case you need it (JupyterLab, RStudio and VisualStudio Code server already include a file browser)

You can start a container using the File Browser for existing storage template in the Catalog web UI (make sure the Templates checkbox is checked)

Deploy File browser

You can only deploy the file browser on an existing Persistent Volume Claim; this enables you to add a web UI to access this storage.

The following parameters can be provided:

  1. Provide a unique Application name. It will be used to generate the application URL.
  2. Provide a Password. You will need to hash the password first for extra security; use this quick Docker command to do it: docker run filebrowser/filebrowser hash mypassword
  3. The Storage name of the Persistent Volume Claim (PVC) that will be exposed by the filebrowser.
  4. Storage subpath in the Persistent Volume Claim that will be exposed by the filebrowser. Leave it empty to use the root folder of the persistent volume.

You can find the Storage name if you go to the deployments page > Storage panel.

Creating or Connecting an Existing Persistent Storage

Find more details about how to create persistent storage

Create Persistent Storage
info

The DSRI uses OpenShift Container Storage (OCS), which is based on Ceph and offers the ReadWriteOnce and ReadWriteMany access modes.

  • ReadWriteOnce (RWO) volumes cannot be mounted on multiple nodes. Use the ReadWriteMany (RWX) access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on a shut down or crashed node.

Find more details about how to connect existing persistent storage

Add Existing Persistent Storage
info

You can try the above method if you want to connect more applications to the same storage.

This deployment requires the root user to be enabled on your project. Contact the DSRI support team or create a new issue to request root access, or to create a persistent volume for your project if you don't have one.

Credentials

Default credentials are the username admin and the password admin.

Change password

Please change the password in the Filebrowser Web UI once it has been created.

Checkpointing ML training

Whenever the training job fails (due to timelimit expiry or otherwise), many hours of training can be lost. This problem is mitigated by frequent checkpoint saving. When the training is resumed it will continue from the last checkpoint saved. If the failure occurred 12 hours after the last checkpoint was saved, 12 hours of training are lost and need to be re-done. This can be very expensive.

Checkpointing frequency?

In theory one could save a checkpoint every 10 minutes and only ever lose 10 minutes of training time, but this would dramatically delay reaching the finish line: large models can't be saved quickly, and if the saving time becomes a bottleneck for the training, this approach becomes counterproductive.

Depending on your checkpointing methodology and the speed of your IO storage partition the saving of a large model can take from dozens of seconds to several minutes. Therefore, the optimal approach to saving frequency lies somewhere in the middle.

The math is quite simple - measure the amount of time it takes to save the checkpoint, multiply it by how many times you'd want to save it and see how much of an additional delay the checkpoint saving will contribute to the total training time.

For instance, let us suppose:

  1. Training time (TT), i.e. the allocated time on the cluster: x days
  2. Time needed to save each checkpoint: y seconds
  3. Checkpoint frequency: every z hours

Then the total number of checkpoints during the complete training time (NCP) = (x * 24) / z

Total time spent on checkpointing (TTSC) [in hours] = NCP * y / 3600

% of training time spent on checkpointing = (TTSC / (TT * 24)) * 100

Example calculation:

Training time (TT or x): 7 days

Time needed to save each checkpoint (y): 20 seconds

Checkpoint frequency (z): every 30 minutes, i.e. 0.5 hours

Then:

NCP = 7 * 24 / 0.5 = 336

TTSC = 336 * 20 / 3600 = 1.87 hours

% of training time spent on checkpointing = (1.87 / (7 * 24)) * 100 ≈ 1.1 %
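
As a sanity check, this calculation is easy to script; here is a minimal Python sketch (the function name is ours, and the numbers are the ones from the example above):

def checkpoint_overhead(tt_days, save_seconds, freq_hours):
    # Total number of checkpoints during the complete training
    ncp = tt_days * 24 / freq_hours
    # Total time spent on checkpointing, in hours
    ttsc = ncp * save_seconds / 3600
    # Share of the allocated training time, in percent
    pct = ttsc / (tt_days * 24) * 100
    return ncp, ttsc, pct

print(checkpoint_overhead(7, 20, 0.5))  # (336.0, ~1.87, ~1.11)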

Support for Checkpointing in TensorFlow/Keras and PyTorch

Both PyTorch and TensorFlow/Keras support checkpointing. The following sections provide examples of how checkpointing can be done in these libraries.

Example of TensorFlow/Keras-based checkpointing:

import tensorflow as tf

# Import the ModelCheckpoint class
from tensorflow.keras.callbacks import ModelCheckpoint

# Create your model as you normally would and compile it:
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(32,)),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Create a checkpoint callback
checkpoint_callback = ModelCheckpoint(
    # filepath should be a path to your persistent volume,
    # e.g. under /home/jovyan in your JupyterLab pod.
    filepath='model_checkpoint.h5',  # You can use formats like .hdf5 or .ckpt.
    save_best_only=True,
    monitor='val_loss',
    mode='min',
    verbose=1
)

# Train the model with the checkpoint callback
history = model.fit(
    x_train, y_train,
    validation_data=(x_val, y_val),
    epochs=10,
    callbacks=[checkpoint_callback]
)

# Loading a saved checkpoint:
# load the model architecture + weights if you saved the full model
model = tf.keras.models.load_model('model_checkpoint.h5')

# If you saved only the weights, create the model architecture first, then load the weights:
model.load_weights('model_checkpoint.h5')

# Optional parameters for checkpointing, e.g. with custom save intervals
checkpoint_callback = ModelCheckpoint(
    filepath='model_checkpoint_epoch_{epoch:02d}.h5',
    save_freq='epoch',
    save_weights_only=True,
    verbose=1
)


Example of PyTorch-based checkpointing:

import torch

# Example model
model = torch.nn.Linear(10, 2)

# Save the entire model
torch.save(model, 'model.pth')

# Loading the entire model
model = torch.load('model.pth')

# Saving and loading the optimizer state: to continue training exactly
# as before, you may want to save the optimizer state as well.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Save the model and optimizer state_dicts
checkpoint = {
    'epoch': 5,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'loss': 0.5,
}
torch.save(checkpoint, 'checkpoint.pth')

# Load the checkpoint
checkpoint = torch.load('checkpoint.pth')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
model.train()  # Ensure the model is in training mode if needed
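
Building on the snippet above, a common pattern is to save such a checkpoint periodically inside the training loop. Here is a minimal, hypothetical sketch (the training step is a placeholder; in a DSRI pod you would write the files to your persistent volume, e.g. under /home/jovyan):

import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

checkpoint_every = 5  # save every 5 epochs (tune using the overhead math above)
for epoch in range(100):
    # ... your actual training step(s) for this epoch go here ...
    if epoch % checkpoint_every == 0:
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
        }, f'checkpoint_epoch_{epoch:03d}.pth')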





External Resources

https://stasosphere.com/machine-learning/


Contribute

Check if there are issues related to your contribution, or post a new issue to discuss improvements to the documentation.

GitHub issues
Fork this repository

Otherwise you will need to first fork this repository, then send a pull request when your changes have been pushed.

Direct change if permission

If you are part of the MaastrichtU-IDS organization on GitHub you can directly create a new branch to make your change in the main repository.


⚡ Quick edit on GitHub

You can easily make quick changes directly on the GitHub website by clicking the Edit this page button at the bottom left of each documentation page, or by browsing to your forked repository.

For example to edit the introduction page you can go to https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/introduction.md


🏗️ Larger changes locally

To edit the documentation it is easier to clone the repository on your laptop, and use a user-friendly markdown editor.

Use a Markdown editor

We strongly recommend using a Markdown editor, such as Typora. It makes writing documentation much faster and more enjoyable.

  1. Clone the repository on your machine:

    git clone https://github.com/MaastrichtU-IDS/dsri-documentation.git
    cd dsri-documentation
  2. Create a new branch from the master branch 🕊️

    git checkout -b my-branch
  3. Add your changes in this branch ✒️

  4. Start the website on http://localhost:3000 to test it:

    cd website
    yarn install
    yarn start
Send a pull request

Send a pull request to the master branch when your changes are done

Development documentation

Read more about running the API in development at https://github.com/MaastrichtU-IDS/dsri-documentation#run-for-development


🔄 Automated deployment

The documentation website is automatically updated and redeployed at each change to the main branch using a GitHub Actions workflow.

Publish to GitHub Pages


📝 Help

Most pages of this website are written in Markdown, hence they are really easy to edit, especially when you are using a convenient markdown editor. Only the index.js page is written in React JavaScript.

🔎 Files locations

  • Main DSRI documentation markdown files in website/docs
    • Left docs menu defined in website/sidebars.json
  • Blog articles as markdown files in website/docs
  • Index and contribute pages in website/src/pages
  • Images in website/src/static/img
  • Website configuration file in website/docusaurus.config.js

🦄 Markdown tip

Colorful boxes

Use the following tags to create colorful boxes in markdown files:

:::note You can specify an optional title
Grey box
:::

:::tip Green box
The content and title *can* include markdown.
:::

:::info Blue box
Useful information.
:::

:::caution Be careful!
Yellow box
:::

:::danger Fire red box
Danger danger, mayday!
:::

✔️ Pull Request process

  1. Before sending a pull request, make sure the DSRI documentation website still works as expected with the new changes properly integrated:

    cd website
    yarn install
    yarn start
  2. Send a pull request to the master branch.
  3. Project contributors will review your change as soon as they can!

Deploy Dask Cluster

🧊 Installation with Helm

  1. Go to the +Add page, and select to add a Helm Chart
     (screenshot: dask init)
  2. Search for and select the Dask chart, then click on Create
     (screenshots: dask init)
  3. Configure the YAML file; under the Jupyter section, set:
    • Command: ["jupyter", "lab", "--allow-root", "--ip=0.0.0.0", "--port=8888", "--no-browser"]
    • servicePort: 8888
     (screenshots: dask init)
  4. Add Storage to the dask-jupyter pod as shown below
     (screenshot: dask init)
  5. Set up a new Persistent Volume Claim for the cluster as shown below
     (screenshot: dask init)

🪐 Configure a Route for the Cluster

  1. Switch to the Administrator view and navigate to Route
     (screenshots: dask route)
  2. Create a new route by clicking the Create Route button, with the setup shown below
     (screenshots: dask route)
  3. Navigate to the provided link to access your cluster
     (screenshot: dask route)

🪐 Access the Jupyter Password/Token

  1. Start up the terminal
    • Run oc get pods to find the full pod name of the dask-jupyter pod
    • Run oc logs <podname> and copy the token used to access the Jupyter notebook
     (screenshot: dask route)

Parallelization using Dask

🧊 Installation

!pip install "dask[complete]"
import dask

dask.__version__
'2023.5.0'
import dask.array as da
import dask.bag as db
import dask.dataframe as dd
import numpy as np
import pandas as pd

🪐 Basic Concepts of Dask

On a high-level, you can think of Dask as a wrapper that extends the capabilities of traditional tools like pandas, NumPy, and Spark to handle larger-than-memory datasets.

When faced with large objects like larger-than-memory arrays (vectors) or matrices (dataframes), Dask breaks them up into chunks, also called partitions.

For example, consider the array of 12 random numbers in both NumPy and Dask:

narr = np.random.rand(12)

narr
array([0.44236558, 0.00504448, 0.87087911, 0.468925  , 0.37513511,
0.22607761, 0.83035297, 0.07772372, 0.61587933, 0.82861156,
0.66214299, 0.90979423])
darr = da.from_array(narr, chunks=3)
darr
(image: dask table)

The image above shows that the Dask array contains four chunks as we set chunks to 3. Under the hood, each chunk is a NumPy array in itself.
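
You can also verify this programmatically: the chunks attribute of a Dask array lists the chunk sizes along each dimension.

darr.chunks
((3, 3, 3, 3),)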

To fully appreciate the benefits of Dask, we need a large dataset, preferably over 1 GB in size. Consider the autogenerated data from the script below:

import string

# Set the desired number of rows and columns
num_rows = 5_000_000
num_cols = 10
chunk_size = 100_000

# Define an empty DataFrame to store the chunks
df_chunks = pd.DataFrame()

# Generate and write the dataset in chunks
for i in range(0, num_rows, chunk_size):
    # Generate random numeric data
    numeric_data = np.random.rand(chunk_size, num_cols)

    # Generate random categorical data
    letters = list(string.ascii_uppercase)
    categorical_data = np.random.choice(letters, (chunk_size, num_cols))

    # Combine numeric and categorical data into a Pandas DataFrame
    df_chunk = pd.DataFrame(np.concatenate([numeric_data, categorical_data], axis=1))

    # Set column names for better understanding
    column_names = [f'Numeric_{i}' for i in range(num_cols)] + [f'Categorical_{i}' for i in range(num_cols)]
    df_chunk.columns = column_names

    # Append the current chunk to the DataFrame holding all chunks
    df_chunks = pd.concat([df_chunks, df_chunk], ignore_index=True)

    # Write the DataFrame chunk to a CSV file incrementally
    if (i + chunk_size) >= num_rows or (i // chunk_size) % 10 == 0:
        df_chunks.to_csv('large_dataset.csv', index=False, mode='a', header=(i == 0))
        df_chunks = pd.DataFrame()
dask_df = dd.read_csv("large_dataset.csv")

dask_df.head()

Even though the file is large, you will notice that the result is fetched almost instantaneously. For even larger files, you can specify the blocksize parameter, which determines the number of bytes to break up the file into.
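
For instance, here is a minimal sketch (the 64 MB block size is an arbitrary choice):

dask_df = dd.read_csv("large_dataset.csv", blocksize="64MB")
dask_df.npartitions  # number of partitions resulting from the chosen block size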

Similar to how Dask Arrays contain chunks of small NumPy arrays, Dask is designed to handle multiple small Pandas DataFrames arranged along the row index.

✨ Selecting columns and element-wise operations

In this example, we're doing some pretty straightforward column operations on our Dask DataFrame, called dask_df. We're adding the values from the column Numeric_0 to the result of multiplying the values from Numeric_9 and Numeric_3. We store the outcome in a variable named result.

result = (
    dask_df["Numeric_0"] + dask_df["Numeric_9"] * dask_df["Numeric_3"]
)

result.compute().head()

As we’ve mentioned, Dask is a bit different from traditional computing tools in that it doesn't immediately execute these operations. Instead, it creates a kind of 'plan' called a task graph to carry out these operations later on. This approach allows Dask to optimize the computations and parallelize them when needed. The compute() function triggers Dask to finally perform these computations, and head() just shows us the first few rows of the result.
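
If you have the optional graphviz dependency installed, you can render this task graph to a file before triggering any computation (a sketch):

# Renders the task graph to an image file instead of computing the result
result.visualize(filename="task_graph.png")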

⚡️ Conditional filtering

Now, let's look at how Dask can filter data. We're selecting rows from our DataFrame where the value in the "Categorical_5" column is "A".

This filtering process is similar to how you'd do it in pandas, but with a twist - Dask does this operation lazily. It prepares the task graph for this operation but waits to execute it until we call compute(). When we run head(), we get to see the first few rows of our filtered DataFrame.

dask_df[dask_df["Categorical_5"] == "A"].compute().head()

✨ Common summary statistics

Next, we're going to generate some common summary statistics using Dask's describe() function.

It gives us a handful of descriptive statistics for our DataFrame, including the mean, standard deviation, minimum, maximum, and so on. As with our previous examples, Dask prepares the task graph for this operation when we call describe(), but it waits to execute it until we call compute().

dask_df.describe().compute()
dask_df["Categorical_3"].value_counts().compute().head()

We also use value_counts() to count the number of occurrences of each unique value in the "Categorical_3" column. We trigger the operation with compute(), and head() shows us the most common values.

✨ Groupby

Finally, let's use the groupby() function to group our data based on values in the "Categorical_8" column. Then we select the "Numeric_7" column and calculate the mean for each group.

This is similar to how you might use ‘groupby()’ in pandas, but as you might have guessed, Dask does this lazily. We trigger the operation with compute(), and head() displays the average of the "Numeric_7" column for the first few groups.

dask_df.groupby("Categorical_8")["Numeric_7"].mean().compute().head()

⚡️ Lazy evaluation

Now, let’s explore the use of the compute function at the end of each code block.

Dask evaluates code blocks in lazy mode compared to Pandas’ eager mode, which returns results immediately.

To draw a parallel in cooking, lazy evaluation is like preparing ingredients and chopping vegetables in advance but only combining them to cook when needed. The compute function serves that purpose.

In contrast, eager evaluation is like throwing ingredients into the fire to cook as soon as they are ready. This approach ensures everything is ready to serve at once.

Lazy evaluation is key to Dask’s excellent performance as it provides:

  1. Reduced computation. Expressions are evaluated only when needed (when compute is called), avoiding unnecessary intermediate results that may not be used in the final result.
  2. Optimal resource allocation. Lazy evaluation avoids allocating memory or processing power to intermediate results that may not be required.
  3. Support for large datasets. This method processes data elements on-the-fly or in smaller chunks, enabling efficient utilization of memory resources.

When the results of compute are returned, they are given as Pandas Series/DataFrames or NumPy arrays instead of native Dask DataFrames.

type(dask_df)
dask.dataframe.core.DataFrame
type(
    dask_df[["Numeric_5", "Numeric_6", "Numeric_7"]].mean().compute()
)
pandas.core.series.Series

The reason for this is that most data manipulation operations return only a subset of the original dataframe, taking up much smaller space. So, there won’t be any need to use parallelism of Dask, and you continue the rest of your workflow either in pandas or NumPy.

🪐 Dask Bags and Dask Delayed for Unstructured Data

Dask Bags and Dask Delayed are two components of the Dask library that provide powerful tools for working with unstructured or semi-structured data and enabling lazy evaluation.

While in the past, tabular data was the most common, today’s datasets often involve unstructured files such as images, text files, videos, and audio. Dask Bags provides the functionality and API to handle such unstructured files in a parallel and scalable manner.

For example, let’s consider a simple illustration:

# Create a Dask Bag from a list of strings
b = db.from_sequence(["apple", "banana", "orange", "grape", "kiwi"])

# Filter the strings that start with the letter 'a'
filtered_strings = b.filter(lambda x: x.startswith("a"))

# Map a function to convert each string to uppercase
uppercase_strings = filtered_strings.map(lambda x: x.upper())

# Compute the result as a list
result = uppercase_strings.compute()

print(result)
['APPLE']

In this example, we create a Dask Bag b from a list of strings. We then apply operations on the Bag to filter the strings that start with the letter 'a' and convert them to uppercase using the filter() and map() functions, respectively. Finally, we compute the result as a list using the compute() method and print the output.

Now imagine that you can perform even more complex operations on billions of similar strings stored in a text file. Without the lazy evaluation and parallelism offered by Dask Bags, you would face significant challenges.

As for Dask Delayed, it provides even more flexibility and introduces lazy evaluation and parallelism to various other scenarios. With Dask Delayed, you can convert any native Python function into a lazy object using the @dask.delayed decorator.

Here is a simple example:

%%time

import time

@dask.delayed
def process_data(x):
    # Simulate some computation
    time.sleep(1)
    return x**2


# Generate a list of inputs
inputs = range(1000)

# Apply the delayed function to each input
results = [process_data(x) for x in inputs]

# Compute the results in parallel
computed_results = dask.compute(*results)
CPU times: user 260 ms, sys: 68.1 ms, total: 328 ms
Wall time: 32.2 s

In this example, we define a function process_data decorated with @dask.delayed. The function simulates some computational work by sleeping for 1 second and then returning the square of the input value.

Without parallelism, performing this computation on 1000 inputs would have taken more than 1000 seconds. However, with Dask Delayed and parallel execution, the computation only took about 32 seconds (the wall time shown above).

This example demonstrates the power of parallelism in reducing computation time by efficiently distributing the workload across multiple cores or workers.

That’s what parallelism is all about. For more information, see https://docs.dask.org/en/stable/
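
Note that the examples above used Dask's default local scheduler. If you deployed a Dask cluster on the DSRI (e.g. with the Helm chart described in the Deploy Dask Cluster page), you can point a distributed client at its scheduler instead, so that subsequent compute calls run on the cluster workers. A hypothetical sketch, where the scheduler address depends on your deployment:

from dask.distributed import Client

# Hypothetical address: the Helm chart exposes a scheduler service,
# typically reachable inside the project on port 8786
client = Client("tcp://dask-scheduler:8786")

# .compute() / dask.compute() calls now run on the cluster workers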


- +

Parallelization using Dask

🧊 Installation

!pip install "dask[complete]"
import dask

dask.__version__
'2023.5.0'
import dask.array as da
import dask.bag as db
import dask.dataframe as dd
import numpy as np
import pandas as pd

🪐 Basic Concepts of Dask

On a high-level, you can think of Dask as a wrapper that extends the capabilities of traditional tools like pandas, NumPy, and Spark to handle larger-than-memory datasets.

When faced with large objects like larger-than-memory arrays (vectors) or matrices (dataframes), Dask breaks them up into chunks, also called partitions.

For example, consider the array of 12 random numbers in both NumPy and Dask:

narr = np.random.rand(12)

narr
array([0.44236558, 0.00504448, 0.87087911, 0.468925  , 0.37513511,
0.22607761, 0.83035297, 0.07772372, 0.61587933, 0.82861156,
0.66214299, 0.90979423])
darr = da.from_array(narr, chunks=3)
darr
dask table

The image above shows that the Dask array contains four chunks as we set chunks to 3. Under the hood, each chunk is a NumPy array in itself.

To fully appreciate the benefits of Dask, we need a large dataset, preferably over 1 GB in size. Consider the autogenerated data from the script below:

import string

# Set the desired number of rows and columns
num_rows = 5_000_000
num_cols = 10
chunk_size = 100_000

# Define an empty DataFrame to store the chunks
df_chunks = pd.DataFrame()

# Generate and write the dataset in chunks
for i in range(0, num_rows, chunk_size):
# Generate random numeric data
numeric_data = np.random.rand(chunk_size, num_cols)

# Generate random categorical data
letters = list(string.ascii_uppercase)
categorical_data = np.random.choice(letters, (chunk_size, num_cols))

# Combine numeric and categorical data into a Pandas DataFrame
df_chunk = pd.DataFrame(np.concatenate([numeric_data, categorical_data], axis=1))

# Set column names for better understanding
column_names = [f'Numeric_{i}' for i in range(num_cols)] + [f'Categorical_{i}' for i in range(num_cols)]
df_chunk.columns = column_names

# Append the current chunk to the DataFrame holding all chunks
df_chunks = pd.concat([df_chunks, df_chunk], ignore_index=True)

# Write the DataFrame chunk to a CSV file incrementally
if (i + chunk_size) >= num_rows or (i // chunk_size) % 10 == 0:
df_chunks.to_csv('large_dataset.csv', index=False, mode='a', header=(i == 0))
df_chunks = pd.DataFrame()
dask_df = dd.read_csv("large_dataset.csv")

dask_df.head()

Even though the file is large, you will notice that the result is fetched almost instantaneously. For even larger files, you can specify the blocksize parameter, which determines the number of bytes to break up the file into.

Similar to how Dask Arrays contain chunks of small NumPy arrays, Dask is designed to handle multiple small Pandas DataFrames arranged along the row index.

✨ Selecting columns and element-wise operations

In this example, we're doing some pretty straightforward column operations on our Dask DataFrame, called dask_df. We're adding the values from the column Numeric_0 to the result of multiplying the values from Numeric_9 and Numeric_3. We store the outcome in a variable named result.

result = (
dask_df["Numeric_0"] + dask_df["Numeric_9"] * dask_df["Numeric_3"]
)

result.compute().head()

As we’ve mentioned, Dask is a bit different from traditional computing tools in that it doesn't immediately execute these operations. Instead, it creates a kind of 'plan' called a task graph to carry out these operations later on. This approach allows Dask to optimize the computations and parallelize them when needed. The compute() function triggers Dask to finally perform these computations, and head() just shows us the first few rows of the result.

⚡️ Conditional filtering

Now, let's look at how Dask can filter data. We're selecting rows from our DataFrame where the value in the "Categorical_5" column is "A".

This filtering process is similar to how you'd do it in pandas, but with a twist - Dask does this operation lazily. It prepares the task graph for this operation but waits to execute it until we call compute(). When we run head(), we get to see the first few rows of our filtered DataFrame.

dask_df[dask_df["Categorical_5"] == "A"].compute().head()

✨ Common summary statistics

Next, we're going to generate some common summary statistics using Dask's describe() function.

It gives us a handful of descriptive statistics for our DataFrame, including the mean, standard deviation, minimum, maximum, and so on. As with our previous examples, Dask prepares the task graph for this operation when we call describe(), but it waits to execute it until we call compute().

dask_df.describe().compute()
dask_df["Categorical_3"].value_counts().compute().head()

We also use value_counts() to count the number of occurrences of each unique value in the "Categorical_3" column. We trigger the operation with compute(), and head() shows us the most common values.

✨ Groupby

Finally, let's use the groupby() function to group our data based on values in the "Categorical_8" column. Then we select the "Numeric_7" column and calculate the mean for each group.

This is similar to how you might use ‘groupby()’ in pandas, but as you might have guessed, Dask does this lazily. We trigger the operation with compute(), and head() displays the average of the "Numeric_7" column for the first few groups.

dask_df.groupby("Categorical_8")["Numeric_7"].mean().compute().head()

⚡️ Lazy evaluation

Now, let’s explore the use of the compute function at the end of each code block.

Dask evaluates code lazily, in contrast to Pandas' eager mode, which returns results immediately.

To draw a parallel in cooking, lazy evaluation is like preparing ingredients and chopping vegetables in advance but only combining them to cook when needed. The compute function serves that purpose.

In contrast, eager evaluation is like throwing ingredients into the fire to cook as soon as they are ready. This approach ensures everything is ready to serve at once.

Lazy evaluation is key to Dask’s excellent performance as it provides:

  1. Reduced computation. Expressions are evaluated only when needed (when compute is called), avoiding unnecessary intermediate results that may not be used in the final result.
  2. Optimal resource allocation. Lazy evaluation avoids allocating memory or processing power to intermediate results that may not be required.
  3. Support for large datasets. This method processes data elements on-the-fly or in smaller chunks, enabling efficient utilization of memory resources.

When the results of compute are returned, they are given as Pandas Series/DataFrames or NumPy arrays instead of native Dask DataFrames.

type(dask_df)
dask.dataframe.core.DataFrame
type(
dask_df[["Numeric_5", "Numeric_6", "Numeric_7"]].mean().compute()
)
pandas.core.series.Series

The reason is that most data manipulation operations return only a subset of the original dataframe, which takes up much less space. At that point there is no need for Dask's parallelism anymore, and you can continue the rest of your workflow in pandas or NumPy.

🪐 Dask Bags and Dask Delayed for Unstructured Data

Dask Bags and Dask Delayed are two components of the Dask library that provide powerful tools for working with unstructured or semi-structured data and enabling lazy evaluation.

While in the past, tabular data was the most common, today’s datasets often involve unstructured files such as images, text files, videos, and audio. Dask Bags provides the functionality and API to handle such unstructured files in a parallel and scalable manner.

For example, let’s consider a simple illustration:

import dask.bag as db

# Create a Dask Bag from a list of strings
b = db.from_sequence(["apple", "banana", "orange", "grape", "kiwi"])

# Filter the strings that start with the letter 'a'
filtered_strings = b.filter(lambda x: x.startswith("a"))

# Map a function to convert each string to uppercase
uppercase_strings = filtered_strings.map(lambda x: x.upper())

# Compute the result as a list
result = uppercase_strings.compute()

print(result)
['APPLE']

In this example, we create a Dask Bag b from a list of strings. We then apply operations on the Bag to filter the strings that start with the letter 'a' and convert them to uppercase using the filter() and map() functions, respectively. Finally, we compute the result as a list using the compute() method and print the output.

Now imagine that you can perform even more complex operations on billions of similar strings stored in a text file. Without the lazy evaluation and parallelism offered by Dask Bags, you would face significant challenges.
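
As a sketch of what this could look like, assuming a hypothetical collection of text files under data/, Dask Bags can read and process them lazily and in parallel:

import dask.bag as db

# Lazily read every matching file, one bag element per line
lines = db.read_text("data/*.txt")  # hypothetical path

# Count the non-empty lines across all files, in parallel
non_empty = lines.filter(lambda line: line.strip() != "").count()

print(non_empty.compute())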

As for Dask Delayed, it provides even more flexibility and introduces lazy evaluation and parallelism to various other scenarios. With Dask Delayed, you can convert any native Python function into a lazy object using the @dask.delayed decorator.

Here is a simple example:

%%time

import time

import dask


@dask.delayed
def process_data(x):
    # Simulate some computation
    time.sleep(1)
    return x**2


# Generate a list of inputs
inputs = range(1000)

# Apply the delayed function to each input
results = [process_data(x) for x in inputs]

# Compute the results in parallel
computed_results = dask.compute(*results)
CPU times: user 260 ms, sys: 68.1 ms, total: 328 ms
Wall time: 32.2 s

In this example, we define a function process_data decorated with @dask.delayed. The function simulates some computational work by sleeping for 1 second and then returning the square of the input value.

Without parallelism, performing this computation on 1000 inputs would have taken more than 1000 seconds. However, with Dask Delayed and parallel execution, the computation only took about 32 seconds.

This example demonstrates the power of parallelism in reducing computation time by efficiently distributing the workload across multiple cores or workers.

That’s what parallelism is all about. For more information, see https://docs.dask.org/en/stable/



Databases

SQL databases

You can easily create a database from the templates available in the DSRI OpenShift web UI catalog:

Databases in catalog web UI

You can connect to a database from another application in the same project by using the database service name as hostname:

Databases in catalog web UI

You can also use the oc CLI to get the services in your project:

oc get services

Start PostgreSQL 🐘

Use the Postgresql template in the DSRI OpenShift web UI catalog to start a SQL database.

Connect to the database

When the database has been deployed, you can connect from another pod using your favorite language and connector.

Example with the psql Command Line Interface:

apt-get update && apt-get install postgresql-client -y

Connect to the Postgresql database using the service name (change depending on the username and database name you chose):

psql -h postgresql-db -U postgres db

Check out the dsri-demo repository for a quick demo of accessing and using a PostgreSQL database from a Jupyter notebook on the DSRI.
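
For instance, a minimal Python sketch using psycopg2 (assuming the service name postgresql-db, user postgres and database db from the command above; the password is the one you set in the template):

import psycopg2

# The database service name is used as hostname
conn = psycopg2.connect(
    host="postgresql-db",
    user="postgres",
    password="my-password",  # hypothetical: the password set in the template
    dbname="db",
)
with conn.cursor() as cur:
    cur.execute("SELECT version()")
    print(cur.fetchone())
conn.close()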

Start MySQL 🐬

Use the MySQL template in the DSRI OpenShift web UI catalog.

Connect to the database

When the database has been deployed, you can connect from another pod using your favorite language and connector.

Example with the mysql Command Line Interface:

apt-get update && apt-get install mariadb-client -y

Connect to the MySQL database using the service name:

mysql -h example-mysql -p

Check out the dsri-demo repository for a quick demo of accessing and using a MySQL database from a Jupyter notebook on the DSRI.

Alternatively, MySQL databases can be started using Helm, see the Helm documentation page for more details.

NoSQL databases

MongoDB 🌿

MongoDB is a general purpose, document-based, distributed database built for modern application developers and for the cloud era.

Use the MongoDB template in the DSRI OpenShift web UI catalog.

Connect to the database

Use the service name as hostname to connect from another pod in the same project.
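
For instance, a minimal pymongo sketch (assuming the service is named mongodb, listens on the default port 27017, and uses the credentials you set in the template):

from pymongo import MongoClient

# The service name is used as hostname
client = MongoClient("mongodb://username:password@mongodb:27017/")
db = client["mydatabase"]  # hypothetical database name
db["mycollection"].insert_one({"hello": "world"})
print(db["mycollection"].find_one())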

Redis 🎲

Redis is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs.

Use the Redis template in the DSRI OpenShift web UI catalog.

Connect to the database

Use the service name as hostname to connect from another pod in the same project.
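
For instance, a minimal sketch with the redis Python client (assuming the service is named redis and listens on the default port 6379):

import redis

# The service name is used as hostname
r = redis.Redis(host="redis", port=6379)
r.set("greeting", "hello")
print(r.get("greeting"))  # b'hello'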

Graph databases

Search for the Virtuoso triplestore template in the DSRI web UI catalog. Instantiate the template to create a Virtuoso triplestore in your project.

The deployment is based on the latest open source version of Virtuoso: https://hub.docker.com/r/openlink/virtuoso-opensource-7

Connect to the database

Use the service name as hostname to connect from another pod in the same project.
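
For instance, a minimal sketch using SPARQLWrapper (assuming the service is named virtuoso and exposes Virtuoso's default SPARQL endpoint on port 8890):

from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("http://virtuoso:8890/sparql")
sparql.setQuery("SELECT * WHERE { ?s ?p ?o } LIMIT 10")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for binding in results["results"]["bindings"]:
    print(binding)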

Ontotext GraphDB triplestore

Use the official DockerHub image if you have an enterprise license, or build the GraphDB free edition image from graphdb-docker on GitHub.

After copying the .zip file in the graphdb-docker/free-edition folder, go to the graphdb-docker folder in your terminal:

cd graphdb-docker

Before creating your GraphDB ImageStream, make sure you are in the right project:

oc project my-project

Create the ImageStream for GraphDB:

oc new-build --name graphdb --binary

Build the image on the DSRI and save it in the ImageStream:

oc start-build graphdb --from-dir=free-edition --follow --wait

You can now use the Ontotext GraphDB template to deploy a GraphDB instance on DSRI.

Use the name of the ImageStream when instantiating the template. You can check that the image was properly built in Search > Filter Resources for ImageStreams.

Connect to the database

Use the service name as hostname to connect from another pod in the same project.

AllegroGraph

AllegroGraph® is a modern, high-performance, persistent graph database. It supports SPARQL, RDFS++, and Prolog reasoning from numerous client applications.

AllegroGraph has not been tested on DSRI yet, but it can be deployed on Kubernetes using Helm, cf. https://www.github.com/franzinc/agraph-examples/tree/master/clustering%2Fkubernetes%2Fmmr%2Fkubernetes-mmr.md


Deploy from a Docker image

The DSRI is an OpenShift OKD cluster, based on Kubernetes. It uses Docker containers to deploy services and applications in pods.

Any service or job can be run in a Docker container. If you want to run a service in Python for example, you will find Docker images for Python.

  • You can find already existing images for the service you want to run on DockerHub
  • or create a custom Docker image in a few minutes.

Find an image for your service

The easiest way to deploy a service on the DSRI is to use a Docker image from DockerHub 🐳.

Search for an image for your service published on DockerHub

  • Google "dockerhub my_service_name"
  • Sometimes multiple images can be found for your service. Take the official image when possible, or the one most relevant to your use-case.
Deploy from a Dockerfile

If no suitable image can be found on DockerHub, it can be deployed from a Dockerfile. See below to do so.


Deploy the image on DSRI

Once you have a Docker image for your application you can deploy it using the DSRI web UI.

Go to the Overview page of your project.

  • Click the Add to Project button in top right corner > Deploy Image
  • Select to deploy from Image Name
    • Provide your image name, e.g. umdsri/freesurfer
    • Optionally change the Name; it needs to be unique within the project.
    • Click Deploy.
Deploy image from UI
Fix a common problem

Once the application is deployed it will most probably fail because it has not been optimized to work with OpenShift random user ID. You will need to add an entry to the deployment to enable your image to run using any user ID.

Go to Topology, click on your application node, click on the Actions button of your application details, and Edit deployment. In the deployment YAML search for spec: which has a containers: as child, and add the following under spec:

spec:
  serviceAccountName: anyuid
  containers: ...
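
If you prefer the command line, a merge patch along these lines should achieve the same (a sketch, assuming your deployment is named my-app; use dc/my-app instead if your application uses a DeploymentConfig):

oc patch deployment/my-app --type=merge -p '{"spec": {"template": {"spec": {"serviceAccountName": "anyuid"}}}}'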
Access the application

You should now see your pod deployed on the Overview page of your project.

You can expose routes to this pod in the Overview page: Create route


Build and push a new Docker image

In case there is no Docker image for your application, you can build and push one.

To build and push a Docker image you will need to have Docker installed.

Define a Dockerfile

If no images are available on DockerHub, it is still possible that the developers created a Dockerfile to build the image without pushing it to DockerHub. Go to the GitHub/GitLab source code repository and search for a Dockerfile; it can usually be found in

  • the source code repository root folder
  • a docker subfolder
  • as instructions in the README.md

If no Dockerfile is available, we will need to define one.
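
As an illustration, here is a minimal hypothetical Dockerfile for a Python service (assuming a main.py and a requirements.txt at the root of the repository):

FROM python:3.11-slim
WORKDIR /app
# Install the dependencies first to benefit from Docker layer caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy the rest of the source code
COPY . .
CMD ["python", "main.py"]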

Contact us

Feel free to contact us to get help with this, especially if you are unfamiliar with Docker.

Build the image

Once a Dockerfile has been defined for the service you can build it by running the following command from the source code root folder, where the Dockerfile is:

docker build -t username/my-service .

Build arguments can be provided when starting the build; they need to be declared in the Dockerfile to be used.

docker build -t username/my-service --build-arg MY_ARG=my_value .
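
In the Dockerfile, the argument is declared with ARG and can then be used in subsequent instructions (MY_ARG being the illustrative name from the command above):

# Declare the build argument, optionally with a default value
ARG MY_ARG=default_value
# Use it in a following instruction
RUN echo "Building with MY_ARG=$MY_ARG"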

Push to DockerHub

Before pushing it to DockerHub you will need to create a repository. To do so, click on Create Repository.

  • DockerHub is free for public repositories
  • Images can be published under your DockerHub user or an organization you belong to

Login to DockerHub, if not already done:

docker login

Push the image previously built to DockerHub:

docker push username/my-service

You can link DockerHub to your source code repository and ask it to build the Docker image automatically (from the Dockerfile in the root folder). It should take between 10 and 30 minutes for DockerHub to build your image.

Deploy from a local Dockerfile

You can also deploy a service on the DSRI directly from a local Dockerfile, to avoid using DockerHub. See this page for more instructions on deploying a service from a local Dockerfile.


deploy-gitlab-runner

First, obtain a GitLab Runner registration token via the GitLab web interface.

TODO: add screenshot

Add "GitLab Runner" operator to your project from the Operators --> OperatorHub page. -Make sure you choose the "certified" GitLab Runner (v1.4.0) The community runner (v1.10.0) is a bit more up to date, but currently does not work.

Install in a specific namespace on the cluster. Choose your namespace in the dropdown.

Create registration token secret:

---
apiVersion: v1
kind: Secret
metadata:
  name: gitlab-runner-secret
type: Opaque
stringData:
  runner-registration-token: <insert your registration token>

Save this as gitlab-runner-secret.yaml and create it:

oc create -f gitlab-runner-secret.yaml

Alternatively, this should also work:

oc create secret generic gitlab-runner-secret --from-literal=runner-registration-token=<insert your registration token>

Add the following to the ConfigMap of the GitLab Runner operator:

[[runners]]
  executor = "kubernetes"
  [runners.kubernetes]
    [runners.kubernetes.volumes]
      [[runners.kubernetes.volumes.empty_dir]]
        name = "empty-dir"
        mount_path = "/"
        medium = "Memory"

Save this snippet to /tmp/customconfig and create the ConfigMap from it:

oc create configmap custom-config-toml --from-file config.toml=/tmp/customconfig

Create the GitLab Runner custom resource:

apiVersion: apps.gitlab.com/v1beta2
kind: Runner
metadata:
  name: gitlab-runner
spec:
  gitlabUrl: https://gitlab.maastrichtuniversity.nl
  token: gitlab-runner-secret
  config: custom-config-toml
  tags: openshift
An alternative definition (kept for reference, do not use):

apiVersion: apps.gitlab.com/v1beta2
kind: Runner
metadata:
  name: gitlab-runner
spec:
  gitlabUrl: https://gitlab.maastrichtuniversity.nl
  buildImage: alpine
  token: gitlab-runner-secret
  tags: openshift
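
Once the runner is registered, CI jobs can target it through the tag defined above. A minimal hypothetical .gitlab-ci.yml job:

test-job:
  tags:
    - openshift
  script:
    - echo "Running on the DSRI GitLab runner"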

Jupyter Notebooks

🪐 Start JupyterLab

Start a JupyterLab container based on the official Jupyter docker stacks (debian), with sudo privileges to install anything you need (e.g. pip or apt packages)

You can start a container using the JupyterLab template in the Catalog web UI (make sure the Templates checkbox is checked)

When instantiating the template you can provide a few parameters, such as:

  • Password to access the notebook
  • Optionally you can provide a git repository to be automatically cloned in the JupyterLab (if there is a requirements.txt packages will be automatically installed with pip)
  • Docker image to use for the notebook (see below for more details on customizing the docker image)
  • Your git username and email to automatically configure git

The DSRI will automatically create a persistent volume to store data you will put in the /home/jovyan/work folder (the folder used by the notebook interface). You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims.

Deploy Jupyter

With this template you can use any image based on the official Jupyter docker stack: https://github.com/jupyter/docker-stacks

  • ghcr.io/maastrichtu-ids/jupyterlab:latest: custom image for data science on the DSRI, with additional kernels (Java), conda integration, VisualStudio Code, and autocomplete for Python
  • ghcr.io/maastrichtu-ids/jupyterlab:knowledge-graph: custom image for working with knowledge graph on the DSRI, with SPARQL kernel and OpenRefine
  • jupyter/scipy-notebook: some packages for science are preinstalled
  • jupyter/datascience-notebook: with Julia kernel
  • jupyter/tensorflow-notebook: with tensorflow package pre-installed
  • jupyter/r-notebook: to work with R
  • jupyter/pyspark-notebook: if you want to connect to a Spark cluster
  • jupyter/all-spark-notebook: if you want to run Spark locally in the notebook

You can also build your own image, we recommend to use this repository as example to extend a JupyterLab image: https://github.com/MaastrichtU-IDS/jupyterlab

📦️ Manage dependencies with Conda

With the ghcr.io/maastrichtu-ids/jupyterlab:latest image, you can easily start notebooks from the JupyterLab Launcher page using installed conda environments, provided that nb_conda_kernels and ipykernel are installed in those environments.

  • You can pass a Git repository URL which contains an environment.yml file in the root folder when starting JupyterLab, the conda environment will automatically be installed at the start of your container, and available in the JupyterLab Launcher page. You can use this repository as example: https://github.com/MaastrichtU-IDS/dsri-demo

  • Or you can install it directly in a running JupyterLab (we use mamba which is like conda but faster):

    mamba env create -f environment.yml

    You'll need to wait for 1 or 2 minutes before the new conda environment becomes available on the JupyterLab Launcher page.

You can easily install an environment with a different version of Python if you need it. Here is an example of an environment.yml file to create an environment with Python 3.9, install the minimal dependencies required to easily start notebooks in this environment with conda, and install a pip package:

name: custom-env
channels:
  - defaults
  - conda-forge
  - anaconda
dependencies:
  - python=3.9
  - ipykernel
  - nb_conda_kernels
  - pip
  - pip:
      - matplotlib

⚠️ You cannot use conda activate in a Docker container, so you will need to either open a notebook using the kernel for your conda env, or use conda run to run scripts in the new environment:

conda run -n custom-env python --version

🐙 Use git in JupyterLab

You can always use git from the terminal.

Configure username

Before pushing back to GitHub or GitLab, you will need to configure your username and email in the terminal:

git config --global user.name "Jean Dupont"
git config --global user.email jeandupont@gmail.com
Save your password

You can run this command to ask git to save your password for 15 minutes:

git config credential.helper cache

Or store the password in a plain text file:

git config --global credential.helper 'store --file ~/.git-credentials'
Git tip

We recommend using SSH instead of HTTPS connections when possible; check out here how to generate SSH keys and use them with your GitHub account.

You can also enable and use the JupyterLab Git extension to clone and manage your git repositories.

It will prompt you for a username and password if the repository is private.

JupyterLab Git extension

🐶 Example

Initialize repository

Initialize repo

Include git details in DSRI project setup

git details

Verify automatic deployment

workspacerequirements
JupyterHub

Before you begin, download the config.yaml

Download the preconfigured config.yaml from our GitHub repository. The default config that is provided by JupyterHub will not work.

Installing the JupyterHub Helm Chart repository

After you have created a project you can start with installing the JupyterHub Helm Chart. If you do not have access to DSRI or created a project yet, and you need to find out how, please refer to our documentation.

Helm Chart already available

The Helm Chart should be already made available for everyone to use on the DSRI platform. There will be no need to install the repository yourself.

In Developer mode in your project, go to Helm in the sidepanel (1). Next, click on Create and choose Repository (2).

Then fill in the Name, Display Name, give it a Description and fill in the URL: https://hub.jupyter.org/helm-chart/.

Next, click Create.

Installing the JupyterHub Helm Chart

info

At the moment the latest -and only- Helm Chart version which is supported by DSRI is version 3.3.8. Newer versions will not work, and older versions are not tested and/or configured!

In Developer mode in your project, go to Helm in the sidepanel (1). Next, click on Create and choose Helm Release (2)

Search for jupyterhub (or the name you gave the repository if you added the repository yourself), and choose the JupyterHub Helm Chart (1).

Click Create.

Click the Chart version drop down menu (1).

And choose the right Chart version: 3.3.8 (1). Note that this is an important step, as we only support version 3.3.8 at the moment. Newer versions do not work yet, and older versions were not configured or tested!

Now, change the config with the content of the config.yaml you have downloaded from our GitHub repository. Copy the content of the config.yaml and paste it in the highlighted box to replace the old with the new config. Click Create to install the JupyterHub Helm Chart.

Creating a secured route

Create a secured route, with TLS edge termination.

In Developer mode in your project, go to Project in the sidepanel (1). Next, click on Route (2).

Next, click Create.

Fill in the Name (1), choose the Service: proxy-public (2), choose the Target Port: 80 -> http (TCP) (3), tick the box Secure Route (4), and finally choose TLS Termination: Edge (5). Next, click Create, to create the route.

Upgrading the config.yaml

You can upgrade your config.yaml easily in the DSRI web UI if you would like to change certain settings, such as user's default persistent volume claims, authentication methods, and many more things. Note that in some cases users who created an account with an old authentication method will still have access via that method, make sure you set up your preferred authentication method before allowing users to authenticate and use the JupyterHub instance.
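
As an illustration, a hypothetical fragment of such a config.yaml change, increasing the default size of each user's persistent volume (the exact keys available are documented by the JupyterHub Helm Chart; treat this as a sketch, not our supported config):

singleuser:
  storage:
    capacity: 2Gi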

In Developer mode in your project, go to Helm in the sidepanel (1). Next, click on your Helm Chart Release (2).

Now, click the Actions drop down menu, and choose Upgrade (1).

In the box -highlighted in the picture below- you can make changes to the config.yaml. After you have made your changes, click Upgrade and your upgraded JupyterHub Helm Chart Release will automatically be deployed.

Configure JupyterHub

Feel free to submit a ticket to ask for help configuring your JupyterHub.

Deploying JupyterHub using the Command Line Interface (CLI) 🪐

Before you begin download the config.yaml

Download the preconfigured config.yaml from our GitHub repository. The default config that is provided by JupyterHub will not work.

Installing the JupyterHub Helm Chart repository

After you have created a project you can start with installing the JupyterHub Helm Chart. If you do not have access to DSRI or created a project yet, and you need to find out how, please refer to our documentation.

Helm Chart already available

The Helm Chart should be already made available for everyone to use on the DSRI platform. There will be no need to install the repository yourself.

Add the JupyterHub Helm Chart repository:

helm repo add jupyterhub https://hub.jupyter.org/helm-chart/
helm repo update

Installing the JupyterHub Helm Chart

info

At the moment the latest -and only- Helm Chart version which is supported by DSRI is version 3.3.8. Newer versions will not work, and older versions are not tested and/or configured!

Make sure you use the right config.yaml downloaded from our GitHub repository.

Install the Helm Chart using the following command:

helm upgrade --cleanup-on-fail \
--install jupyterhub jupyterhub/jupyterhub \
--version=3.3.8 \
--namespace=<NAMESPACE> \
--values config.yaml

<NAMESPACE> is the name of the namespace your project is in.

Creating a secured route

Create a secured route, with TLS edge termination:

oc create route edge <NAME OF ROUTE> --namespace <NAMESPACE> --service=proxy-public --port=http

<NAMESPACE> is the name of the namespace your project is in. <NAME OF ROUTE> is the name of the route.

Upgrading the config.yaml

Run the following command with your new config.yaml:

helm upgrade --cleanup-on-fail \
--install jupyterhub jupyterhub/jupyterhub \
--version=3.3.8 \
--namespace=<NAMESPACE> \
--values config.yaml

<NAMESPACE> is the name of the namespace your project is in.

Note that the namespace should be the same namespace as the one where your original deployment was initiated!

Configure JupyterHub

Feel free to submit a ticket to ask for help configuring your JupyterHub.


Matlab

Note that we are not experts in Matlab: feel free to contact MathWorks support directly if you are having any issues with their official Docker image. Since it is closed source, we cannot fix it ourselves.

You can request official support from Matlab at this address after login and connecting your account to the UM license: https://nl.mathworks.com/academia/tah-portal/maastricht-university-31574866.html#get

Use the official Matlab image

Start Matlab with a desktop UI accessible directly in your web browser at an automatically generated URL.

Go to the Catalog, make sure Templates are displayed (box checked), and search for Matlab, and provide the right parameters:

  • You will need to provide the password you will use to access the Matlab UI when filling the template.
  • You can also change the Matlab image version, see the latest version released in the official Matlab Docker image documentation

Once Matlab starts you can access it through 2 routes (URLs), which can be accessed when clicking on the Matlab node in the Topology:

  • The main matlab route to access Matlab desktop UI directly in your web browser. It is recommended to use this route.
  • The matlab-vnc route can be used to access Matlab using a VNC client (you will need to use the full URL to your Matlab VNC route). Only use it if you know what you're doing.

Use a stable Matlab image

The official Matlab image is infamous for showing a black screen after a few hours of use, making it a bit cumbersome to rely on.

We have a solution if you need a more stable Matlab image, though it requires a few more manual steps:

  • Use the Ubuntu with GUI template to set up an Ubuntu pod on the DSRI with the image ghcr.io/vemonet/docker-ubuntu-vnc-desktop:latest
  • Start firefox and browse to https://nl.mathworks.com
  • Login with your personal Matlab account, create one if you don’t have it
  • Choose get matlab and download the Linux Matlab version
  • Open a terminal window and run the following commands:
sudo apt-get update
sudo apt-get install unzip
# Unzip the previously downloaded Matlab installation file,
# then start the Matlab installation with:
sudo ./install

You will then be guided through the Matlab installation process:

  • Fill in your personal matlab account credentials
  • ⚠️ Fill in the username as used in the Ubuntu environment, in your case it will most probably be root (Matlab gives a license error if this is not correct, check with whoami in the terminal when in doubt)
  • Select all the Matlab modules you want to be installed
  • Check "symbolic link" and "Improve……"

Use Matlab in Jupyter

You can also use mathworks/jupyter-matlab-proxy. You can easily install it in a JupyterLab image with pip:

pip install jupyter-matlab-proxy

Follow the instructions on the mathworks/jupyter-matlab-proxy repository to access it.

Deploy Matlab on GPU

We use the Matlab template in the DSRI catalog to deploy a pre-built Nvidia Matlab Deep Learning Container on CPU or GPU nodes. See the official documentation from MathWorks for more details about this image.

Request access to Matlab

To be able to access the Matlab on GPU template you will need to ask the DSRI admins to enable it in your project.

2 options are available to connect to your running Matlab pod terminal:

  • Go to the matlab pod page on the DSRI web UI
  • Or connect from your terminal with oc rsh MATLAB_POD_ID

Type bash when first accessing the terminal to have a better experience.

Type cd /ContainerDeepLearningData to go to the persistent volume, and use this volume to store all data that should be preserved.

Type matlab to access Matlab from the terminal.

It is possible to access the Matlab desktop UI through VNC and a web UI, but the script to start it in /bin/run.sh seems to face some errors; let us know if you have any luck with this.

By default the image runs with the matlab user, which does not have sudo privileges; you can run the container as root if you need to install packages which require admin privileges.

Build your own Matlab image

Follow the instructions at: https://github.com/mathworks-ref-arch/matlab-dockerfile

This will require you to retrieve the Matlab installation files to build your own container.

Once all the files have been properly placed in the folder and the license server URL has been set, you can start the build on DSRI by following the documentation to deploy from a Dockerfile

License server not available on your laptop

If you try to build Matlab directly on your laptop it will most probably fail as your machine might not have access to the license server. You will need to build the Matlab container directly on DSRI with oc start-build

Once Matlab is deployed, you will need to edit the matlab deployment YAML before it works.

Go to Topology, click on the Matlab node, click on the Actions button of the matlab details, and Edit deployment. In the deployment YAML search for spec: which has a containers: as child, and add the following under spec:

spec:
  serviceAccountName: anyuid
  containers: ...

Your Matlab container should now be running!

2 options are available to connect to your running Matlab pod terminal:

  • Go to the matlab pod page on the DSRI web UI
  • Or connect from your terminal with oc rsh MATLAB_POD_ID

You can access Matlab from the terminal by running matlab

Unfortunately MathWorks did not expect their users to need the graphical UI when using Matlab in containers, so only the command line is available by default. You can find more information to enable the Matlab UI in this issue.


GPU applications

GPUs on the DSRI can only be used by one workspace at a time, and there is a limited number of GPUs (8).

⚠️ We currently provide a free access to those GPUs, but with the growing demands for GPUs it might get more restricted. As consideration for others, and to help keep this system open, it is important to make a maximum use of your GPUs when you get access to them.

Unfortunately, job scheduling is currently not mature enough on Kubernetes; you can look into volcano.sh if you are interested, but it is still quite experimental.

To use the GPU on the DSRI you will go through this process:

  1. Deploy, prepare and debug your GPU workspace
  2. Book a GPU
  3. Once the booking is done you will receive an email about your reservation, and more emails when it starts and before it ends
  4. Enable the GPU in workspace when your booking starts, and make the best use of it!
Book a GPU

By default you do not have the permission to run applications on GPU, you need to make a reservation.

You can check the availability of our GPUs, and reserve GPU slots in the GPU booking calendar 📅

Prepare your GPU workspace

You will first need to start your workspace without the GPU enabled, you can then prepare your experiments: clone the code, download the data, prepare scripts to install all requirements (the workspace will be restarted when you enable the GPU).

About the docker images

We are mainly using images provided by Nvidia, with all required drivers and optimizations for GPU pre-installed. You can access the workspace with JupyterLab and VisualStudio Code in your browser, and install dependencies with apt-get, conda or pip in the workspace.

We currently mainly use Tensorflow, PyTorch and CUDA, but any image available in the Nvidia catalog should be easy to deploy. Checkout this documentation for more details on how we build the optimized docker images for the DSRI GPUs. And feel free to extend the images to install any software you need.

Deploy the workspace

You can easily deploy your GPU workspace from the DSRI catalog:

  1. Go to the DSRI Catalog web UI: Click on Add to Project, then Browse Catalog
  2. Search the catalog for "GPU", and make sure the Template checkbox is enabled
  3. Choose the template: JupyterLab on GPU
  4. Follow the instructions to create the template in the DSRI web UI, all information about the images you can use are provided there. The most notable is the base image you want to use for your workspace (cuda, tensorflow or pytorch)

Access the workspace from the route created (the small arrow at the top right of your application bubble in the Topology page).

Prepare the workspace

You can now add your code and data in the persistent folder to be fully prepared when you will get access to the GPUs.

You can install dependencies with apt-get, conda or pip. We recommend your to use scripts stored in the persistent folder to easily install all your requirements, so you can reinstall them when we enable the GPU, as it restarts the workspace.

For more information on how to use conda/mamba to install new dependencies or complete environment (useful if you need to use a different version of python than the one installed by default) checkout this page.

⚠️ We recommend you to also try and debug your code on small sample using the CPU before getting the GPU, this way you will be able to directly start long running task when you get the GPU, instead of losing time debugging your code (it's probably not going to work on the first try, you know it).

You can find more details on the images we use and how to extend them in this repository.

Storage

Use the /workspace/persistent folder, which is the JupyterLab workspace, to store your code and data persistently. Note that loading data from the persistent storage will be slower than you might expect; this is due to the nature of the distributed storage. So try to optimize this part and avoid reloading your data multiple times, and let us know if it is too much of a problem, as we have some solutions to improve this.

Enable the GPU

You will receive an email when the GPU has been enabled in your project. You can then update your deployment to use the GPUs using either the oc command-line tool, or by editing the deployment configuration from the web UI

  • With the Command Line Interface, run the following command from the terminal of your laptop after having installed the oc command-line tool.

We use jupyterlab-gpu as the deployment name in the example; change it to yours if it is different.

oc patch dc/jupyterlab-gpu --type=json -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {"requests": {"nvidia.com/gpu": 1}, "limits": {"nvidia.com/gpu": 1}}}]'
  • Or through the web UI

In the Topology view, click on the circle representing your GPU application, then click on the Actions button in the top right of the screen, and click on Edit Deployment Config at the bottom of the list.

In the Deployment Config text editor, hit ctrl + f to search for "resources". You should see a line - resources: {} under containers:. Change this line to the following to enable the GPU in your application (and make sure the indentation matches the rest of the file):

        - resources:
            requests:
              nvidia.com/gpu: 1
            limits:
              nvidia.com/gpu: 1

Then wait for the pod to restart, or start it if it was stopped.

You can use the following command in the terminal of your container on the DSRI to see the current GPU usage:

nvidia-smi
Windows

When using the above command with the oc client on Windows you might receive an error like: error: unable to parse "'[{op:": yaml: found unexpected end of stream

This is because single quotation marks are handled differently on Windows. Try replacing the single quotation marks in the command with double quotation marks and the command should work.
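For example, a possible form of the patch command for the Windows command prompt, with the outer quotes switched to double quotes and the inner double quotes escaped with backslashes (this is a sketch; PowerShell may require different escaping, e.g. backticks):

oc patch dc/jupyterlab-gpu --type=json -p="[{\"op\": \"replace\", \"path\": \"/spec/template/spec/containers/0/resources\", \"value\": {\"requests\": {\"nvidia.com/gpu\": 1}, \"limits\": {\"nvidia.com/gpu\": 1}}}]"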

Disable the GPU

The GPU allocated to your workspace will be automatically disabled after your booking ends, at 9:00.

You can also manually disable the GPU from your app; the pod will be restarted automatically on a CPU node:

oc patch dc/jupyterlab-gpu --type=json -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {}}]'

Increase the number of GPUs

If you have been granted a 2nd GPU to speed up your experiment, you can easily increase the number of GPUs used by your workspace:

From the Topology view click on your application:

  1. Stop the application, by decreasing the number of pods to 0 (in the Details tab)
  2. Click on Options > Edit Deployment, then in the YAML of the deployment search for limits and change the number of GPUs assigned to your deployment to 2:
          resources:
            limits:
              nvidia.com/gpu: '2'
            requests:
              nvidia.com/gpu: '2'

You can also do it using the command line: make sure to stop the pod first, and replace jupyterlab-gpu with your app name in this command:

oc patch dc/jupyterlab-gpu --type=json -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {"requests": {"nvidia.com/gpu": 2}, "limits": {"nvidia.com/gpu": 2}}}]'
  3. Restart the pod for your application (the same way you stopped it)

Install GPU drivers in any image

You can also install the GPU drivers in any image and use this image directly.

See the latest official Nvidia docs to install the nvidia-container-runtime, which should contain all packages and drivers required to access the GPU from your application.

Here is an example of commands to add to a Debian-based Dockerfile to install the GPU drivers (note that this is not complete, you will need to check the latest instructions and do some research & development to get it to work):

# Add the Nvidia package repository, then install the container runtime
RUN curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | apt-key add - && \
    distribution=$(. /etc/os-release; echo $ID$VERSION_ID) && \
    curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \
    tee /etc/apt/sources.list.d/nvidia-container-runtime.list
RUN apt-get update && \
    apt-get install -y nvidia-container-runtime

Then, build your image in your DSRI project using oc from the folder where you put the Dockerfile (replace custom-app-gpu with your app name):

oc new-build --name custom-app-gpu --binary
oc start-build custom-app-gpu --from-dir=. --follow --wait
oc new-app custom-app-gpu

You will then need to edit the deployment to set serviceAccountName: anyuid and add persistent storage:

oc edit deployment/custom-app-gpu

Finally, when your reservation starts, check out the section above on how to enable the GPU in your workspace.

See also: official Nvidia docs for CUDA


RStudio

Start RStudio

Start a RStudio container based on the Rocker RStudio tidyverse images (Debian), with sudo privileges to install anything you need (e.g. pip or apt packages).

You can start a container using the RStudio template in the Catalog web UI (make sure the Templates checkbox is checked).

Provide a few parameters, and Instantiate the template. The username will be rstudio and the password will be what you configure yourself. The DSRI will automatically create a persistent volume to store the data you put in the /home/rstudio folder. You can find the persistent volumes in the DSRI web UI: go to the Administrator view > Storage > Persistent Volume Claims.

Deploy RStudio
Official image documentation

See the official Docker image documentation for more details about the container deployed.

Restricted RStudio with Shiny server

Start a RStudio application, with a complementary Shiny server, using a regular rstudio user, without sudo privileges.

Create the template in your project:

  • In the DSRI web UI, go to + Add, then click on YAML, add the content of the template-rstudio-shiny-restricted.yml file, and validate.

  • You can also do it using the terminal:

    oc apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/templates/restricted/template-rstudio-shiny-restricted.yml

Once the template has been created in your project, use the RStudio with Shiny server template in the OpenShift web UI catalog. It will automatically create a persistent storage for the data.

No sudo privileges

You will not have sudo privileges in the application.

Use Git in RStudio

The fastest way to get started is to use git from the terminal; for example, to clone a git repository use git clone.

You can also check how to enable Git integration in RStudio at https://support.rstudio.com/hc/en-us/articles/200532077

You can run this command to ask git to save your password for 15 min:

git config credential.helper cache
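The 15 min is the default timeout of the git credential cache; if needed, you can pass a longer timeout in seconds, for example:

git config credential.helper 'cache --timeout=3600'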

Or store the password/token in a plain text file:

git config --global credential.helper 'store --file ~/.git-credentials'

Before pushing back to GitHub or GitLab, you will need to configure your username and email in the terminal:

git config --global user.name "Jean Dupont"
git config --global user.email jeandupont@gmail.com
Git tip

We recommend using SSH instead of HTTPS connections when possible; check out here how to generate SSH keys and use them with your GitHub account.
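For example, a standard way to generate an SSH key pair in the workspace terminal and print the public key to add to your GitHub account (the email is a placeholder):

ssh-keygen -t ed25519 -C "your.email@example.com"
# Print the public key, then add it in your GitHub account SSH keys settings
cat ~/.ssh/id_ed25519.pub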

Run R jobs

You can visit this folder, which gives all the resources and instructions to run a standalone R job on the DSRI: https://github.com/MaastrichtU-IDS/dsri-demo/tree/main/r-job

If you want to run jobs directly from RStudio, check out this package to run chunks of R code as jobs directly through RStudio: https://github.com/lindeloev/job


Spark cluster

Request access to the Spark Operator

To be able to deploy Spark you will need to ask the DSRI admins to enable the Spark Operator in your project. It will be done quickly; once enabled, you will be able to start a Spark cluster in a few clicks.

Deploy a Spark cluster

Once the DSRI admins have enabled the Spark Operator in your project, you should find a Spark Cluster entry in the Catalog (in the Operator Backed category).

Deploy the cluster from the catalog

Apache Spark in the Catalog

Click on the Spark Cluster entry to deploy a Spark cluster.

You will be presented with a form where you can provide the number of Spark workers in your cluster.

Additionally, you can provide a label, which can be helpful later to manage or delete the cluster. Use the name of your application with the label app, e.g. app=my-spark-cluster.

Deploy an Apache Spark cluster
Change

The number of Spark workers can be easily updated later in the Spark deployment YAML file.

Create a route to the Spark dashboard

Once the cluster has been started you can create a route to access the Spark web UI:

Go to Search > Click on Resources and search for Route > Click on Route

You should now see the routes deployed in your project. Click on the button Create Route

  • Give a short meaningful name to your route, e.g. my-spark-ui
  • Keep Hostname and Path as they are
  • Select the Service corresponding to your Spark cluster, suffixed with -ui, e.g. my-spark-cluster-ui
  • Select the Target Port of the route; it should be 8080

You can now access the Spark web UI at the generated URL to see which jobs are running and the nodes in your cluster.

Run on Spark

You can now start a Spark-enabled JupyterLab, or any other Spark-enabled application, to use the deployed Spark cluster.

Using PySpark

The easiest way is to use a Spark-enabled JupyterLab image, such as jupyter/pyspark-notebook.

But you can also use any image, as long as you download the Spark jars, install all requirements, such as pyspark, and set the right environment variables, such as SPARK_HOME.
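For example, a minimal sketch in the container terminal (the pyspark version is an assumption; it should match the Spark version of your cluster):

pip install pyspark==2.4.5
# The pyspark package ships its own Spark distribution, which can be used as SPARK_HOME
export SPARK_HOME=$(python -c "import pyspark, os; print(os.path.dirname(pyspark.__file__))")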

Connect to a Spark cluster deployed in the same project, replacing spark-cluster with your Spark cluster name:

from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
# Stop existing Spark Context
spark = SparkSession.builder.master("spark://spark-cluster:7077").getOrCreate()
spark.sparkContext.stop()
# Connect to the Spark cluster
conf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077')
sc = SparkContext(conf=conf)

# Run basic Spark test
x = ['spark', 'rdd', 'example', 'sample', 'example']
y = sc.parallelize(x)
y.collect()

RDF analytics with SANSA and Zeppelin notebooks

SANSA is a big data engine for scalable processing of large-scale RDF data. SANSA uses Spark, or Flink, which offer fault-tolerant, highly available and scalable approaches to efficiently process massive sized datasets. SANSA provides the facilities for Semantic data representation, Querying, Inference, and Analytics.

Use the Zeppelin notebook for Spark template in the catalog to start a Spark-enabled Zeppelin notebook. You can find more information on the Zeppelin image at https://github.com/rimolive/zeppelin-openshift

Connect to and test Spark in a Zeppelin notebook, replacing spark-cluster with your Spark cluster name:

%pyspark
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
# Stop existing Spark Context
spark = SparkSession.builder.master("spark://spark-cluster:7077").getOrCreate()
spark.sparkContext.stop()
# Connect to the Spark cluster
conf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077')
sc = SparkContext(conf=conf)

# Run basic Spark test
x = [1, 2, 3, 4, 5]
y = sc.parallelize(x)
y.collect()

You should see the job running in the Spark web UI; you can kill the job with the kill button in the Spark dashboard.

You can now start to run your workload on the Spark cluster.

Reset a Zeppelin notebook

Click on the cogwheel in the top right of the note (Interpreter binding), and reset the interpreter.

Use the official SANSA notebooks examples

See more examples:

Connect Spark to the persistent storage

Instructions available at https://github.com/rimolive/ceph-spark-integration

Requirements:

pip install boto

Check the example notebook for Ceph storage

Delete a running Spark cluster

Get all the objects that are part of the Spark cluster, changing app=spark-cluster to match your Spark cluster name:

oc get all,secret,configmaps --selector app=spark-cluster

Then delete the Operator deployment from the OpenShift web UI overview.
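Alternatively, a possible way to delete the listed objects directly from the command line, reusing the same selector (double-check what the selector matches before deleting):

oc delete all,secret,configmaps --selector app=spark-cluster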


VisualStudio Code

Start VisualStudio Code server

Start a VisualStudio Code server with the coder user, which has sudo privileges.

You can deploy it using the VisualStudio Code server solution in the Catalog web UI (make sure the Templates checkbox is checked).

Provide a few parameters, and instantiate the template. The DSRI will automatically create a persistent volume to store the data you put in the /home/coder/project folder. You can find the persistent volumes in the DSRI web UI: go to the Administrator view > Storage > Persistent Volume Claims.

Deploy VSCode

Use Git in VSCode

The easiest way to log in and clone a repository from GitHub is to use the built-in authentication system of VisualStudio Code: click on Clone Repository... in the Welcome page, and follow the instructions at the top of the VisualStudio window.

If this solution does not work for you, you can use git from the terminal to clone the git repository with git clone. VisualStudio might ask you to log in in the dialog box at the top of the page; enter your username and password when requested. For GitHub you might need to generate a token at https://github.com/settings/tokens to use as password.

Once the repository is cloned, you can use git from the VSCode web UI to manage your git repositories (add, commit, push changes), or in the terminal.

Before committing to GitHub or GitLab, you might need to configure your username and email in the VSCode terminal:

git config --global user.name "Jean Dupont"
git config --global user.email jeandupont@gmail.com
Save your git password

You can run this command to ask git to save your password for 15 min:

git config credential.helper cache

Or store the password in a plain text file:

git config --global credential.helper 'store --file ~/.git-credentials'
Git tip

We recommend using SSH instead of HTTPS connections when possible; check out here how to generate SSH keys and use them with your GitHub account.

VSCode for GPU

See the Deploy on GPU page to deploy a VisualStudio Code server on GPU.


Enabling VPN access in WSL2

Follow these steps in the WSL2 environment:

Create the file /etc/wsl.conf with the following content:

[network]
generateResolvConf = false

This makes sure that WSL2 does not generate its own resolv.conf anymore.
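Note that changes to wsl.conf only take effect after WSL has been restarted; one way to force a restart, from PowerShell on the Windows side, is:

wsl --shutdown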

Edit the file /etc/resolv.conf and add the appropriate nameservers:

nameserver 137.120.1.1
nameserver 137.120.1.5
nameserver 8.8.8.8 # or one of your choosing
search unimaas.nl

These are all the steps you need to take in WSL2. The following step should be done after you have connected to the VPN. You can run this command in PowerShell:

Get-NetAdapter | Where-Object {$_.InterfaceDescription -Match "Cisco AnyConnect"} | Set-NetIPInterface -InterfaceMetric 6000

You should now be able to verify that WSL2 has connectivity:

ping google.com -c 4


Glossary

Docker

Docker is an open platform for building, shipping, and running applications in lightweight containers.

Kubernetes

Kubernetes is a portable, extensible, open-source platform for managing containerized workloads and services, that facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem.

Kubernetes services, support, and tools are widely available.

Kubernetes, also known as K8s, is an open-source system for automating deployment, scaling, and management of containerized applications.

Kubernetes Architecture

More Information: https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/

OpenShift

Red Hat OpenShift is a hybrid cloud, enterprise Kubernetes application platform, trusted by 2,000+ organizations.

It includes:

  • Container host and runtime
  • Enterprise Kubernetes
  • Validated integrations
  • Integrated container registry
  • Developer workflows
  • Easy access to services
Red Hat Openshift

OKD

OKD is a distribution of Kubernetes optimized for continuous application development and multi-tenant deployment. OKD adds developer and operations-centric tools on top of Kubernetes to enable rapid application development, easy deployment and scaling, and long-term lifecycle maintenance for small and large teams. OKD is a sibling Kubernetes distribution to Red Hat OpenShift.

OKD 4 Documentation


Deploy from a Dockerfile

Build from local Dockerfile

This manual shows you an example of how to convert a Dockerfile from your local machine to a running container on the DSRI (OpenShift/OKD). Start by cloning the example repository to your local machine.

git clone git@gitlab.maastrichtuniversity.nl:dsri-examples/dockerfile-to-okd.git

After cloning you now have a local folder containing a Dockerfile and index.html file. Inspect both files.

Log in with the OpenShift client: authenticate to the OpenShift cluster using oc login.

oc login --token=<token>

Create a new project if you don't have one you can work with yet (change myproject to a project name of your choice):

oc new-project myproject

Create a new build configuration:

oc new-build --name dockerfile-to-okd --binary

Build the image

Start a new build on the DSRI with the files provided:

cd dockerfile-to-okd
oc start-build dockerfile-to-okd --from-dir=. --follow --wait

Create your app

Create a new app using the build we just created:

oc new-app dockerfile-to-okd

To properly deploy your app on OpenShift you will need to define a few more parameters:

  • Enable root user access (with serviceAccountName) by running this command:
oc patch deployment/dockerfile-to-okd --patch '{"spec":{"template": {"spec":{"serviceAccountName": "anyuid"}}}}'
  • You can also add persistent storage (with volumes and containers: volumeMounts)

    • ${STORAGE_NAME}: Name of your persistent volume claim in the Storage page of your project in the web UI
    • ${STORAGE_FOLDER}: Name of the folder inside the persistent volume claim to store the application data (so you can store multiple applications on the same persistent volume claim)

Open the configuration of the started app to edit it:

oc edit deployment/dockerfile-to-okd

You can mount an existing persistent volume this way (replace the variables, such as ${STORAGE_NAME}, with your values):

    template:
      spec:
        serviceAccountName: anyuid
        volumes:
          - name: data
            persistentVolumeClaim:
              claimName: "${STORAGE_NAME}"
        containers:
          - image: rstudio-root:latest
            volumeMounts:
              - name: data
                mountPath: "/home/rstudio"
                subPath: "${STORAGE_FOLDER}"
Generate deployment file in YAML

You can also generate the app deployment in a YAML file, to edit it before starting the app:

oc new-app dockerfile-to-okd -o yaml > myapp.yml
# Edit myapp.yml
oc create -f myapp.yml

Expose app

Expose the application so you can reach it from your browser, and check the route that was created:

oc expose svc/dockerfile-to-okd
oc get route

You can now visit the route shown in the HOST/PORT output of the oc get route command and see if you have successfully converted the Dockerfile.

You can edit the created route to enable HTTPS with this command:

oc patch route/dockerfile-to-okd --patch '{"spec":{"tls": {"termination": "edge", "insecureEdgeTerminationPolicy": "Redirect"}}}'

Delete the created build

oc delete build dockerfile-to-okd

See oc delete documentation.


Deploy from a local docker image

You can also deploy a local docker image from your machine.

First build the docker image:

docker build -t my-docker-image:latest .

Check you have the image locally on your system:

docker images

You should have a docker image for your application:

REPOSITORY          TAG
my-docker-image     latest

You can then deploy it, providing the docker image name and the name of the application to be deployed:

oc new-app my-docker-image --name app-name-on-openshift

Deploy from a Git repository

Go to +Add > From Git: https://console-openshift-console.apps.dsri2.unimaas.nl/import

Follow the instructions given by the web UI: provide the URL of your git repository and the port on which the web interface will be deployed; you can also create a secret for git login if the repository is private.

Once the container has started you will need to make a small change to enable it to run with any user ID (due to OpenShift security policies).

You can do it with the command line (just change your-app-name to your application name):

oc patch deployment/your-app-name --patch '{"spec":{"template": {"spec":{"serviceAccountName": "anyuid"}}}}'

Or through the web UI: click on your deployment, then Actions > Edit Deployment, and edit the YAML of your deployment to add serviceAccountName: anyuid under template.spec:

    template:
      spec:
        serviceAccountName: anyuid
        containers:
          - [...]

Known Issues

Cannot access your data in the persistent folder

Sometimes you can no longer access the data you put in the persistent folder of your container. This can happen when a node goes down: if the persistent volume your pod is connected to is on that node, the pod cannot access it anymore.

You can easily fix this issue by restarting the pod of your application; it will then properly reconnect to resources on nodes that are up.

To restart the pod, go to the Topology page, click on your application, go to the Details tab, and decrease the pod count to 0, then put it back up to 1.
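If you prefer the command line, a possible equivalent, assuming your application runs as a Deployment named my-app:

oc scale deployment/my-app --replicas=0
oc scale deployment/my-app --replicas=1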

Large volumes

Pod or Deployment will not start

You could run into a message in the Events tab that looks similar to this:

Error: kubelet may be retrying requests that are timing out in CRI-O due to system load. Currently at stage container volume configuration: context deadline exceeded: error reserving ctr name

The issue above will occur if you are using a large persistent volume. It can be resolved by adding the following to your Deployment(Config):

spec:
  template:
    metadata:
      annotations:
        io.kubernetes.cri-o.TrySkipVolumeSELinuxLabel: 'true'
    spec:
      runtimeClassName: selinux

Take note of the indentation and the place in the file!

An example of this can be found here:

Storage

DockerHub pull limitations

Spot the issue

If the Events tab shows this error:

--> Scaling filebrowser-case-1 to 1
error: update acceptor rejected my-app-1: pods for rc 'my-project/my-app-1' took longer than 600 seconds to become available

Then check for the application ImageStream in Build > Images, and you might see this for your application image:

Internal error occurred: toomanyrequests: You have reached your pull rate limit.
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit.

You can solve this by creating a secret to log in to DockerHub in your project:

oc create secret docker-registry dockerhub-login --docker-server=docker.io --docker-username=dockerhub_username --docker-password=dockerhub_password --docker-email=example@mail.com

Then link the login secret to the default service account:

oc secrets link default dockerhub-login --for=pull
tip

Logging in to DockerHub should raise the pull limit.

To definitively solve this issue, you can publish the DockerHub image to the GitHub Container Registry.

Follow those instructions on your laptop:

  1. Log in to the GitHub Container Registry with docker login.

  2. Pull the docker image from DockerHub:

    docker pull myorg/myimage:latest


  3. Change its tag

    docker tag myorg/myimage:latest ghcr.io/maastrichtu-ids/myimage:latest
  4. Push it back to the GitHub Container Registry:

    docker push ghcr.io/maastrichtu-ids/myimage:latest
Image created automatically

If the image does not exist, GitHub will create it automatically when you push it for the first time! You can then head to your organization's Packages tab to see the package.

Make it public

By default new images are set as Private. Go to your Package Settings and click Change Visibility to set it as Public; this avoids the need to log in to pull the image.

You can update the image whenever you want access to the latest version; you can set up a GitHub Actions workflow to do so.

Finally you will need to update your DSRI deployment, or template, to use the newly created image on ghcr.io, and redeploy the application with the new template.


How to run a function within a container in the background

Spot the issue

If you start a long-running command in the container's terminal, for example:

--> cd /usr/local/src/work2/aerius-sample-sequencing/CD4K4ANXX
Trinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221

you might observe two issues: the process starts but at some point just exits, without warnings or errors; or the terminal keeps running fine but the process never finishes, and at some point a red ''disconnected'' label appears, the terminal stops, and the analysis never continues.

Those two issues are due to the process running attached to the terminal.

You should be able to easily run it the "Bash way": add nohup at the beginning and & at the end. It will run in the background, and all output that would have gone to the terminal will go to a nohup.out file in the working directory.

nohup Trinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221 &

To check if it is still running:

ps aux | grep Trinity

Be careful: make sure the terminal uses bash and not shell ("sh").

To use bash just type bash in the terminal:

bash

Git authentication issue

danger

⚠️ remote: HTTP Basic: Access denied fatal: Authentication failed for

This happens every time you are forced to change your Windows password.

  1. Run this command from PowerShell (run as administrator):

    git config --system --unset credential.helper

  2. Then remove the gitconfig file from the C:\Program Files\Git\mingw64\etc\ location (note: this path will be different on Mac, e.g. "/Users/username")

  3. After that, use a git command like git pull or git push: it will ask for your username and password; enter valid credentials and the git commands will work again.

Windows:
  1. Go to the Windows Credential Manager. This is done in an EN-US Windows by pressing the Windows key and typing 'credential'. In other localized Windows variants you need to use the localized term.
Windows Credentials

Alternatively you can use the shortcut control /name Microsoft.CredentialManager in the run dialog (WIN+R).

  2. Edit the git entry under Windows Credentials, replacing the old password with the new one.
Mac:
  1. Press cmd+space and type "Keychain Access".

  2. You should find a key with a name like "gitlab.*.com Access Key for user". You can order by date modified to find it more easily.

Mac Git Authentication
  3. Right click and delete it.

Filebrowser 403 forbidden

Spot the issue

If you get a 403 forbidden error while trying to upload folders/files or create a new folder/file:

403 forbidden
Forbidden Issue

The above issue will occur if you are not using persistent storage.

The DSRI team can create persistent storage for your data. Contact the DSRI team to request it.

You can find the persistent storage name as shown below:

Storage

Install local OpenShift

OpenShift and Kubernetes can be installed locally on a single machine for testing purposes. The installation requires OS administration knowledge, and can be quite complex. We recommend installing it locally only if really required; otherwise we recommend you simply use Docker to test images, then deploy them on the DSRI.

Install MiniShift

You will need to set up the virtualization environment before installing MiniShift.

Download MiniShift and unzip it.

# For Ubuntu 18.04 and older
sudo apt install -y libvirt-bin qemu-kvm
# For Ubuntu 18.10 and newer (replace libvirtd by libvirt in next commands)
sudo apt install -y qemu-kvm libvirt-daemon libvirt-daemon-system

# Create group if does not exist
sudo addgroup libvirtd
sudo adduser $(whoami) libvirtd

sudo usermod -a -G libvirtd $(whoami)
newgrp libvirtd
curl -L https://github.com/dhiltgen/docker-machine-kvm/releases/download/v0.10.0/docker-machine-driver-kvm-ubuntu16.04 -o /usr/local/bin/docker-machine-driver-kvm
sudo chmod +x /usr/local/bin/docker-machine-driver-kvm

# Check if libvirtd running
systemctl is-active libvirtd
# Start if inactive
sudo systemctl start libvirtd

# Copy MiniShift in your path
cp minishift-1.34.1-linux-amd64/minishift /usr/local/bin

Start MiniShift

minishift start

Get your local OpenShift cluster URL after the command completes.

Login

Go to your local cluster URL.

E.g. https://192.168.42.58:8443/console/catalog.

Username: admin or developer

Password: anything will work

# As admin
oc login -u system:admin

Stop

minishift stop

Reset

minishift delete -f

Install kubectl

Kubernetes

kubectl on Ubuntu

For more details: read the official install Kubernetes on Ubuntu tutorial or see the official Ubuntu Kubernetes install documentation.

sudo snap install microk8s --classic
sudo usermod -a -G microk8s $USER
# Restart your machine
mkdir -p ~/.kube
microk8s.kubectl config view --raw > $HOME/.kube/config

# Make sure this works for dashboard on Ubuntu
microk8s.enable dashboard dns

Only do this if kubectl is not already installed on your machine:

sudo snap alias microk8s.kubectl kubectl

kubectl on MacOS & Windows

kubectl is included in the Docker installation. Use the installer provided by DockerHub.

Activate it in Docker Preferences > Kubernetes.

For Windows you will need to download the kubectl.exe and place it in your PATH.

We recommend creating a kubectl directory in C:/ and adding this C:/kubectl to the Path environment variable in System properties > Advanced > Environment Variables > Path.

Install the Dashboard UI

# Install Kubernetes UI
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml
kubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-dashboard-adminuser-sa.yml
kubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-adminuser-rolebinding.yml

# Get the Token to access the dashboard
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')


# Windows user will need to execute the 2 commands manually:
kubectl -n kube-system get secret
# And get the token containing 'admin-user'
kubectl -n kube-system describe secret
# For Windows: give the anonymous user global access
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=admin --user=system:anonymous
# Note: this could be improved. I think only the Dashboard UI didn't have the required permissions.

# Finally, start the web UI, and choose the Token connection
kubectl proxy

Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

And provide the previously obtained token.

Warning: you will need to save the token to log in again next time (use your browser's password saving feature if possible).

Run kubectl

kubectl should be running at startup.

Just restart the web UI:

kubectl proxy

Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

Enable internet

Debug DNS on Ubuntu

microk8s.enable dns

Restart your machine.

You might need to change your firewall configuration:

  • On Ubuntu:
sudo ufw allow in on cni0
sudo ufw allow out on cni0
sudo ufw default allow routed
# Test connectivity from within a pod
kubectl exec -ti busybox -- /bin/sh
ping google.com

Create persistent volume

# Create volume
kubectl apply -n argo -f d2s-core/argo/storage/storage-mac.yaml

Not working at the moment.

Uninstall

Clean uninstall (before version 2.2):

kubectl get cm workflow-controller-configmap -o yaml -n kube-system --export | kubectl apply -n argo -f -
kubectl delete -n kube-system cm workflow-controller-configmap
kubectl delete -n kube-system deploy workflow-controller argo-ui
kubectl delete -n kube-system sa argo argo-ui
kubectl delete -n kube-system svc argo-ui

Install Argo workflows

Argo project

Install on your local Kubernetes

Argo workflows will be installed on the argo namespace. See the official Argo documentation for more details.

kubectl create ns argo
kubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.4.2/manifests/install.yaml

# Configure service account to run workflow
kubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=default:default

# Test run
argo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml

See custom configuration for namespace install.

kubectl apply -n argo -f https://raw.githubusercontent.com/vemonet/argo/master/manifests/namespace-install.yaml

Install the client

See the Argo workflows documentation.

Expose the UI

kubectl -n argo port-forward deployment/argo-ui 8002:8001

Access on http://localhost:8002.

- +

Install local OpenShift

OpenShift and Kubernetes can be installed locally on a single machine for test purpose. The installation requires knowledge of your OS administration, and can be quite complex. We recommend to install it locally only if really required. Otherwise we recommend you to simply use Docker to test images, then deploy them on the DSRI.

Install MiniShift

You will need to set up the virtualization environment before installing MiniShift.

Download MiniShift and unzip it.

# For Ubuntu 18.04 and older
sudo apt install -y libvirt-bin qemu-kvm
# For Ubuntu 18.10 and newer (replace libvirtd by libvirt in next commands)
sudo apt install -y qemu-kvm libvirt-daemon libvirt-daemon-system

# Create group if does not exist
sudo addgroup libvirtd
sudo adduser $(whoami) libvirtd

sudo usermod -a -G libvirtd $(whoami)
newgrp libvirtd
curl -L https://github.com/dhiltgen/docker-machine-kvm/releases/download/v0.10.0/docker-machine-driver-kvm-ubuntu16.04 -o /usr/local/bin/docker-machine-driver-kvm
sudo chmod +x /usr/local/bin/docker-machine-driver-kvm

# Check if libvirtd running
systemctl is-active libvirtd
# Start if inactive
sudo systemctl start libvirtd

# Copy MiniShift in your path
cp minishift-1.34.1-linux-amd64/minishift /usr/local/bin

Start MiniShift

minishift start

Get your local OpenShift cluster URL after the command complete.

Login

Go to your local cluster URL.

E.g. https://192.168.42.58:8443/console/catalog.

Username: admin or developer

Password: anything will work

# As admin
oc login -u system:admin

Stop

minishift stop

Reset

minishift delete -f

Install kubectl

Kubernetes

kubectl on Ubuntu

For more details: read the official install Kubernetes on Ubuntu tutorial or see the official Ubuntu Kubernetes install documentation.

sudo snap install microk8s --classic
sudo usermod -a -G microk8s $USER
# Restart your machine
mkdir -p ~/.kube
microk8s.kubectl config view --raw > $HOME/.kube/config

# Make sure this works for dashboard on Ubuntu
microk8s.enable dashboard dns

To do only if kubectl is not already installed on your machine:

sudo snap alias microk8s.kubectl kubectl

kubectl on MacOS & Windows

Included in Docker installation. Use the installer provided by DockerHub.

Activate it in Docker Preferences > Kubernetes.

For Windows you will need to download the kubectl.exe and place it in your PATH.

We recommend to create a kubectl directory in C:/ and add this C:/kubectl to the Path environment variable in System properties > Advanced > Environment Variables > Path

Install the Dashboard UI

# Install Kubernetes UI
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml
kubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-dashboard-adminuser-sa.yml
kubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-adminuser-rolebinding.yml

# Get the Token to access the dashboard
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
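# Note: on newer Kubernetes versions (1.24+) tokens are no longer auto-created
# for service accounts, you can request one directly instead (assuming the
# admin-user service account created by the manifests above):
kubectl -n kube-system create token admin-user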


# Windows users will need to execute the 2 commands manually:
kubectl -n kube-system get secret
# And get the token containing 'admin-user'
kubectl -n kube-system describe secret
# For Windows: give the anonymous user global access
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=admin --user=system:anonymous
# Note: this could be improved. I think only the Dashboard UI didn't have the required permissions.

# Finally, start the web UI, and choose the Token connection
kubectl proxy

Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

And provide the previously obtained token.

Warning: you will need to save the token to log in again next time (use your browser's password manager if possible).

Run kubectl

kubectl should be running at startup.

Just restart the web UI

kubectl proxy

Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

Enable internet

Debug DNS on Ubuntu

microk8s.enable dns

Restart your machine.

You might need to change your firewall configuration

  • On Ubuntu
sudo ufw allow in on cni0
sudo ufw allow out on cni0
sudo ufw default allow routed

kubectl exec -ti busybox -- /bin/sh
ping google.com
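The busybox pod used for this test needs to exist first; you can start a simple disposable one like this (the pod name and image are just an example):

kubectl run busybox --image=busybox --restart=Never -- sleep 3600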

Create persistent volume

# Create volume
kubectl apply -n argo -f d2s-core/argo/storage/storage-mac.yaml

Not working at the moment.

Uninstall

Clean uninstall of Argo versions before 2.2:

kubectl get cm workflow-controller-configmap -o yaml -n kube-system --export | kubectl apply -n argo -f -
kubectl delete -n kube-system cm workflow-controller-configmap
kubectl delete -n kube-system deploy workflow-controller argo-ui
kubectl delete -n kube-system sa argo argo-ui
kubectl delete -n kube-system svc argo-ui

Install Argo workflows

Argo project

Install on your local Kubernetes

Argo workflows will be installed in the argo namespace. See the official Argo documentation for more details.

kubectl create ns argo
kubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.4.2/manifests/install.yaml

# Configure service account to run workflow
kubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=default:default

# Test run
argo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml
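For reference, the hello-world example submitted above is a minimal Workflow manifest along these lines (a sketch; the upstream example may differ slightly):

apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: hello-world-
spec:
  entrypoint: whalesay
  templates:
    - name: whalesay
      container:
        image: docker/whalesay:latest
        command: [cowsay]
        args: ["hello world"]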

See the custom configuration for a namespace-scoped install:

kubectl apply -n argo -f https://raw.githubusercontent.com/vemonet/argo/master/manifests/namespace-install.yaml

Install the client

See the Argo workflows documentation.

Expose the UI

kubectl -n argo port-forward deployment/argo-ui 8002:8001

Access on http://localhost:8002.


Monitor your applications

Monitor your application's resource usage

You can have an overview of the different resources consumed by the applications running in your project by going to the Monitoring tab (in the developer view).


You can also check the CPU and memory usage directly from the terminal inside a specific container:

  1. Go to your application terminal, and run:

    top
  2. Check the number of Cpu(s) used at the top:

    %Cpu(s): 3,3 us,

  3. Check the memory usage with the used column:

    MiB Mem : 515543.2 total, 403486.8 free, 98612.0 used, 13444.5 buff/cache

Debug an application deployment

If your application is facing issues when deployed:

  1. If the pod is not building or not deploying properly, take a look at the Events tab of the deployment. It shows a log of all events faced by the deployment (assigning to a node, pulling the image, building, etc.). Additionally, all Events in your project can be accessed in Monitoring.
Various ways to check the events

You can also check the Monitoring page in the left side menu to see all events in a project.

Or use the terminal:

oc get events
  2. When a pod is running you can check its logs in the Logs tab (after going to the pod page). It will show the logs output of the container, equivalent to running docker logs.
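From the terminal you can also sort the events by time, or fetch a pod's logs directly (the pod name here is illustrative):

oc get events --sort-by='.lastTimestamp'
oc logs my-application-pod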
Get help

If you cannot figure out the issue by yourself:

  1. Gather relevant information to help the DSRI team solve your issue: the URL of the faulty application, and the error shown in the Events or Logs tab
  2. Seek help on the #helpdesk DSRI Slack channel
  3. Check if an issue has already been created for this problem, or create a new one: https://github.com/MaastrichtU-IDS/dsri-documentation/issues

Publish a Docker image

DockerHub pull rates limitations

⚠️ DockerHub imposes strict pull limitations for clusters like the DSRI (using DockerHub might result in failing to pull your images on the DSRI).

We highly recommend using the GitHub Container Registry or the RedHat quay.io Container Registry to publish public Docker images.

You can also log in to DockerHub using a Secret in OpenShift to increase the pull rate limit from 100 to 200 pulls every 6 hours (this will mitigate the issue, but not solve it completely if you do not have a paid account on DockerHub):

oc create secret docker-registry docker-hub-secret --docker-server=docker.io --docker-username=your-dockerhub-username --docker-password=your-dockerhub-password --docker-email=your-dockerhub-email
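Depending on your setup, you may also need to link this secret to the service account used to pull images, for example the default one:

oc secrets link default docker-hub-secret --for=pull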

Login to Container Registries 🔑

Login to GitHub Container Registry

Use your existing GitHub account if you have one:

  1. Create a Personal Access Token for GitHub packages at https://github.com/settings/tokens/new
  2. Provide a meaningful description for the token, and enable the following scopes when creating the token:
    • write:packages: publish container images to GitHub Container Registry
    • delete:packages: delete specified versions of private or public container images from GitHub Container Registry
  3. You might want to store this token in a safe place, as you will not be able to retrieve it later on github.com (you can still delete it and easily create a new token if you lose it)
  4. 👨‍💻 Log in to the GitHub Container Registry in your terminal (change USERNAME and ACCESS_TOKEN to yours):
echo "ACCESS_TOKEN" | docker login ghcr.io -u USERNAME --password-stdin

On Windows use this command:

docker login ghcr.io -u USERNAME -p "ACCESS_TOKEN"

See the official GitHub documentation.

Login to quay.io

  1. Create an account at https://quay.io
  2. Log in from your terminal (you will be asked for your username and password)
docker login quay.io

Login to DockerHub

  1. Get a DockerHub account at https://hub.docker.com (you most probably already have one if you installed Docker Desktop)

  2. 👩‍💻 Run in your terminal:

docker login
  3. Provide your DockerHub username and password.

Publish your image 📢

Once you have built a Docker image and logged in to a Container Registry, you can publish the image so that it can easily be pulled and re-used later.

Publish to GitHub Container Registry

Free for public images

The GitHub Container Registry is still in beta but will be free for public images when fully released. It enables you to store your Docker images at the same place you keep your code! 📦

Publish to your user Container Registry on GitHub:

docker build -t ghcr.io/github-username/my-image:latest .
docker push ghcr.io/github-username/my-image:latest

For example, to the MaastrichtU-IDS organization Container Registry on GitHub:

docker build -t ghcr.io/maastrichtu-ids/jupyterlab:latest .
docker push ghcr.io/maastrichtu-ids/jupyterlab:latest
Created automatically

If the image does not exist, GitHub Container Registry will create it automatically and set it as Private by default. You can easily change it to Public in the image settings on github.com.

Publish to Quay.io

Free for public images

Quay.io is free for public images and does not restrict image pulls.

  1. Create the image on quay.io

  2. Build and push to quay.io

docker build -t quay.io/quay-username/my-image:latest .
docker push quay.io/quay-username/my-image:latest

Publish to DockerHub

DockerHub pull rates limitations

⚠️ DockerHub imposes strict pull limitations for clusters like the DSRI (using DockerHub might result in failing to pull your images on the DSRI).

We highly recommend to use the GitHub Container Registry or RedHat quay.io Container Registry to publish public Docker images.

Logged in

If you are logged in with your DockerHub user on the DSRI, it should allow you to pull DockerHub images in your project (see above).

  1. Create the repository on DockerHub (attached to your user or an organization)
  2. Build and push the image:
docker build -t dockerhub-username/jupyterlab:latest .
docker push dockerhub-username/jupyterlab:latest

You can also change the name (aka. tag) of an existing image:

docker build -t my-jupyterlab .
docker tag my-jupyterlab ghcr.io/github-username/jupyterlab:latest
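After retagging, push the image under its new name:

docker push ghcr.io/github-username/jupyterlab:latest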

Use automated workflows

You can automate the building and publication of Docker images using GitHub Actions workflows 🔄

Use a working workflow as example

👀 Check the .github/workflows/publish-docker.yml file to see an example of a workflow to publish an image to the GitHub Container Registry.

👩‍💻 You only need to change the IMAGE_NAME, and use it in your GitHub repository to publish a Docker image for your application automatically! It will build from a Dockerfile at the root of the repository.

Workflow triggers

The workflow can be easily configured to (see the sketch after this list):

  • publish a new image to the latest tag at each push to the main branch
  • publish an image to a new tag if a release is pushed on GitHub (using the git tag)
    • e.g. v0.0.1 published as image 0.0.1
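As an indication, such a workflow might look roughly like the sketch below. It uses the standard Docker GitHub Actions rather than reproducing the exact content of publish-docker.yml, and my-image is a placeholder to replace:

name: Publish Docker image
on:
  push:
    branches: [ main ]
    tags: [ 'v*' ]

env:
  IMAGE_NAME: my-image  # placeholder, change to your image name

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - uses: actions/checkout@v4
      # Log in to the GitHub Container Registry with the built-in token
      - uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # Build from the Dockerfile at the root of the repository and push
      - uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: ghcr.io/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}:latest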

Install UM VPN

Request an account

  1. You will need to have an account at Maastricht University with an email ending with @maastrichtuniversity.nl or @student.maastrichtuniversity.nl.

  2. Request access to the DSRI for your account: please fill this form 📬 to provide us some information on what you plan to do with the DSRI.

Connect to the UM network

You need to be connected to the UM network to access the DSRI.

  • Connect to UMnet or eduroam WiFi at Maastricht University

  • Use the Maastricht University VPN at vpn.maastrichtuniversity.nl

    Log in to that using your UM username and password.

Students

By default the UM VPN is only available to employees. As a student you can access UM resources from any location via Student Desktop Anywhere. However, if VPN access is absolutely necessary you can request access via your course coordinator.

  • Log in with the prefix of your UM email address with the first letter capitalized, e.g. Firstname.Lastname or F.Lastname, or with your employee number at Maastricht University (a.k.a. P number), e.g. P7000000

    VPN Log in View

    You will then see the page below to download the AnyConnect Secure Mobility Client

    Download AnyConnect Secure Mobility Client

Install the VPN (AnyConnect Secure Mobility Client) on Windows

Double click on the .exe file to install the VPN.

You can follow the steps below, as shown in the pictures.

Install VPN

Log in to the VPN (AnyConnect Secure Mobility Client)

Once you finish installing you can run the Cisco AnyConnect Secure Mobility Client.

Log in to the VPN

You will then see the wizard below; click Connect.

Log in to the VPN

Provide your UM username (your employee number at Maastricht University, a.k.a. P number, e.g. P7000000) and your password.

Log in to the VPN

Install the VPN (AnyConnect Secure Mobility Client) on Linux

  • Connect to UMnet or eduroam WiFi at Maastricht University

  • For Linux, use openconnect to connect to the UM VPN. You can easily install it on Ubuntu and Debian distributions with apt:

    sudo apt install openconnect
    sudo openconnect -u YOUR.USER --authgroup 01-Employees --useragent=AnyConnect vpn.maastrichtuniversity.nl

    Provide your UM password when prompted.

  • For students:

    • By default the UM VPN is only available to employees. As a student you can access UM resources from any location via Student Desktop Anywhere. However, if VPN access is absolutely necessary you can request access via your course coordinator.

Prepare a workshop

The DSRI is a good platform to run a training or class within Maastricht University.

Request VPN accounts for users

If the users are students of Maastricht University, or are not from Maastricht University (i.e. without an email ending in @maastrichtuniversity.nl or @maastro.nl), you will need to contact the ICT support of your department to request the creation of accounts so that your users can connect to the UM VPN.

At FSE, you will need to send an email to lo-fse@maastrichtuniversity.nl and DSRI-SUPPORT-L@maastrichtuniversity.nl with the following information:

  • Emails of the users
  • Why they need access to the DSRI (provide the ID of the course at Maastricht University if it is for a course)
  • Until which date the users will need those VPN accounts

Fill a form

Fill this form 📬 to give us more details on your project (you don't need to do it if you have already filled it in the past).

Prepare your workshop

Use the DSRI documentation to explain to your users how to access the DSRI.

Publish an image for your training

Feel free to use the existing templates for JupyterLab, RStudio, or Visual Studio Code in the DSRI catalog.

You can easily reuse our images, adapt them to your training needs, and install all required dependencies, for example:
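A minimal sketch of such a Dockerfile, extending the MaastrichtU-IDS JupyterLab image (the base tag and installed packages are illustrative):

# Base image published by MaastrichtU-IDS (pick the flavor you need)
FROM ghcr.io/maastrichtu-ids/jupyterlab:latest
# Install the dependencies required for your training
RUN pip install pandas scikit-learn matplotlib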

Then you will just need to instruct your users to start an existing template with your newly published image.

With the JupyterLab template you can also prepare a git repository to be cloned in the workspace as soon as they start it.

You can find some examples of Python scripts using a database to run on the DSRI in this repository: https://github.com/MaastrichtU-IDS/dsri-demo

Show your users how to start a workspace

You can use this video showing how to start an RStudio workspace; the process is similar for JupyterLab and VisualStudio Code: https://www.youtube.com/watch?v=Y0BjotH1LiE

Otherwise just do it directly with them.


Install from Helm charts

Helm is a popular package manager for Kubernetes. A Helm chart is a bundle of parameterizable YAML resources for Kubernetes/OpenShift.

Difference with Operators

Helm charts can be defined as Operators (if they are packaged using the operator-sdk), but not all of them are Operators.

See the official documentation for Helm on OpenShift.

Install the Helm client

Install Golang

Go is required if you build Helm from source. Install go 1.14.4 on Linux as follows; you can find instructions for MacOS, Windows, and newer versions at https://golang.org/dl

wget https://dl.google.com/go/go1.14.4.linux-amd64.tar.gz

# Extract to /usr/local
tar -C /usr/local -xzf go1.14.4.linux-amd64.tar.gz

# Add Go to the path in .profile (single quotes so $PATH is expanded at login, not now)
echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.profile
# Or in .zshrc if you use ZSH
echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.zshrc

Restart your laptop for the changes to take effect, or execute source ~/.profile

Install Helm

You can also use the official documentation to install Helm on your machine.

Install on Linux

curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash

See Helm documentation for Linux.

Install on MacOS

brew install helm

See Helm documentation for MacOS.

Install on Windows

Install using Chocolatey.

choco install kubernetes-helm

See Helm documentation for Windows.

Check Helm installation

helm version

Install a Helm chart

Explore published Helm charts at https://hub.helm.sh ⛵

Start a MySQL database with Helm

Example from the OpenShift 4.3 documentation. See also the official Helm documentation.

  1. Add the repository of official Helm charts to your local Helm client:
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
  2. Update the repository:
helm repo update
  3. Install an example MySQL chart, and start the application named example-mysql:
helm install example-mysql stable/mysql
Password

The instructions to retrieve the admin password and connect to the database will be displayed in the terminal.

Retrieve the database password with this command (N.B.: kubectl can also be used in place of oc):

oc get secret example-mysql -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo
  4. Verify that the chart has installed successfully:
helm list
  5. Expose the MySQL service as a route:
oc expose service example-mysql
oc get routes

Or port-forward to http://localhost:3306

oc port-forward svc/example-mysql 3306

Uninstall the application

helm uninstall example-mysql

Set deployment parameters

You can also define deployment parameters when installing a Helm chart, such as the service account and node selector.

For example, here we make sure the application will run on DSRI CPU nodes and use the anyuid service account:

Add Bitnami repository:

helm repo add bitnami https://charts.bitnami.com/bitnami

Install and start Postgresql:

helm install postgresql-db bitnami/postgresql --set nodeSelector."dsri\.unimaas\.nl/cpu"=true --set serviceAccount.name=anyuid
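You can then check that your values were applied as expected:

helm get values postgresql-db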

Increase your processes speed

DSRI provides a lot of computing resources, but there are a few things to know if you want to increase the speed of your processes.

The good

With the DSRI you get access to a workspace with more memory and cores than your laptop (around 200G memory, and 64 cores on the DSRI, against around 16G memory and 8 cores on your laptop)

Those additional resources might help your workload run faster, but not automatically! It will run faster:

  • If your code can make use of the really large amount of RAM to load more of the data to process in memory. But if your workload does not require dozens of GB of memory, and your laptop does not face out of memory issues, or crash, when you run your workload, then you probably already have enough memory on your laptop, and will not gain a significant boost from the increased memory.
  • If you can run your workload in parallel, or enable the libraries you use to use the available cores. This will highly depend on the libraries you use. Do they support running their processes in parallel? Do you need to explicitly enable parallelism on a specific number of cores?

Proper parallelism is not achieved easily; it needs to be explicitly implemented within the library's processes.

For example, Python has a "Global Interpreter Lock" (aka. GIL) that limits thread parallelism by design, so when you are doing some work on a spreadsheet with pandas, you are only going to use 1 thread (which is nice, because it makes the conceptualization and understanding of algorithms easier, but it also makes it harder to write truly efficient libraries).

You will need to use complementary libraries if you want to use more threads while processing data with pandas. There are multiple ways and libraries to achieve this, but the easiest, if you want to try it yourself with pandas, is to use pandarallel. You could also implement the parallelism yourself with concurrent.futures.
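For instance, a minimal pandarallel sketch (the DataFrame and the applied function are just an illustration):

import pandas as pd
from pandarallel import pandarallel

# Spread pandas apply() calls over all available cores
pandarallel.initialize(progress_bar=True)

df = pd.DataFrame({"x": range(1_000_000)})
df["y"] = df.parallel_apply(lambda row: row["x"] ** 2, axis=1)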

The bad

Until now everything seems good, more memory, more cores... So, what's the catch? It can only get better, no?

Applications and workspaces running on the DSRI use a persistent volume to avoid losing data when the application is restarted. On most workspaces this persistent volume is mounted on the workspace working directory.

This persistent volume is not hosted directly on the same node as your application; it is hosted on the cluster in a distributed fashion (remember you can attach this persistent volume to different applications, which might themselves be hosted on different nodes).

And distributed storage means: slower read and write times!

  • On your laptop the data is on a hard drive sitting 2 cm away from the CPU and memory.

  • On the DSRI your workspace might be on node 4, while the persistent volume is on node 8. In this case the data will need to go through the network.

So if you write a script to just load data, do no computing, and write back the data to the persistent volume, it will probably be much faster on your laptop than on the DSRI!

The solution

Usually only 1 folder (and its subfolders) is mounted on the persistent volume. The rest is "ephemeral storage": data bound to the application you started, which means the data will be stored on the same node as your workspace.

This might result in faster read/write speeds, but it also means the data will be lost if the workspace is restarted (which does not happen every day, but can happen without notice).

A solution, sketched after this list, could be to:

  • Keep your code and important data as backup in the persistent volume (the workspace working dir usually)
  • Copy the data your process needs to load in a folder outside of the persistent volume (on the ephemeral storage)
  • Read/write data mostly from this folder on the ephemeral storage, avoid using the data in the persistent volume folder as much as possible
  • Copy the important result files or temporary files you don't want to lose from the folder on the ephemeral storage to the folder on the persistent storage
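In practice the pattern can be as simple as the following (the paths are illustrative; the persistent folder name depends on your workspace):

# Copy the input data from the persistent volume to ephemeral storage
cp -r ~/persistent/data /tmp/data
# ... run your processing against /tmp/data ...
# Copy the results you want to keep back to the persistent volume
cp -r /tmp/data/results ~/persistent/results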

Let us know how it works for you on the Slack #general channel, and if you have suggestions to improve the workspaces.


Introduction

The Data Science Research Infrastructure is a cluster of servers to deploy workspaces and applications for Data Science.

It works by starting workspaces and applications in Docker containers that are automatically deployed to a powerful server on the cluster using Kubernetes, a container orchestration system. You can then access your workspace or application through an automatically generated URL.

Getting started

✅ What can be done on the DSRI

The DSRI is particularly useful if you need to:

  • Gain access to more computing resources (memory and CPUs), which enables you to load larger amounts of data, or use more threads for parallelized tasks
  • Run jobs that take a long time to complete
  • Deploy any database or service you need, and connect to it from your workspace easily
  • Book and start a workspace that uses one of our GPUs

The DSRI offers a number of popular workspaces to work with data:

  • Multiple flavors of JupyterLab (scipy, tensorflow, all-spark, and more)
  • VisualStudio Code server (also available within the JupyterLab workspaces)
  • RStudio, with a complementary Shiny server
  • Matlab
  • Ubuntu Desktop

You can then install anything you want in your workspace using conda, pip, or apt.

Data storage

DSRI is a computing infrastructure, built and used to run data science workloads. DSRI stores data in a persistent manner, but all data stored on the DSRI may be altered by the workloads you are running, and we cannot guarantee its immutability.

Always keep a safe copy of your data outside the DSRI. And don't rely on the DSRI for long term storage.

❌ What cannot be done

  • Since DSRI can only be accessed when using the UM VPN, deployed services will not be available on the public Internet 🔒
  • All activities must have a legal basis. You must closely examine and abide by the terms and conditions of any data, software, or web service that you use as part of your work 📜
  • You cannot reach data or servers hosted at Maastricht University from the DSRI by default. You will need to request access in advance here 📬️
  • Right now it is not possible to reach the central UM fileservices (MFS)
Request an account

If you are working at Maastricht University, see this page to request an account, and run your services on the DSRI.

The DSRI architecture

Here is a diagram providing a simplified explanation of how the DSRI works, using popular data science applications as examples (JupyterLab, RStudio, VSCode server)

DSRI in a nutshell

The DSRI specifications

Software

We use OKD 4.11, the Origin Community Distribution of Kubernetes that powers RedHat OpenShift. Kubernetes takes care of deploying the Docker containers on the cluster of servers; the OKD distribution extends it to improve security and provides a user-friendly web UI to manage your applications.

We use RedHat Ceph storage for the distributed storage.

Hardware

  • 16 CPU nodes

                     RAM (GB)   CPU (cores)              Storage (TB)
    Node capacity    512 GB     64 cores (128 threads)   120 TB
    Total capacity   8 192 GB   1 024 cores              1 920 TB

  • 1 GPU node: Nvidia DGX1 8x Tesla V100 - 32GB GPU

                          GPUs   RAM (GB)   CPU (cores)
    GPU node capacity     8      512 GB     40 cores
DSRI infrastructure

Learn more about DSRI

See the following presentation about the Data Science Research Infrastructure

DSRI April 2021 Community Event Presentation

JupyterHub with Spark

JupyterHub is ideal for enabling multiple users to easily start predefined workspaces in the same project. The complementary Apache Spark cluster can be used from the workspaces to perform distributed processing.

🧊 Install kfctl

You will need to have the usual oc tool installed, as well as kfctl, a tool to deploy Kubeflow applications: download the latest version for your OS 📥️

You can then install it by downloading the binary and putting it in your path, for example on Linux:

wget https://github.com/kubeflow/kfctl/releases/download/v1.2.0/kfctl_v1.2.0-0-gbc038f9_linux.tar.gz
tar -xzf kfctl_v1.2.0-0-gbc038f9_linux.tar.gz
sudo mv kfctl /usr/local/bin/

Clone the repository with the DSRI custom images and deployments for the OpenDataHub platform, and go to the kfdef folder:

git clone https://github.com/MaastrichtU-IDS/odh-manifests
cd odh-manifests/kfdef

🪐 Deploy JupyterHub and Spark

Go to the kfdef folder

All scripts need to be run from the kfdef folder 📂

You can deploy JupyterHub with 2 different authentication systems; use the file corresponding to your choice:

  • For the default DSRI authentication use kfctl_openshift_dsri.yaml

  • For GitHub authentication use kfctl_openshift_github.yaml

    • You need to create a new GitHub OAuth app: https://github.com/settings/developers

    • And provide the GitHub client ID and secret through environment variables before running the start script:

      export GITHUB_CLIENT_ID=YOUR_CLIENT_ID
      export GITHUB_CLIENT_SECRET=YOUR_CLIENT_SECRET

First you will need to change the namespace: field in the file you want to deploy, to provide the project where you want to start JupyterHub (currently opendatahub-ids). Then you can deploy JupyterHub and Spark with kfctl:

./start_odh.sh kfctl_openshift_dsri.yaml

🗄️ Persistent volumes are automatically created for each instance started in JupyterHub to ensure persistence of the data even when JupyterHub is stopped. You can find the persistent volumes in the DSRI web UI: go to the Administrator view > Storage > Persistent Volume Claims.

⚡️ A Spark cluster with 3 workers is automatically created with the service name spark-cluster. You can use the URL of the master node to access it from your workspace: spark://spark-cluster:7077

✨ Use the Spark cluster

Matching Spark versions

Make sure all the Spark versions match; the current default version is 3.0.1

You can test the Spark cluster connection with PySpark:

from pyspark.sql import SparkSession, SQLContext
import os
import socket
# Create a Spark session
spark_cluster_url = "spark://spark-cluster:7077"
spark = SparkSession.builder.master(spark_cluster_url).getOrCreate()
sc = spark.sparkContext

# Test your Spark connection
spark.range(5, numPartitions=5).rdd.map(lambda x: socket.gethostname()).distinct().collect()
# Or try:
#x = ['spark', 'rdd', 'example', 'sample', 'example']
x = [1, 2, 3, 4, 5]
y = sc.parallelize(x)
y.collect()
# Or try:
data = [1, 2, 3, 4, 5]
distData = sc.parallelize(data)
distData.reduce(lambda a, b: a + b)

Match the version

Make sure all the Spark versions match; the current default version is 3.0.1:

  • Go to the Spark UI to verify the version of the Spark cluster
  • Run spark-shell --version to verify the version of the Spark binary installed in the workspace
  • Run pip list | grep pyspark to verify the version of the PySpark library

Check the JupyterLab workspace Dockerfile to change the version of Spark installed in the workspace, and see how you can download and install a new version of the Spark binary.

If you need to change the Python, Java or PySpark version in the workspace, you can create an environment.yml file, for example for PySpark 2.4.5:

name: spark
channels:
  - defaults
  - conda-forge
  - anaconda
dependencies:
  - python=3.7
  - openjdk=8
  - ipykernel
  - nb_conda_kernels
  - pip
  - pip:
      - pyspark==2.4.5

Create the environment with mamba (or conda):

mamba env create -f environment.yml
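Then activate it; with nb_conda_kernels installed, the new environment should also show up as a kernel in the JupyterLab launcher (the name spark comes from the environment.yml above):

conda activate spark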

Spark UI

You can also create a route to access the Spark UI and monitor the activity on the Spark cluster:

oc expose svc/spark-cluster-ui

Get the Spark UI URL:

oc get route --selector radanalytics.io/service=ui --no-headers -o=custom-columns=HOST:.spec.host

New Spark cluster

You can create a new Spark cluster, for example here using Spark 3.0.1 with the installed Spark Operator:

cat <<EOF | oc apply -f -
apiVersion: radanalytics.io/v1
kind: SparkCluster
metadata:
  name: spark-cluster
spec:
  customImage: quay.io/radanalyticsio/openshift-spark:3.0.1-2
  worker:
    instances: '10'
    memory: "4Gi"
    cpu: 4
  master:
    instances: '1'
    memory: "4Gi"
    cpu: 4
  env:
    - name: SPARK_WORKER_CORES
      value: 4
EOF

You can browse the list of available image versions here

See the Radanalytics Spark operator example configuration for more details on the Spark cluster configuration.

🗑️ Delete the deployment

Delete the running JupyterHub application and Spark cluster, including persistent volumes:

./delete_odh.sh kfctl_openshift_dsri.yaml
- +

JupyterHub with Spark

JupyterHub is ideal to enable multiple users easily start predefined workspaces in the same project. The complimentary Apache Spark cluster can be used from the workspaces to perform distributed processing.

🧊 Install kfctl

You will need to have the usual oc tool installed, and to install kfctl on your machine, a tool to deploy Kubeflow applications, download the latest version for your OS 📥️

You can then install it by downloading the binary and putting it in your path, for example on Linux:

wget https://github.com/kubeflow/kfctl/releases/download/v1.2.0/kfctl_v1.2.0-0-gbc038f9_linux.tar.gz
tar -xzf kfctl_v1.2.0-0-gbc038f9_linux.tar.gz
sudo mv kfctl /usr/local/bin/

Clone the repository with the DSRI custom images and deployments for the OpenDataHub platform, and go to the kfdef folder:

git clone https://github.com/MaastrichtU-IDS/odh-manifests
cd odh-manifests/kfdef

🪐 Deploy JupyterHub and Spark

Go the the kfdef folder

All scripts need to be run from the kfdef folder 📂

You can deploy JupyterHub with 2 different authentications system, use the file corresponding to your choice:

  • For the default DSRI authentication use kfctl_openshift_dsri.yaml

  • For GitHub authentication use kfctl_openshift_github.yaml

    • You need to create a new GitHub OAuth app: https://github.com/settings/developers

    • And provide the GitHub client ID and secret through environment variable before running the start script:

      export GITHUB_CLIENT_ID=YOUR_CLIENT_ID
      export GITHUB_CLIENT_SECRET=YOUR_CLIENT_SECRET

First you will need to change the namespace: in the file you want to deploy, to provide the project where you want to start JupyterHub (currently opendatahub-ids), then you can deploy JupyterHub and Spark with kfctl:

./start_odh.sh kfctl_openshift_dsri.yaml

🗄️ Persistent volumes are automatically created for each instance started in JupyterHub to insure persistence of the data even JupyterHub is stopped. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims.

⚡️ A Spark cluster with 3 workers is automatically created with the service name spark-cluster, you can use the URL of the master node to access it from your workspace: spark://spark-cluster:7077

✨ Use the Spark cluster

Matching Spark versions

Make sure all the Spark versions are matching, the current default version is 3.0.1

You can test the Spark cluster connection with PySpark:

from pyspark.sql import SparkSession, SQLContext
import os
import socket
# Create a Spark session
spark_cluster_url = "spark://spark-cluster:7077"
spark = SparkSession.builder.master(spark_cluster_url).getOrCreate()
sc = spark.sparkContext

# Test your Spark connection
spark.range(5, numPartitions=5).rdd.map(lambda x: socket.gethostname()).distinct().collect()
# Or try:
#x = ['spark', 'rdd', 'example', 'sample', 'example']
x = [1, 2, 3, 4, 5]
y = sc.parallelize(x)
y.collect()
# Or try:
data = [1, 2, 3, 4, 5]
distData = sc.parallelize(data)
distData.reduce(lambda a, b: a + b)

Match the version

Make sure all the Spark versions match; the current default version is 3.0.1:

  • Go to the Spark UI to verify the version of the Spark cluster
  • Run spark-shell --version to verify the version of the Spark binary installed in the workspace
  • Run pip list | grep pyspark to verify the version of the PySpark library

Check the JupyterLab workspace Dockerfile to change the version of Spark installed in the workspace, and see how you can download and install a new version of the Spark binary.

If you need to change the Python, Java or PySpark version in the workspace, you can create an environment.yml file, for example for PySpark 2.4.5:

name: spark
channels:
  - defaults
  - conda-forge
  - anaconda
dependencies:
  - python=3.7
  - openjdk=8
  - ipykernel
  - nb_conda_kernels
  - pip
  - pip:
      - pyspark==2.4.5

Create the environment with conda:

mamba env create -f environment.yml
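You can then activate the environment and check that the PySpark version matches the cluster, for example:

conda activate spark
python -c "import pyspark; print(pyspark.__version__)"
# Should print 2.4.5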

Spark UI

You can also create a route to access the Spark UI and monitor the activity on the Spark cluster:

oc expose svc/spark-cluster-ui

Get the Spark UI URL:

oc get route --selector radanalytics.io/service=ui --no-headers -o=custom-columns=HOST:.spec.host

New Spark cluster

You can create a new Spark cluster, for example here using Spark 3.0.1 with the installed Spark Operator:

cat <<EOF | oc apply -f -
apiVersion: radanalytics.io/v1
kind: SparkCluster
metadata:
  name: spark-cluster
spec:
  customImage: quay.io/radanalyticsio/openshift-spark:3.0.1-2
  worker:
    instances: '10'
    memory: "4Gi"
    cpu: 4
  master:
    instances: '1'
    memory: "4Gi"
    cpu: 4
  env:
    - name: SPARK_WORKER_CORES
      value: 4
EOF

You can browse the list of available image versions here

See the Radanalytics Spark operator example configuration for more details on the Spark cluster configuration.

🗑️ Delete the deployment

Delete the running JupyterHub application and Spark cluster, including persistent volumes:

./delete_odh.sh kfctl_openshift_dsri.yaml

JupyterHub workspace

🪐 Start your workspace

You can easily start a data science workspace with JupyterLab, VisualStudio Code and Conda pre-installed on the DSRI with JupyterHub:

  1. Connect to the UM VPN
  2. Go to https://jupyterhub-github.apps.dsri2.unimaas.nl
  3. Login with your GitHub account
  4. Choose the type of workspace, and the resource limitations
  5. Optionally you can provide additional parameters as environment variables:
    1. GIT_NAME and GIT_EMAIL: your name and email that will be used when committing with git
    2. GIT_URL: the URL of a git repository to be automatically cloned in the workspace, if there is a requirements.txt it will be automatically installed with pip

Once your workspace has started you can:

  • Use the persistent folder to put data that will be kept even when the server is stopped, or if you use a different type of workspace
  • Clone your code repository with git
  • Install packages with mamba/conda or pip
  • Go to the workspace overview: https://jupyterhub-github.apps.dsri2.unimaas.nl/hub/home to see your workspace, and stop it.
tip

Put all the commands you use to install the packages required to run your code in a file in the persistent folder (ideally in the git repository with your code), so you can easily reinstall your environment if your workspace is stopped.

📦️ Manage dependencies with Conda

In your workspace you can install new conda environments. If they include the packages nb_conda_kernels and ipykernel, you will be able to easily start notebooks in those environments from the JupyterLab Launcher page.

Install a conda environment from a file with mamba (it is like conda but faster):

mamba env create -f environment.yml

You'll need to wait about 1 minute before the new conda environment becomes available on the JupyterLab Launcher page.

You can easily install an environment with a different version of Python if you need it. Here is an example of an environment.yml file to create an environment with Python 3.9, install the minimal dependencies required to easily start notebooks in this environment with conda, and install a pip package:

name: py39
channels:
  - defaults
  - conda-forge
  - anaconda
dependencies:
  - python=3.9
  - ipykernel
  - nb_conda_kernels
  - pip
  - pip:
      - matplotlib

🐙 Use git in JupyterLab

You can use git from the terminal.

You can also use the JupyterLab Git extension or the VisualStudio Code git integration to clone and manage your git repositories.

They will ask you for a username and personal access token if the repository is private, or the first time you want to push changes.
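If you get asked often, you can optionally let git cache the credentials in memory for a while, e.g. one hour (a standard git option, shown here as a suggestion):

git config --global credential.helper 'cache --timeout=3600'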


Login to Docker registries

Logging in to an external container registry can be helpful to pull private images, or to increase the DockerHub pull limits.

You will need to create a secret in your project, then link it to the default service account of this project.

We detail here the process for the UM Container Registry, GitHub Container Registry and DockerHub, but the process is similar for any other container registry (e.g. quay.io).

UM Container registry

Access

You need to be connected to the UM network to access this container registry.

This container registry is available at UM Container registry. Here you can login using your UM credentials by clicking on the "Login via OIDC provider" button.

Harbor_login_page
Public Projects

You don't need to follow the steps below if you are using one of the Public projects. These are available without credentials.

Logging in with Docker CLI

  1. Go to UM Container registry, click on your username in the top right corner followed by clicking on User Profile. Click on the Copy icon.
  2. Login with your credentials:
docker login cr.icts.unimaas.nl

Username: <UM username>
Password: <CLI secret copied in step 1>

Using a Proxy Cache

  1. Go to UM Container registry, look for a project of type Proxy Cache. For each of the major registries we created a Proxy Cache. Remember the project name, for example dockerhub.
  2. On the DSRI you can deploy an image like in this example:
Harbor_proxy_cache
Docker CLI

The same concept can be applied using the docker CLI:

docker pull cr.icts.unimaas.nl/dockerhub/ubuntu:22.04

Creating your own project

  1. Go to UM Container registry, click on + NEW PROJECT. Fill in the details of project name and Access Level (preferred method is to leave the checkbox unchecked).

  2. Click OK

Using your own user

  1. Go to UM Container registry, click on your username in the top right corner followed by clicking on User Profile. Click on the Copy icon.

  2. Create a secret to login to UM Harbor Container Registry in your project:

oc create secret docker-registry um-harbor-secret --docker-server=cr.icts.unimaas.nl --docker-username=<UM username> --docker-password=<copied in step 1>
  3. Link the login secret to the default service account:
oc secrets link default um-harbor-secret --for=pull
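You can check that the secret is now linked, for example:

oc describe serviceaccount default
# um-harbor-secret should be listed under "Image pull secrets"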

Using a robot account

  1. Go to UM Container registry, click on your project if you already created one.

  2. Click on the tab Robot Accounts

  3. Click on New Robot Account

  4. Create the Robot account to your liking

  5. Copy the secret or export it

  6. Create a secret to login to UM Harbor Container Registry in your project:

oc create secret docker-registry um-harbor-secret --docker-server=cr.icts.unimaas.nl --docker-username=<robot account name> --docker-password=<copied or exported in step 5>
  7. Link the login secret to the default service account:
oc secrets link default um-harbor-secret --for=pull

GitHub Container Registry

  1. Go to GitHub Settings, and create a Personal Access Token (PAT) which will be used as password to connect to the GitHub Container Registry

  2. Create a secret to login to GitHub Container Registry in your project:

oc create secret docker-registry github-ghcr-secret --docker-server=ghcr.io --docker-username=<github-username> --docker-password=<github-personal-access-token> --docker-email=<email-address>
  3. Link the login secret to the default service account:
oc secrets link default github-ghcr-secret --for=pull
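Optionally, you can first check that the PAT works by logging in to ghcr.io locally with docker (a sketch; GITHUB_PAT is a placeholder environment variable holding your token):

echo $GITHUB_PAT | docker login ghcr.io -u <github-username> --password-stdin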

DockerHub

Increase DockerHub limitations

Logging in with DockerHub also increases the DockerHub limits to pull images in your project

  1. Create a secret to login to DockerHub in your project:
oc create secret docker-registry dockerhub-secret --docker-server=docker.io --docker-username=<dockerhub-username> --docker-password=<dockerhub-password> --docker-email=<email-address>
  2. Link the login secret to the default service account:
oc secrets link default dockerhub-secret --for=pull

Run MPI jobs

We deployed the MPI Operator from Kubeflow to run MPI jobs on the DSRI.

The MPI Operator makes it easy to run allreduce-style distributed training on Kubernetes. Please check out this blog post for an introduction to MPI Operator and its industry adoption.

Run MPI jobs on CPU

Check out the repository of the CPU benchmark for a complete example of an MPI job: python script, Dockerfile, and the job deployment YAML.

  1. Clone the repository, and go to the example folder:
git clone https://github.com/kubeflow/mpi-operator.git
cd mpi-operator/examples/horovod
  2. Open the tensorflow-mnist.yaml file, and fix the apiVersion on the first line:
# From
apiVersion: kubeflow.org/v1
# To
apiVersion: kubeflow.org/v1alpha2

You will also need to specify that those containers can run as the root user, by adding the serviceAccountName between spec: and containers: for the launcher and the worker templates:

      template:
        spec:
          serviceAccountName: anyuid
          containers:
          - image: docker.io/kubeflow/mpi-horovod-mnist

Your tensorflow-mnist.yaml file should look like this:

apiVersion: kubeflow.org/v1alpha2
kind: MPIJob
metadata:
  name: tensorflow-mnist
spec:
  slotsPerWorker: 1
  cleanPodPolicy: Running
  mpiReplicaSpecs:
    Launcher:
      replicas: 1
      template:
        spec:
          serviceAccountName: anyuid
          containers:
          - image: docker.io/kubeflow/mpi-horovod-mnist
            name: mpi-launcher
            command:
            - mpirun
            args:
            - -np
            - "2"
            - --allow-run-as-root
            - -bind-to
            - none
            - -map-by
            - slot
            - -x
            - LD_LIBRARY_PATH
            - -x
            - PATH
            - -mca
            - pml
            - ob1
            - -mca
            - btl
            - ^openib
            - python
            - /examples/tensorflow_mnist.py
            resources:
              limits:
                cpu: 1
                memory: 2Gi
    Worker:
      replicas: 2
      template:
        spec:
          serviceAccountName: anyuid
          containers:
          - image: docker.io/kubeflow/mpi-horovod-mnist
            name: mpi-worker
            resources:
              limits:
                cpu: 2
                memory: 4Gi

  3. Once this has been set, create the job in your current project on the DSRI (switch with oc project my-project):
oc create -f tensorflow-mnist.yaml

You should see the 2 workers and the main job running in your project's Topology page in the DSRI web UI. You can then easily check the logs of the launcher and workers.
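For example, from the terminal you can list the job's pods and follow the launcher logs (a sketch; the generated pod name suffix will differ):

oc get pods | grep tensorflow-mnist
oc logs -f tensorflow-mnist-launcher-<suffix>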

To run your own MPI job on the DSRI, you can take a look at, and edit, the different files provided by the MPI Operator example:

🐍 tensorflow_mnist.py: the python script with the actual job to run

🐳 Dockerfile.cpu: the Dockerfile to define the image of the containers in which your job will run (install dependencies)

⛵️ tensorflow-mnist.yaml: the YAML file to define the MPI deployment on Kubernetes (number and limits of workers, mpirun command, etc)

Visit the Kubeflow documentation on creating an MPI job for more details.

Contact us

Feel free to contact us on the DSRI Slack #helpdesk channel to discuss the use of MPI on the DSRI.


Neuroscience research

Feedback welcome

We are not experts in neuroscience ourselves, so please contact us if you see any improvements that could be made to this page, or if you need any help to get it working.

The Neurodocker project helps you create a Docker image with the neuroscience software you need, such as FSL, FreeSurfer, AFNI or ANTs.

Check out the Neurodocker documentation for more details: https://github.com/ReproNim/neurodocker

Examples

In this page we will show you how to generate a Docker image with popular neuroscience research software installed, such as FreeSurfer and FSL. Feel free to check the Neurodocker documentation, and adapt the installation process to your needs.

JupyterLab with FreeSurfer

Start a JupyterLab container with FreeSurfer pre-installed, with admin (sudo) privileges to install anything you need from the terminal (e.g. pip or apt packages).

When instantiating the template you can provide a few parameters similar to the standard JupyterLab, such as:

  • Password to access the notebook
  • Optionally you can provide a git repository to be automatically cloned in the JupyterLab (if there is a requirements.txt packages will be automatically installed with pip)
  • Docker image to use for the notebook (see below for more details on customizing the docker image)
  • Your git username and email to automatically configure git

The DSRI will automatically create a persistent volume to store the data you put in the /home/jovyan/work folder (the folder used by the notebook interface). You can find the persistent volumes in the DSRI web UI: go to the Administrator view > Storage > Persistent Volume Claims.

Deploy FreeSurfer

You can also link your git repository to the project for automatic deployment, see using git in JupyterLab

This can also be deployed using Helm from the terminal; the steps are:

helm repo add dsri https://maastrichtu-ids.github.io/dsri-helm-charts/
helm repo update
helm install freesurfer dsri/jupyterlab \
  --set serviceAccount.name=anyuid \
  --set openshiftRoute.enabled=true \
  --set image.repository=ghcr.io/maastrichtu-ids/jupyterlab \
  --set image.tag=freesurfer \
  --set storage.mountPath=/root \
  --set password=changeme
oc get route --selector app.kubernetes.io/instance=freesurfer --no-headers -o=custom-columns=HOST:.spec.host

Log in to the corresponding Jupyter notebook and start the terminal, then enter the freesurfer command.

FreeSurfer and FSL

Generate a Dockerfile with:

  • FreeSurfer 6.0.1
  • FSL 6.0.3
docker run --rm repronim/neurodocker:0.7.0 generate docker \
--base debian:stretch --pkg-manager apt \
--freesurfer version=6.0.1 --fsl version=6.0.3 > Dockerfile
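You can then build an image from the generated Dockerfile and push it to a container registry so it can be deployed on the DSRI (a sketch; the image name is an example to adapt):

docker build -t ghcr.io/<your-username>/freesurfer-fsl:latest .
docker push ghcr.io/<your-username>/freesurfer-fsl:latest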

FreeSurfer and AFNI

Generate a Dockerfile with:

  • FreeSurfer 6.0.1
  • AFNI, R and Python3
docker run --rm repronim/neurodocker:0.7.0 generate docker \
--base debian:stretch --pkg-manager apt \
--afni version=latest install_r=true install_r_pkgs=true install_python3=true \
--freesurfer version=6.0.1 > Dockerfile

Deploy the generated Dockerfile

Before deploying the Dockerfile to the DSRI you can open it, and add commands to install additional packages you are interested in, such as nighres or nipype.

Check out the documentation to deploy the Dockerfile on the DSRI.

UI with VNC

Running a UI with VNC (e.g. FSLeyes) is still a work in progress. See this issue for more details.

Use the GPUs

More details about using GPU with FSL: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/GPU


Command Line Interface

Overview

Here is an overview of common oc commands:

  • oc login --token=<token>: Login to the DSRI OpenShift cluster in your terminal
  • oc get projects: List all available projects
  • oc project <project>: Switch to a project
  • oc get pods: Get running pods (a pod can run one or multiple containers for your application)
  • oc rsh <pod_name> <command>: Remote terminal connection to a pod (Shell/Bash)
  • oc cp <from> <to>: Copy files from host to container or vice versa, e.g. from host: oc cp <local_dir> <pod>:<pod_dir>, or to host: oc cp <pod>:<pod_dir> <local_dir>
  • oc rsync <from> <to>: Similar to the rsync command on Linux, to synchronize directories between container and host or the other way around
  • oc exec <pod_id> -- <command>: Execute a command in a pod
  • oc delete pod <pod_id>: Delete a pod

Projects

List projects

oc projects

Connect to project

oc project my-project

ImageStreams

To update an ImageStream in your project to pull the latest update from the external repository (e.g. from ghcr.io or DockerHub):

oc import-image <imagestream-id>

Pods

Create pod from YAML

oc create -f my-pod.yaml

E.g. d2s-pod-virtuoso.yaml.

List pods

oc get pod

List running pods:

oc get pods --field-selector=status.phase=Running

Get specific pod

oc get pod | grep <pod_id>

Using selectors, with Apache Flink as an example, and showing only the pod ID without headers:

oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name
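As a sketch, you can also store the result in a variable to reuse the pod name in later commands:

POD=$(oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name | head -n 1)
oc rsh $POD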

Remote Shell connection

Connect to a pod with Bash.

oc rsh <pod_id>

Execute command in pod

Example creating a folder:

oc exec <pod_id> -- mkdir -p /mnt/workspace/resources

Delete pod

oc delete pod <pod_id>
Force pod deletion

If the pod is not properly deleted, you can force its deletion:

oc delete pod --force --grace-period=0 <pod_id>

Get pod logs

oc logs -f <pod_id>
Debug a pod

Get more details on how to debug a pod.

Create app from template

Create app from template using the CLI and providing parameters as arguments:

oc new-app my-template -p APPLICATION_NAME=my-app -p ADMIN_PASSWORD=mypassword

Example for the Semantic Web course notebooks:

oc new-app template-jupyterstack-notebook -p APPLICATION_NAME=swcourseName -p NOTEBOOK_PASSWORD=PASSWORD

And to delete all the applications created with this template:

oc delete all --selector template=template-jupyterstack-notebook

Copy files

See the Load data page.


Delete objects (advanced)

Be careful

This documentation provides guidelines to delete various types of objects in the OpenShift DSRI. Be careful when deleting objects in your project, as they could be required to run an application.

It is recommended to use the oc tool to delete OpenShift objects, as it allows you to properly delete all objects related to specific deployments.

Project

Make sure you are connected to the right project:

oc project my-project

Delete an application

The best way to make sure all objects related to your application have been deleted is to use the command line providing your application name.

Different selectors can be used to easily delete all objects generated by an application deployment. 2 selectors can easily be found in the template configuration:

  • app : the name you gave when creating your application
  • template : the name of the template you used to create the application. Use it only if you want to delete all applications created by a specific template.
oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application

Delete storage if necessary from the OpenShift web UI.
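To check that no objects with the application label remain, you can for example run:

oc get all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application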

Force deletion

You can force the deletion if the objects are not deleting properly:

oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application

Delete pod

Get the ID of the specific pod you want to delete:

oc get pod

Use the pod ID retrieved to delete the pod:

oc delete pod <POD_ID>
Force deletion

If the pod is not properly deleted, you can force its deletion:

oc delete pod --force --grace-period=0 <POD_ID>

Delete a project

Be careful

All objects and persistent storages in this project will be deleted and cannot be retrieved.

  1. To properly delete a project you need to first delete all objects in this project:
oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --all -n <PROJECT_ID>
  2. Then delete the project:
oc delete project <PROJECT_ID>

Delete persistent storage

Be careful

All data stored in this persistent storage will be lost and cannot be retrieved.

oc delete pvc storage-name

Fix stuck deletions

Stuck provisioned service

If a provisioned service is stuck on Marked for deletion you might need to set finalizers to null in the YAML.

This can be done using the OpenShift web UI:

  • Go to the Provisioned Service in the OpenShift UI overview

  • Click on Edit YAML

  • Remove the finalizers:

      finalizers:
    - kubernetes-incubator/service-catalog

You can also do it using the oc CLI:

oc get serviceinstance

# Remove the problematic finalizer line from the serviceinstances so they can be deleted
oc get serviceinstance -o yaml | sed "/kubernetes-incubator/d" | oc apply -f -
No global catalog

The OpenShift Catalog does not handle deploying templates globally properly (on all projects). If a template is deployed globally, OpenShift will try to create unnecessary objects such as provisioned service (aka. ServiceInstance), or ClusterClasses. Those services are not used, and some of them cannot be deleted easily.

Catalog per project

At the moment it is more reliable to create the template directly in your project if you need to use it multiple times.

Delete stuck project

Projects can get stuck as marked for deletion, usually due to objects still present in the project that are not terminated, or finalizers left in some objects' YAML files.

The following commands will allow you to clean up all the projects stuck in a terminating state that you have access to.

Force deletion of terminating projects:

for i in $(oc get projects  | grep Terminating| awk '{print $1}'); do echo $i; oc delete project --force --grace-period=0 $i ; done

Delete all objects in terminating projects:

for i in $(oc get projects  | grep Terminating| awk '{print $1}'); do echo $i; oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --force --grace-period=0 --all -n $i ; done

Remove Kubernetes finalizers from terminating projects:

for i in $(oc get projects  | grep Terminating| awk '{print $1}'); do echo $i; oc get project $i -o yaml | sed "/kubernetes/d" | sed "/finalizers:/d" | oc apply -f - ; done
Fix deletion

If ServiceInstances refuse to be deleted, try to remove the kubernetes finalizers:

for i in $(oc get projects  | grep Terminating| awk '{print $1}'); do echo $i; oc get serviceinstance -n $i -o yaml | sed "/kubernetes-incubator/d"| oc apply -f - ; done
Check deletion

Check if there are still objects in a project:

oc get all,configmap,pvc,serviceaccount,secret,rolebinding,serviceinstance

Delete an application

It is recommended to use the oc tool to delete an application, as it allows you to properly delete all objects related to the application deployment.

Project

Make sure you are connected to the right project:

oc project my-project

From the terminal

The best way to make sure all objects related to your application have been deleted is to use the command line providing your application name:

oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application
Force deletion

You can force the deletion if the objects are not deleting properly:

oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application

From the web UI

We recommend using the oc CLI to easily delete an application. But in case you cannot install oc on your computer, you can delete the different objects created by the application (easy to find in the Topology page):

  1. Delete the Route
  2. Delete the Service
  3. Delete the Deployment Config
Delete application from the web UI

Install the client

Install the OpenShift Command Line Interface (CLI), oc, to access the DSRI from your computer's terminal.

The oc CLI enables you to perform operations on your applications deployed on the DSRI, such as:

  • Copy large files to or from the DSRI using oc cp
  • Connect to an application terminal using oc rsh
  • Get the applications running in your project with oc get pods

Install the oc client

On Linux

Download the oc and kubectl Command Line Interface clients:

wget https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz && tar xvf oc.tar.gz
sudo mv oc kubectl /usr/local/bin/
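You can then check that the client was installed properly, for example:

oc version --client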

On Mac

Use brew:

brew install openshift-cli

Or manually download the program and add it to your path:

  1. Download https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/macosx/oc.tar.gz

  2. Unzip the archive

  3. Move the oc binary to a directory on your PATH.

    To check your PATH, open a terminal and execute the following command:

    echo $PATH

On Windows

  1. Create a folder for OpenShift in Program Files: C:\Program Files (x86)\OpenShift
  2. Click here to download the oc tool .zip file, and move it to C:\Program Files (x86)\OpenShift.
  3. Extract the .zip file.
  4. Next, set the system PATH environment variable for the directory containing the oc.exe file, which now resides in your newly created OpenShift folder inside of C:\Program Files (x86)\OpenShift
    1. Open the Control Panel, and click on System
    2. Click on Advanced system settings on the left or open the Advanced tab of System Properties.
    3. Click the button labeled Environment Variables... at the bottom.
    4. Look for the option Path in either the User variables section (for the current user) or the System variables section (for all users on the system).
Set OC Path

This makes it easy to access the oc command line interface by simply opening up the PowerShell and typing in the oc command, e.g.:

oc version
Official documentation

Login in the terminal with oc

To use the oc Command Line Interface, you will need to authenticate to the DSRI in your terminal:

PASSWORD NOT SUPPORTED

Authentication to the oc Command Line Interface using your password is not supported.

oc login --token=<token>

The token is provided by the Web UI:

  1. Go to the DSRI web UI.

  2. Click on the Copy Login Command button (in the top right of the page).

    Deploy VSCode
  3. Paste the copied command in your terminal, and execute it to login with oc 🔑

Login command

The command should look like this:

oc login https://api.dsri2.unimaas.nl:6443 --token=$GENERATED_TOKEN

Upload data

In RStudio, JupyterLab and VSCode

  • If you are using JupyterLab or VSCode you should be able to load data to the container by simply dragging and dropping the files to upload into the JupyterLab/VSCode web UI.
  • For RStudio, use the Upload file button in the RStudio web UI to upload files from your computer to the RStudio workspace.
File too big

If those solutions don't work due to the file size, try one of the solutions below.

Copy large files with the terminal

The quickest way to upload large files or folders from a laptop or server to the DSRI is to use the oc command line interface.

Install the client

To install the oc client on your laptop/server, visit the Install the client page

oc cp directly copies files, overwriting existing ones, from a laptop or server to an application pod on the DSRI.

First get the <pod_id> using your application name:

oc get pod --selector app=<my_application_name>

Copy from local to pod

Folders are uploaded recursively by default:

oc cp <folder_to_copy> <pod_id>:<absolute_path_in_pod>
Use absolute path in the pod

You need to provide the absolute (full) path where you want to copy it in the pod. Use your application workspace path (e.g. /home/jovyan for JupyterLab or /home/rstudio for RStudio).

For example:

oc cp my-folder jupyterlab-000:/home/jovyan

You can also use this one-liner to automatically get the pod ID based on your app label:

oc get pod --selector app=<my_application_name> | xargs -I{} oc cp <folder_to_copy> {}:<absolute_path_in_pod>

Copy from pod to local

Just do the inverse:

oc cp <pod_ID>:<path_to_copy> <local_destination>

Download data from SURFdrive

You can download data from your SURFdrive to your pod by creating a public link to the file:

  1. Go to the file in SURFdrive you'd like to share
  2. Click share and then create public link
  3. Fill in a name for the public link (like DSRI). The name does not matter much, but it can help you keep track of the goal of the public link.
  4. Click copy to clipboard
  5. Visit the link in a browser and copy the direct URL displayed on that page.
  6. Use the direct URL you just copied to download the file using either wget or curl (e.g. "wget https://surfdrive.surf.nl/files/index.php/s/5mFwyAKj4UexlJb/download")
  7. Revoke link in the SURFdrive portal

Synchronize files with oc rsync

If you have a lot of large files and/or they are updated regularly, you can use rsync, as it synchronizes the files if they already exist, preventing duplication and making synchronization faster. You can also see the progress with rsync, which you cannot with cp. And if the upload is stopped for any reason, rsync should pick it up from where it stopped (instead of restarting from scratch like oc cp does).

caution

Rsync does not work with symlinks (created with ln -s)

Sync local to pod

oc rsync --progress <folder_to_sync> <pod-id>:<sync_path_in_pod>

You can also use this one-liner to automatically get the pod ID based on your app label:

oc get pod --selector app=<my_application_name> | xargs -I{} oc rsync --progress <folder_to_sync> {}:<absolute_path_in_pod>

Sync pod to local

Again, do the inverse:

oc rsync --progress <pod-id>:<folder_to_sync> <local_destination_to_sync>

More options

You can use more options to improve the upload of large files:

  • --compress: compress file data during the transfer
  • --delete: delete files not present in the source
  • --watch: watch the directory for changes and resync automatically

One-liner
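For example, you can combine the options above with the pod lookup shown earlier (a sketch; adjust the app label and paths to your deployment):

oc get pod --selector app=<my_application_name> --no-headers -o=custom-columns=NAME:.metadata.name | xargs -I{} oc rsync --progress --compress <folder_to_sync> {}:<absolute_path_in_pod>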

- +

Upload data

In RStudio, JupyterLab and VSCode

  • If you are using JupyterLab or VSCode you should be able to load data to the container by simply drag and drop the files to upload in the JupyterLab/VSCode web UI.
  • For RStudio, use the Upload file button in the RStudio web UI to upload files from your computer to the RStudio workspace.
File too big

If those solutions don't work due to the files size, try one of the solutions below.

Copy large files with the terminal

The quickest way to upload large files or folders from a laptop or server to the DSRI is to use the oc command line interface.

Install the client

To install the oc client on your laptop/server, visit the Install the client page

oc cp directly copy, and overwrite existing files, from a laptop or server to an Application pod on the DSRI.

First get the <pod_id> using your application name:

oc get pod --selector app=<my_application_name>

Copy from local to pod

Folders are uploaded recursively by default:

oc cp <folder_to_copy> <pod_id>:<absolute_path_in_pod>
Use absolute path in the pod

You need to provide the absolute (full) path where you want to copy it in the pod. Use your application workspace path, e.g. /home/jovyan for JupyterLab or /home/rstudio for RStudio)

For example:

oc cp my-folder jupyterlab-000:/home/jovyan

You can also use this one-liner to automatically get the pod ID based on your app label:

oc get pod --selector app=<my_application_name> | xargs -I{} oc cp <folder_to_copy> {}:<absolute_path_in_pod>

Copy from pod to local

Just do the inverse:

oc cp <pod_ID>:<path_to_copy> <local_destination>

Download data from SURFdrive

You can download data from your SURFdrive to your pod by creating a public link to the file:

  1. Go to the file in SURFdrive you'd like to share
  2. Click share and the create public link
  3. Fill in a name for the public link (like DSRI). The name does not matter much, but it can help you keep track of the goal of the public link.
  4. Click copy to clipboard
  5. Visit link in browser and copy the direct URL displayed on that page.
  6. Use the direct URL you just copied to download the file using either wget or curl (e.g. "wget https://surfdrive.surf.nl/files/index.php/s/5mFwyAKj4UexlJb/download")
  7. Revoke the link in the SURFdrive portal

Synchronize files with oc rsync

If you have a lot of large files and/or they are updated regularly, you can use rsync: it synchronizes files that already exist, preventing duplication and making synchronization faster. You can also see the progress with rsync, which you cannot with cp. And if the upload is stopped for any reason, rsync will pick it up from where it stopped (instead of restarting from scratch like oc cp does).

caution

Rsync does not work with symlinks (created with ln -s)

Sync local to pod

oc rsync --progress <folder_to_sync> <pod-id>:<sync_path_in_pod>

You can also use this one-liner to automatically get the pod ID based on your app label:

oc get pod --selector app=<my_application_name> -o jsonpath='{.items[0].metadata.name}' | xargs -I{} oc rsync --progress <folder_to_sync> {}:<absolute_path_in_pod>

Sync pod to local

Again, do the inverse:

oc rsync --progress <pod-id>:<folder_to_sync> <local_destination_to_sync>

More options

You can use more options to improve the upload of large files:

--compress: compress file data during the transfer
--delete: delete files not present in the source
--watch: watch the directory for changes and resync automatically
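
For example, here is a minimal sketch combining these options for a one-shot upload; the folder, pod name and target path are placeholders to adapt to your application:

# my-data, jupyterlab-000 and /home/jovyan/my-data are placeholders to adapt
oc rsync --progress --compress --delete ./my-data jupyterlab-000:/home/jovyan/my-data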



Data storage

Different storages can be used when running services on the DSRI:

🦋 Ephemeral storage: the storage is bound to the pod, and data will be lost when the pod is deleted (but this deployment does not require requesting the creation of a persistent storage, and is faster for testing code).

Dynamic storage: automatically creates a persistent storage when starting an application. It can also be created in the OpenShift web UI, using the dynamic-maprfs Storage Class.

🗄️ Persistent storage: you can use a persistent storage volume to store data; see the Create the Persistent Storage section below to create one yourself. Please keep in mind that no backups are made of data on the DSRI.

Storage per project

A storage (aka. Persistent Volume Claim) is only accessible in the project where it has been created.

Create the Persistent Storage

  1. Switch to the Administrator view

  2. Go to the Project panel

  3. Select your project

  4. Expand the Storage panel then go to the Persistent Volume Claim panel

  5. Click the button called Create Persistent Volume Claim

    You will then be redirected to the Create Persistent Volume Claim wizard

  6. Provide a unique Persistent Volume Claim Name starting with pvc-

    example: pvc-filebrowser

  7. Select the Access Mode RWX and the Storage Size

    Access Mode   | CLI abbreviation | Description
    ReadWriteOnce | RWO              | The volume can be mounted as read-write by a single node.
    ReadOnlyMany  | ROX              | The volume can be mounted as read-only by many nodes.
    ReadWriteMany | RWX              | The volume can be mounted as read-write by many nodes.
  8. Click Create
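
Equivalently, here is a minimal sketch of creating a similar PVC from the terminal with oc apply; the claim name, the 5Gi size and the ceph-fs storage class are assumptions to adapt to your project:

# pvc-filebrowser, ceph-fs and 5Gi are assumptions to adapt to your project
cat <<EOF | oc apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-filebrowser
spec:
  storageClassName: ceph-fs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
EOF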

Create Persistent Storage
info

The DSRI uses OpenShift Container Storage (OCS), which is based on Ceph and offers the ReadWriteOnce access mode.

  • ReadWriteOnce (RWO) volumes cannot be mounted on multiple nodes. Use the ReadWriteMany (RWX) access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on a shut down or crashed node.

Static persistent volumes provide sustainable persistent storage over time for applications that need to run regular Docker images (which usually use the root user).

info

Some applications, such as the Jupyter template, automatically create a persistent storage

Connect the Existing Persistent Storage

On the Topology page, select your application:

  1. Click Action on your application

  2. Select the Add Storage option from the dropdown list.

  3. Select the Use Existing Claim option from the Add Storage wizard and select the claim

  4. Add the Mount Path

  5. Save
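
The same connection can be sketched from the terminal with oc set volume; the deployment name, claim name and mount path below are placeholders (use dc/<name> instead if your application is a DeploymentConfig):

# my-app, pvc-filebrowser and the mount path are placeholders
oc set volume deployment/my-app --add --type=persistentVolumeClaim --claim-name=pvc-filebrowser --mount-path=/home/jovyan/data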

Add Existing Persistent Storage
info

You can use the method above if you want to connect more applications to the same storage

Expand existing Persistent Storage

  1. Switch to the Administrator view

  2. Go to the Project panel

  3. Select your project

  4. Expand the Storage panel then go to the Persistent Volume Claim panel

  5. Click on the three dots (⋮) next to the Persistent Volume Claim you want to expand.

  6. Click on Expand PVC in the menu.

  7. Enter the size you want to expand your PVC with.

  8. Hit Expand. It can take up to 2 minutes before your PVC is expanded.
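
As a sketch, the same expansion can also be requested from the terminal by patching the claim; the claim name and the 20Gi target size are placeholders, and the requested size can only be increased:

# pvc-filebrowser and 20Gi are placeholders; the size can only be increased
oc patch pvc pvc-filebrowser --patch '{"spec": {"resources": {"requests": {"storage": "20Gi"}}}}'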

Use the dynamic storage

Dynamic persistent volumes can be created automatically by an application template.

Dynamic storage can also be created manually. Go to Storage on the left sidebar in a project:

  1. Click Create Storage top right of the Storage page.
  2. Storage class: ceph-fs
  3. Access Mode:
    • Single User (RWO): only the user who created this volume can read/write to this volume.
    • Shared Access (RWX): all users with access to the project can read/write this volume.
    • Read Only (ROX): all users with access to the project can read this volume.

Use the ephemeral storage

Disabled

We currently disabled this solution by default, as it was confusing for users and would lead to data loss.

When creating a pod, OpenShift uses ephemeral storage by default: it creates a volume bound to the pod, so the volume will be deleted when the pod is deleted.

It is recommended to use dynamic provisioning for a more sustainable storage solution, but ephemeral storage can be sufficient for testing.


Install from Operators

The Operator Framework is an open source toolkit to manage Kubernetes native applications, called Operators, in an effective, automated, and scalable way.

Use existing Operators

You can explore published Operators at https://operatorhub.io

Install existing Operators

Contact us

Contact us on the DSRI Slack #helpdesk channel, if you want to install a new Operator on the DSRI.

Build Operators

Install the operator-sdk tool. See the official documentation.

Operators can be built using 3 different approaches:

  • Helm: a framework to define the deployment logic based on regular Kubernetes YAML, but with fewer capabilities for complete auto-update and insights.
  • Ansible: define the deployment logic with Ansible; provides maximum capabilities.
  • Golang: define the deployment logic in Golang; provides maximum capabilities, but requires more code.
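
For instance, here is a minimal sketch of scaffolding a Helm-based operator; the directory name, domain, group, version and kind are hypothetical placeholders, and you should check the operator-sdk documentation for the exact flags of your version:

# my-operator, example.com, demo, v1alpha1 and MyApp are hypothetical placeholders
mkdir my-operator && cd my-operator
operator-sdk init --plugins=helm --domain=example.com --group=demo --version=v1alpha1 --kind=MyApp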

External resources

Documentation:

Examples:


Prepare your project

Code in a git repository

Using git is mandatory to deploy your code on the DSRI. Store your code in a git repository to keep track of changes, and make it easier to share and re-use your code outside of your computer.

Platform recommendations

We recommend these platforms depending on your use case:

Any other git platform, such as BitBucket or gitlab.com, is fine too.

Get your data ready

If your project is using a large amount of data that cannot be pushed to a git repository, you will need to use a persistent storage to store your data on the DSRI. See the Storage on the DSRI documentation for more details about creating a persistent storage.

Here are the options to upload your data to the DSRI storage:

Data is on your local machine

If the data is stored on a local machine, such as your computer:

  • Drag and drop files from your computer to the VisualStudio Code or JupyterLab web UI, if applicable.
  • Otherwise, use the oc cp command to copy data to your application pod. See the Load data documentation page for more information.
Upload to persistent storage

Make sure you upload the data to a folder mounted on a persistent storage in the pod to avoid losing your data if the pod restarts.

Data is on a server

Same as for your laptop, you will need to install and use the oc cp command to copy data to your application pod. See the Load data documentation page for more information.

Request access to internal UM servers

In certain cases, UM servers are not accessible by default from the DSRI. This is even the case for servers that are normally publicly accessible. To be able to access these UM servers from the DSRI, we need to put in a request to open the connection.

Please let us know either the server name and port you would like to access, or the URL (e.g. um-vm0057.unimaas.nl on port 443 or https://gitlab.maastrichtuniversity.nl). You can reach out to us either by mail or on Slack.

The procedure is described in the diagram below:

Access procedure UM servers

PyTorch Profiling

What is profiling?

According to Wikipedia:

"Profiling is a form of dynamic program analysis that measures, for example, the space (memory) or time complexity of a program, the usage of particular instructions, or the frequency and duration of function calls. Most commonly, profiling information serves to aid program optimization, and more specifically, performance engineering."

Why should I care about profiling?

You may know that training large models like GPT-3 costs several million dollars (source) and a few hundred MWh (source). If the engineers that trained these models had not spent time on optimization, it might have been several million dollars and hundreds of MWh more.

Sure, the model you'd like to train is probably not quite as big. But maybe you want to train it 10000 times, because you want to do hyperparameter optimization. And even if you only train it once, it may take quite a bit of compute resources, i.e. money and energy.

When should I care about profiling?

Well, you should always care whether your code runs efficiently, but there are different levels of caring.

From personal experience: if I know I'm going to run code only once, for a few days, on a single GPU, I'll probably not create a full profile. What I would do is inspect my GPU and CPU utilization during my runs, just to see if it is somewhat efficient, and if I didn't make any obvious mistakes (e.g. accidentally not using the GPU, even though I have one available).
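
For that kind of quick check, a terminal in your workspace is enough; here is a minimal sketch, assuming a GPU-enabled image where nvidia-smi is available:

# assumes a GPU-enabled image where nvidia-smi is installed
watch -n 2 nvidia-smi   # refresh GPU utilization and memory every 2 seconds
top                     # inspect CPU and memory usage per process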

If I know that I'll run my code on multiple GPUs, for multiple days, (potentially) on multiple nodes, and/or I need to run it multiple times, I know that my resource footprint is going to be large, and it's worth spending some time and effort to optimize the code. That's when I'll create a profile. The good part is: the more often you do it, the quicker and more adept you become at it.

How can the DSRI team help you?

We can assist you with analyzing the bottleneck(s) in your deep learning pipeline and recommend improvements to speed it up.

External Resources and references


Create a new Project

Create a project using the web UI

Avoid creating multiple projects

Please try to avoid creating multiple projects unnecessarily. Be responsible and delete applications you are no longer using in your project to free resources, instead of creating a new project with a different number at the end.

It is also easier to connect your different application containers and storages when you create them in the same project.

You can create a project using the Developer perspective, as follows:

  1. Click the Project drop-down menu to see a list of all available projects. Select Create Project.

  2. In the Create Project dialog box, enter a unique name in the Name field. Use a short and meaningful name for your project as the project identifier is unique across all projects, such as workspace-yourname or ml-covid-pathways

  3. Add the Display Name (e.g. DSRI Workshop) and Description (e.g. DSRI Community Workshop Projects) details for the project.

  4. Click Create.

  5. Use the left navigation panel to navigate to the Project view and see the dashboard for your project.

Create Project
  6. Optional:

    • Use the Project drop-down menu at the top of the screen and select all projects to list all of the projects in your cluster.
    • Use the Details tab to see the project details.
    • If you have adequate permissions for a project, you can use the Project Access tab to provide or revoke administrator, edit, and view privileges for the project.

Create a project using the CLI

You need to be logged in to the DSRI and copy the login command.

  • Run

    oc new-project <project_name> --description="<description>" --display-name="<display_name>"
  • Example

    oc new-project dsri-workshop --description="DSRI Workshop" \
    --display-name="DSRI Community Workshop Projects"
Reuse your project

Only create a new project when it is really necessary (i.e. for a genuinely new project). You can easily clean up your current project instead of creating a new one every time you want to try something.

Access permissions for developers to your project

You can use the Project view in the Developer perspective to grant or revoke access permissions to your project.

To add users to your project and provide Admin, Edit, or View access to them:

  1. In the Developer perspective, navigate to the Project view.

  2. In the Project page, select the Project Access tab.

  3. Click Add Access to add a new row of permissions to the default ones.

    Project Access
  4. Enter the user name, click the Select a role drop-down list, and select an appropriate role.

  5. Click Save to add the new permissions.

You can also use:

  • The Select a role drop-down list, to modify the access permissions of an existing user.

  • The Remove Access icon, to completely remove the access permissions of an existing user to the project.
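
The same permissions can also be granted from the terminal; a minimal sketch, with the username, role and project name as placeholders:

oc adm policy add-role-to-user edit <username> -n <project_name>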

info

Advanced role-based access control is managed in the Roles and Role Bindings views in the Administrator perspective

Delete a project using the web UI

  1. Navigate to Home => Projects.

  2. Locate the project that you want to delete from the list of projects.

  3. On the far right side of the project listing, select Delete Project from the Options (⋮) menu.

  4. When the Delete Project pane opens, enter the name of the project that you want to delete in the field.

  5. Click Delete.

    Delete Project

Delete a project using the CLI

Delete Project

When you delete a project, the server updates the project status to Terminating from Active. Then, the server clears all content from a project that is in the Terminating state before finally removing the project. While a project is in Terminating status, you cannot add new content to the project. Projects can be deleted from the CLI or the web console.

You need to be logged in to the DSRI and copy the login command.

  • Run

    oc delete project <project_name>
  • Example

    oc delete project dsri-workshop

Working with sensitive data

Reminder: DSRI restrictions

  • Since DSRI can only be accessed when on the physical UM network or using the UM VPN, deployed services will not be available on the public Internet 🔒
  • All activities must have a legal basis. You must closely examine and abide by the terms and conditions of any data, software, or web service that you use as part of your work 📜

Disclaimer

The DSRI administration disclaims all responsibility for the misuse of sensitive data processed on the DSRI

We can guarantee that only you and 4 administrators are able to access the data (you might need to check with the data owner whether that is acceptable)

Feel free to ask us for more details

2) Quantize the Model
3) Prune the Model
4) Use Fused Operations
5) Enable GPU Persistence
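
As an illustration of the last item, persistence mode is typically toggled with nvidia-smi; note that this requires root on the GPU host, so on a managed cluster like the DSRI it is usually handled by the administrators:

nvidia-smi -pm 1   # enable persistence mode (requires root on the GPU host)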

How can the DSRI team help you?

We can assist you with analyzing the bottleneck(s) in your deep learning pipeline and recommend improvements to speed it up.

External Resources and references

  • This documentation is adapted from "Practical Deep Learning for Cloud, Mobile, and Edge" by Koul et al. (published by O’Reilly)

Start your workspace

This page will help you to start a workspace to run your code and experiments on the DSRI in a container.

Introduction to containers

Anything running on the DSRI needs to be running in a Docker container. Docker containers are namespaces that share the kernel of a Linux system; you can see them as clean, minimalist Linux computers with only what you need to run your programs installed. This allows you to completely control the environment where your code runs, and avoid conflicts.

When running experiments, we can start from existing images that have already been published for popular data science applications with a web interface. You can use, for example, JupyterLab when running Python, RStudio when running R, or VisualStudio Code if you prefer.

Once you access a running container, you can install anything you need as if it were a Linux/Ubuntu computer (most of them run with admin privileges), and run anything via the notebook/RStudio/VSCode interface, or the terminal.

Choose your interface

The first step to get your code running on the DSRI is to pick the base interface you want to use to access your workspace.

We prepared generic Docker images for data science workspaces with your favorite web UI pre-installed to easily deploy your workspace. So you just need to choose your favorite workspace, start the container, access it, add your code, and install your dependencies.

  1. Login to the DSRI dashboard
  2. Select your project, or create one with a meaningful short name representing your project, e.g. workspace-yourname
  3. Go to the +Add page, and select to add From Developer Catalog => All services
Access catalog
  4. Search for templates corresponding to the application you want to deploy among the ones described below (make sure the filter for templates is properly checked).

JupyterLab: Access and run your code using the popular Jupyter notebooks, with kernels for Python, Java, R and Julia. It also provides a good web interface to access the terminal, and to upload and browse files.

VisualStudio Code: Your daily IDE, but in your browser, running on the DSRI.

RStudio: R users' favorite.

The terminal: For people who are used to the terminal and just want to run scripts; it provides smaller and more stable images, which makes installation and deployment easier. You can use the Ubuntu template to start a basic Ubuntu image and access it from the terminal.

Any web interface: You can easily run and access most programs with a web interface on the DSRI. You can use the template Custom workspace if your application is exposed on port 8888. Otherwise visit the page Anatomy of a DSRI application for more details.

Desktop interface: there is the possibility to start a container as a Linux operating system with a graphical desktop interface. It can be useful to deploy software like Matlab, but the setup can be a bit more complex. You will get an Ubuntu computer with a basic Desktop interface, running on the DSRI, that you can access directly in your web browser. The desktop interface is accessed through a web application by using noVNC, which exposes the VNC connection without needing a VNC client.

More applications

You can also find more documentation on the different applications that can be deployed from the DSRI under Deploy applications in the menu on the left.

Start your workspace

Once you have chosen your favorite way to run your experiments, you can click on the application you want to use for your workspace. Check out the description to learn more about the application that will be deployed.

Then click on Instantiate Template, and fill in the parameters, such as the password to access the web UI. Note that the application name needs to be unique in the project. Finally, click on the Create button.

Filter templates catalog

You should see your application in your project dashboard; it can take a few seconds to a few minutes to pull the Docker image and start the application.

Once the application has started, you will be able to access it by clicking on its circle, then clicking the Route that has been automatically generated for the web interface, in the Resources tab.

Check the workshop

For a more detailed tutorial, you can follow the workshop to start Data Science applications on the DSRI

Upload your code and data

We recommend using git to clone your project code in your workspace, as it helps with sharing and managing the evolution of your project.

It is preinstalled in most images; otherwise you can install it easily with apt-get install git

With web interfaces like JupyterLab, VisualStudio Code and RStudio, you can easily upload small and medium-sized files directly through the UI with a drag and drop.

Otherwise you can use the terminal, install the oc client, and use the oc cp or oc rsync commands to upload large files to your workspace on the DSRI. See the Upload data page for more details.

Install your dependencies

Once the workspace is started, you can install the different dependencies you need to run your experiments.

It is recommended to save all the commands you used to install the different requirements in a script (e.g. install.sh). This will ensure you can reinstall the environment easily and faithfully if the container is restarted. You can also use them to create a Docker image with everything prepared for your application.

Most containers for science are based on debian/ubuntu, so you can install new packages with apt-get:

apt-get update
apt-get install -y build-essential wget curl

Run your code

You can use your web interface to run your code as you like to do: notebooks, rstudio, execute via VSCode

Note that for jobs which run for a long time, the web UI is not always the best solution, e.g. Jupyter notebooks can be quite unstable when running a 30-minute code block.

A quick solution for that is to run your code in scripts, using the bash terminal. You can use the nohup prefix and & suffix to run your script in the background, so that you can even disconnect, and come back later to check the results and logs.

For example with a python script, you would do:

nohup python my_script.py &

The script will run in the background, and all terminal output will be stored in the file nohup.out
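
You can follow the output while the script is running with:

tail -f nohup.out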

You can also check if the process is currently running by typing ps aux or top

You can kill the process by getting the process ID (PID) using the previous commands, and then: kill -9 PID

Stop your application

When you are not using your application anymore you can stop the pod. If you are using a Dynamic or Persistent storage you can restart the pod and continue working with all your data in the same state as you left it.

Do not waste resources

Please think of stopping applications you are not using to avoid consuming unnecessary resources.

On the Topology page click on the down arrow ⬇️ next to the number of pods deployed.

Scale down pod

You can then restart the pod by clicking the up arrow ⬆️

Note that starting more than 1 pod will not increase the amount of resources you have access to; most of the time it will only waste resources and might end up in weird behavior on your side. The web UI will randomly assign you to one of the started pods when you access it. Running multiple pods only makes sense for clusters with multiple workers, such as Apache Flink and Spark, or if you connect directly to each pod with the terminal to run different processes.

Start your application

When you try to access your workspace and you encounter the page below, usually this indicates that your pod is not running. For example, this will be the case if you stopped your pod, or if there was maintenance on the cluster.

Screenshot of page that says Application is not available

To start the pod, go to the Topology page, and click on the up arrow ⬆️ next to the number of pods deployed. Make sure you scale it to 1. Scaling it to more than 1 will not increase the amount of resources you have access to; most of the time it will only waste resources and cause weird behavior on your side.

Scale down pod
Do not waste resources

Please only scale up applications you're using, and scale down when you're not using them anymore. Idle applications consume unnecessary power and might prevent other users from using the DSRI.

Optional: define a docker image

Once you have tested your workspace and you know how to set it up, it can be helpful to define a Dockerfile to build and publish a Docker image with everything directly installed (instead of installing your requirements after starting a generic workspace)

  1. Start from an existing generic Docker image, depending on the base technologies you need, such as Debian, Ubuntu, Python, JupyterLab, VisualStudio Code, RStudio...
  2. Add your source code in the Docker image using ADD . . or COPY . .
  3. Install dependencies (e.g. RUN apt-get install gfortran)
  4. Define which command to run when starting the container (e.g. ENTRYPOINT ["jupyter", "lab"])

Here is a simple example Dockerfile for a python application:

# The base image to start from, choose the one with everything you need installed
FROM python:3.8

# Change the user and working directory to make sure we are using root
USER root
WORKDIR /root

# Install additional packages
RUN apt-get update && \
    apt-get install -y build-essential

# This line will copy all files and folder that are in the same folder as the Dockerfile (usually the code you want to run in the container)
ADD . .

# This line will install all the python packages described in the requirements.txt of your source code
RUN pip install -r requirements.txt && \
pip install notebook jupyterlab

# Command to run when the container is started, here it starts JupyterLab as a service
ENTRYPOINT [ "jupyter", "lab" ]
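
Once the Dockerfile is ready, here is a minimal sketch of building and publishing the image, assuming a DockerHub account; the username and image name are placeholders:

# yourusername/my-workspace is a placeholder image name
docker build -t yourusername/my-workspace:latest .
docker push yourusername/my-workspace:latest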

Here are some examples of Dockerfiles for various types of web applications:

See the guide to Publish a Docker image for more details on this topic.

…but it organises its data in so-called containers that contain objects. There is no tree-like structure with files and directories; there are only containers with objects in them. The SURF Object Store service is based on Ceph RGW and provides access using the S3 protocol, which is the de facto standard for addressing object storage.
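
Since the service speaks S3, it can be addressed with standard S3 tooling; here is a minimal sketch using the AWS CLI, where the bucket name and endpoint URL are hypothetical placeholders (check the SURF documentation for the real endpoint):

# my-bucket and the endpoint URL are hypothetical placeholders
aws s3 ls s3://my-bucket --endpoint-url https://objectstore.example.com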

How to Get Started with SURF Services?

The DSRI team is here to help you navigate SURF’s services, including:

1) Grant Applications:
We assist researchers in applying for SURF grants. For instance:

* Small applications: Up to 1 million System Billing Units (SBU) on Snellius and/or 100 TB of dCache storage (https://www.surf.nl/en/small-compute-applications-nwo)
* Large applications: Customized resource allocations based on project needs.

2) Resource Estimation:
Unsure about your computing and storage requirements? We help estimate your needs in terms of SURF’s billing units.

3) Use Case Analysis:
We assess whether your research project is a good fit for SURF’s services.

External Resources and references


Libraries for Machine Learning

Work in progress

This page is in development, feel free to edit it to add more information.

Machine Learning libraries

SciKit Learn

https://scikit-learn.org/stable/

Deep Learning libraries

See this article for more details about modern Deep Learning libraries.

Tensorflow

Python library developed by Google.

https://www.tensorflow.org/

PyTorch

Python library developed by Facebook.

https://pytorch.org/

Deep Java Library

Java library developed by Amazon. See the introduction article.

https://djl.ai/

Sonnet

Layer on top of Tensorflow.

https://sonnet.readthedocs.io/en/latest/

Keras

Python library. Layer on top of Tensorflow, CNTK, Theano.

https://keras.io/

Metaflow

Layer on top of Tensorflow, PyTorch, and SciKit Learn, developed by Netflix.

https://metaflow.org/


Deploy Airflow

Deploy Apache Airflow to run workflows (aka. DAGs), hosted in a Git repository, on the DSRI.

Install the chart

You will need to have Helm installed on your computer to deploy a Helm chart, see the Helm docs for more details.

Install the Helm chart to be able to deploy Airflow on the DSRI:

helm repo add apache-airflow https://airflow.apache.org
helm repo update

Deploy Airflow

You can quickly deploy Airflow on the DSRI, with DAGs automatically synchronized with your Git repository.

We use a values.yml file with all default parameters pre-defined for the DSRI, so you just need to edit the password and git repository configuration in this command, and run it:

helm install airflow apache-airflow/airflow \
-f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/airflow/values.yml \
--set webserver.defaultUser.password=yourpassword \
--set dags.gitSync.repo=https://github.com/bio2kg/bio2kg-etl.git \
--set dags.gitSync.branch=main \
--set dags.gitSync.subPath=workflows/dags
info

If you need to do more configuration, you can download the values.yml file, edit it to match your settings, and use the local file with -f values.yml

A few seconds after Airflow starts installing, you will need to fix the postgresql deployment in a different terminal window (unfortunately, setting the serviceAccount.name of the postgresql sub-chart doesn't work, even though it should be possible according to the official Helm docs). Run this command to fix postgresql:

oc patch statefulset/airflow-postgresql --patch '{"spec":{"template":{"spec": {"serviceAccountName": "anyuid"}}}}'

Once Airflow has finished deploying, you can temporarily access its web interface at http://localhost:8080 by forwarding the webserver port to your machine:

oc port-forward svc/airflow-webserver 8080:8080

Or permanently expose the interface on a URL, accessible when logged in to the UM VPN, with HTTPS enabled:

oc expose svc/airflow-webserver
oc patch route/airflow-webserver --patch '{"spec":{"tls": {"termination": "edge", "insecureEdgeTerminationPolicy": "Redirect"}}}'

Finally, get the route to the Airflow web interface, or access it via the DSRI web UI:

oc get routes
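If you only need the hostname of the Airflow route, for example in a script, you can query it with a jsonpath expression (the same pattern is used in the GitHub Actions workflow later in this documentation):

oc get route airflow-webserver -o jsonpath='{.spec.host}'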

Example workflows

You can find example DAGs for bash operator, python operator and Kubernetes pod operator here.

Here is an example of a DAG using the Kubernetes pod operator to run tasks as pods; you will need to change the namespace parameter to the DSRI project where Airflow is deployed:

from airflow import DAG
from datetime import datetime, timedelta
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.operators.dummy_operator import DummyOperator

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime.utcnow(),
    'email': ['airflow@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
}
dag = DAG(
    'kubernetes_pod_operator',
    default_args=default_args,
    schedule_interval=None
    # schedule_interval=timedelta(minutes=10)
)

start = DummyOperator(task_id='run_this_first', dag=dag)

passing = KubernetesPodOperator(
    namespace='CHANGEME',
    image="python:3.6",
    cmds=["python", "-c"],
    arguments=["print('hello world')"],
    labels={"app": "airflow"},
    name="passing-test",
    task_id="passing-task",
    get_logs=True,
    dag=dag
)

passing.set_upstream(start)

Delete the chart

helm uninstall airflow
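Note that helm uninstall does not necessarily remove the persistent volume claims created for the postgresql database. A quick check for leftovers, with a hypothetical PVC name to illustrate the deletion:

oc get pvc
# e.g. oc delete pvc data-airflow-postgresql-0 (verify the actual name with the command above)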

See also

Here are a few links for more details on the official Airflow Helm chart:

Other ways to deploy Airflow on OpenShift:

+ \ No newline at end of file diff --git a/docs/workflows-argo/index.html b/docs/workflows-argo/index.html index adf577dac..f22a4f343 100644 --- a/docs/workflows-argo/index.html +++ b/docs/workflows-argo/index.html @@ -16,13 +16,13 @@ - +
-

Run Argo workflows

Install in your project

Argo needs to be installed in your project, contact the DSRI team to request it.

Install the argo client

Argo 🦑 is a container native workflow engine for Kubernetes supporting both DAG and step based workflows.

Download and install the Argo client on your computer to start workflows on the DSRI.

On Ubuntu

sudo curl -L -o /usr/local/bin/argo https://github.com/argoproj/argo/releases/download/v2.4.2/argo-linux-amd64
sudo chmod +x /usr/local/bin/argo

On MacOS

brew install argoproj/tap/argo

On Windows

Get Argo executable version 2.4.2 from Argo Releases on GitHub.

See official Argo documentation.
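On any platform, you can then check that the client was properly installed, and which version you got, with the version subcommand:

argo version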

Test Argo

Run the Hello world workflow to test if Argo has been properly installed, and take a look at the examples provided in the Argo documentation to discover how to use the different features available.

argo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml
Logged in

You will need to have the oc client installed and be logged in with oc login; see the install documentation page.

Install Argo in your project

Argo workflows with Helm

Deploy the Argo Helm chart.

  1. Install and use helm
  2. Add the Helm charts repository:
helm repo add argo https://argoproj.github.io/argo-helm
  3. Install chart:
helm install my-argo argo/argo --version 0.15.2
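You can then check that the release was deployed and that its pods started properly, for example:

helm status my-argo
oc get pods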

ArgoCD Operator

Ask on the DSRI Slack #helpdesk channel to have the ArgoCD Operator installed in your project.

Uninstall argo

On Ubuntu

sudo rm /usr/local/bin/argo

You can now reinstall a newer version of Argo.


Run workflows to convert structured data to RDF

We will use examples from the MaastrichtU-IDS/d2s-core project.

Clone the repository

git clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git
cd d2s-project-template

Authenticate to the OpenShift cluster using oc login.

Workflow to convert XML files to RDF

argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml.yaml \
-f support/config/config-transform-xml-drugbank.yml
Provide config files

Config files can be provided using the -f argument, but they are not required.

argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml-dag.yaml \
-f support/config/config-transform-xml-drugbank.yml

Workflow to convert CSV files to RDF

  • Steps-based workflow for CSV files
argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv.yaml \
-f support/config/config-transform-csv-stitch.yml
  • DAG workflow for CSV files
argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv-dag.yaml \
-f support/config/config-transform-csv-stitch.yml
Solve issue

Try this to solve issues related to the IP of services started as steps: {{steps.nginx-server.pod-ip}}


Argo commands

List running Argo workflows

argo list

Stop a workflow

argo terminate my-workflow
Workflow

This might not stop the workflow; in this case, use:

argo delete my-workflow

Delete a workflow

argo delete my-workflow

Debug a workflow

Get into a container to understand why it fails, by creating a YAML that runs the command tail -f /dev/null to keep the container hanging.

See the example in the d2s-argo-workflow repository:

apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: test-devnull-argo-
spec:
  entrypoint: execute-workflow

  # Use existing volume
  volumes:
  - name: workdir
    persistentVolumeClaim:
      claimName: pvc-mapr-projects-test-vincent

  templates:
  - name: execute-workflow
    steps:
    - - name: run-rdfunit
        template: rdfunit

  - name: rdfunit
    container:
      image: umids/rdfunit:latest
      command: [tail]
      args: ["-f", "/dev/null"]
      volumeMounts:
      - name: workdir
        mountPath: /data
        subPath: dqa-workspace

Then start the workflow:

argo submit --serviceaccount argo tests/test-devnull-argo.yaml

And connect with the Shell (change the pod ID to your pod ID):

oc rsh test-devnull-argo-pod
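If you are unsure of the exact pod name, list the pods first; the generateName prefix from the YAML above makes the workflow pods easy to spot:

oc get pods | grep test-devnull-argo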
+ \ No newline at end of file diff --git a/docs/workflows-cwl/index.html b/docs/workflows-cwl/index.html index cddf945db..effe7152f 100644 --- a/docs/workflows-cwl/index.html +++ b/docs/workflows-cwl/index.html @@ -16,13 +16,13 @@ - +
-

Run CWL workflows

The Common Workflow Language (CWL) is an open standard for describing analysis workflows and tools in a way that makes them portable and scalable across a variety of software and hardware environments.

We use the Calrissian implementation of CWL. Note that this project is young and still in development; feel free to report issues and contribute to its documentation.

Clone the repository

  1. Git clone in /calrissian on a persistent volume on the cluster from a terminal.
cd /data/calrissian
git clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git
cd d2s-project-template
  2. You will need to create the folder for the workflow output data, in our example it is output-data:
mkdir /data/calrissian/output-data
  3. You might need to give permissions (the CWL execution will fail due to permission issues otherwise):
chmod -R 777 /data/calrissian

Start pod

Start the CWL execution from your computer using the oc client. Define the CWL command arguments to run in run-workflows-cwl.yaml (be careful to properly define the paths to the CWL files in the pod storage).

oc create -f d2s-core/support/run-workflows-cwl.yaml
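You can then follow the CWL execution through the logs of the created pod. The pod name below is a placeholder; get the real one with oc get pods first:

oc get pods
oc logs -f <calrissian-pod-name>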
Delete the pod

You will need to delete the pod if you want to re-create it.

Delete created pod

oc delete -f d2s-core/support/run-workflows-cwl.yaml
+ \ No newline at end of file diff --git a/docs/workflows-github-actions/index.html b/docs/workflows-github-actions/index.html index 5233f882e..2313fcf51 100644 --- a/docs/workflows-github-actions/index.html +++ b/docs/workflows-github-actions/index.html @@ -16,14 +16,14 @@ - +

Deploy GitHub Runners

Deploy a GitHub Actions runner on the DSRI to run workflows that are simple to define using YAML and hosted in your GitHub repository. This allows you to run larger workloads than on GitHub-hosted runners, which are limited to 7 GB RAM, 1 CPU and 6 hours per job.

Here are some of the advantages of GitHub Actions:

  • A step can be any Bash command, or a reusable Action from the GitHub Marketplace, which can easily be defined from a Docker container and shared with your collaborators
  • Parallelization can easily be added manually or dynamically, up to 255 jobs
  • It provides a good logging system directly available in your repository on GitHub
  • Define triggers (on code push, cron job, manual request), and secrets (such as passwords) easily

For more information about GitHub Actions workflows, go to https://github.com/features/actions

Install the chart

You will need to have Helm installed on your computer to deploy a GitHub Actions Runner; see the Helm docs for more details.

Add the Helm chart repository to be able to deploy the GitHub Actions Runner on the DSRI:

helm repo add openshift-actions-runner https://redhat-actions.github.io/openshift-actions-runner-chart
helm repo update

Then create a GitHub Personal Access Token as per the instructions in the runner image README.

tl;dr: go to your Settings on GitHub: https://github.com/settings/tokens, click the button to create a new token, give it a meaningful name (e.g. DSRI Runner my-project), and check the following permissions:

✅️ repo (maybe also workflow?)

✅️ admin:org if the Runner is for an organization

Deploy a Runner

Before deploying the runner, make sure you are in the project where you want to deploy it:

oc project my-project

For an organization

Deploy a runner available for all repositories of an organization (you can fine-tune the access via your GitHub Settings):

  1. Provide the token previously created, and the organization name
export GITHUB_PAT="TOKEN"
export GITHUB_OWNER=My-Org
  2. Deploy the runner for the organization:
helm install actions-runner openshift-actions-runner/actions-runner \
--set-string githubPat=$GITHUB_PAT \
--set-string githubOwner=$GITHUB_OWNER \
--set runnerLabels="{ dsri, $GITHUB_OWNER }" \
--set replicas=3 \
--set serviceAccountName=anyuid \
--set memoryRequest="512Mi" \
--set memoryLimit="100Gi" \
--set cpuRequest="100m" \
--set cpuLimit="64"

You can also change the default runner image:

    --set runnerImage=ghcr.io/vemonet/github-actions-conda-runner \
--set runnerTag=latest

Check out all the available parameters here

  3. Check the deployment:
helm get manifest actions-runner | kubectl get -f -

Go to your organization Settings page on GitHub, then go to the Actions tab, and scroll to the bottom. In the list of active runners you should see the runners you just deployed.

For a repository

You can also deploy a runner for a specific repository:

export GITHUB_PAT="TOKEN"
# For an org runner, this is the org.
# For a repo runner, this is the repo owner (org or user).
export GITHUB_OWNER=vemonet
# For an org runner, omit this argument.
# For a repo runner, the repo name.
export GITHUB_REPO=shapes-of-you

Deploy the runner:

helm install actions-runner openshift-actions-runner/actions-runner \
--set-string githubPat=$GITHUB_PAT \
--set-string githubOwner=$GITHUB_OWNER \
--set-string githubRepository=$GITHUB_REPO \
--set runnerLabels="{ dsri, anything-helpful }"

Define Actions to run on DSRI

You can now set up GitHub Actions workflows, in the .github/workflows folder, to be run on this runner (the repository needs to be under the organization or user you deployed the runner for). The job will be sent to run on the DSRI:

jobs:
  your-job:
    runs-on: [ "self-hosted", "dsri", "my-org" ]
    steps: ...

Uninstall the runner

helm uninstall actions-runner
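You can verify that the release and its runner pods are gone, assuming you are still in the project where the runner was deployed:

helm list
oc get pods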

Deploy using GitHub Actions workflows

Experimental

Experimental: this deployment workflow is still experimental, let us know on Slack if you are interested in using it.

Alternatively you can also build and deploy your application using a GitHub Actions workflow.

You will need to connect to the UM VPN in your workflow by defining 2 secrets, VPN_USER and VPN_PASSWORD; this is done in this step:

- name: Connect to the VPN
  run: |
    sudo apt-get install -y openconnect network-manager-openconnect
    echo '${{ secrets.VPN_PASSWORD }}' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl
    sleep 10
RedHat documentation

RedHat provides the following instructions and template to deploy an application on OpenShift

The OpenShift Starter workflow will:

  • Checkout your repository
  • Perform a Docker build
  • Push the built image to an image registry
  • Log in to your OpenShift cluster
  • Create an OpenShift app from the image and expose it to the internet.

Before you begin:

  • Have write access to a container image registry such as quay.io or Dockerhub.
  • Have access to an OpenShift cluster.
  • The project you wish to add this workflow to should have a Dockerfile.
    • If you don't have a Dockerfile at the repository root, see the buildah-build step.
    • Builds from scratch are also available, but require more configuration.

To get the workflow running:

  1. Add this workflow to your repository.
  2. Edit the top-level 'env' section, which contains a list of environment variables that must be configured.
  3. Create the secrets referenced in the 'env' section under your repository Settings.
  4. Edit the 'branches' in the 'on' section to trigger the workflow on a push to your branch.
  5. Commit and push your changes.

For a more sophisticated example, see https://github.com/redhat-actions/spring-petclinic/blob/main/.github/workflows/petclinic-sample.yaml. Also see our GitHub organization, https://github.com/redhat-actions/

name: Deploy to OpenShift

# ⬇️ Modify the fields marked with ⬇️ to fit your project, and create any secrets that are referenced.
# https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets
env:
  # ⬇️ EDIT with your registry and registry path.
  REGISTRY: ghcr.io/maastrichtu-ids
  # ⬇️ EDIT with your registry username.
  REGISTRY_USER: <username>
  REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}

  # ⬇️ EDIT to log into your OpenShift cluster and set up the context.
  # See https://github.com/redhat-actions/oc-login#readme for how to retrieve these values.
  OPENSHIFT_SERVER: ${{ secrets.OPENSHIFT_SERVER }}
  OPENSHIFT_TOKEN: ${{ secrets.OPENSHIFT_TOKEN }}

  # ⬇️ EDIT with the port your application should be accessible on.
  APP_PORT: 8080

  # ⬇️ EDIT if you wish to set the kube context's namespace after login. Leave blank to use the default namespace.
  OPENSHIFT_NAMESPACE: ""

  # If you wish to manually provide the APP_NAME and TAG, set them here, otherwise they will be auto-detected.
  APP_NAME: "my-app"
  TAG: ""

on:
  # https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows
  push:
    # Edit to the branch(es) you want to build and deploy on each push.
    branches: [ main ]

jobs:
  openshift-ci-cd:
    name: Build and deploy to OpenShift
    runs-on: ubuntu-20.04

    steps:
    - uses: actions/checkout@v2

    - name: Determine app name
      if: env.APP_NAME == ''
      run: |
        echo "APP_NAME=$(basename $PWD)" | tee -a $GITHUB_ENV

    - name: Determine tag
      if: env.TAG == ''
      run: |
        echo "TAG=${GITHUB_SHA::7}" | tee -a $GITHUB_ENV

    # https://github.com/redhat-actions/buildah-build#readme
    - name: Build from Dockerfile
      uses: redhat-actions/buildah-build@v1
      with:
        image: ${{ env.APP_NAME }}
        tag: ${{ env.TAG }}
        # If you don't have a dockerfile, see:
        # https://github.com/redhat-actions/buildah-build#building-from-scratch
        # Otherwise, point this to your Dockerfile relative to the repository root.
        dockerfiles: |
          ./Dockerfile

    # https://github.com/redhat-actions/push-to-registry#readme
    - name: Push to registry
      id: push-to-registry
      uses: redhat-actions/push-to-registry@v1
      with:
        image: ${{ env.APP_NAME }}
        tag: ${{ env.TAG }}
        registry: ${{ env.REGISTRY }}
        username: ${{ env.REGISTRY_USER }}
        password: ${{ env.REGISTRY_PASSWORD }}

    # The path the image was pushed to is now stored in ${{ steps.push-to-registry.outputs.registry-path }}

    - name: Connect to the VPN
      run: |
        sudo apt-get install -y openconnect network-manager-openconnect
        echo '${{ secrets.VPN_PASSWORD }}' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl
        sleep 10

    # oc-login works on all platforms, but oc must be installed first.
    # The GitHub Ubuntu runner already includes oc.
    # https://github.com/redhat-actions/oc-login#readme
    - name: Log in to OpenShift
      uses: redhat-actions/oc-login@v1
      with:
        openshift_server_url: ${{ env.OPENSHIFT_SERVER }}
        openshift_token: ${{ env.OPENSHIFT_TOKEN }}
        insecure_skip_tls_verify: true
        namespace: ${{ env.OPENSHIFT_NAMESPACE }}

    # This step should create a deployment, service, and route to run your app and expose it to the internet.
    # Feel free to replace this with 'oc apply', 'helm install', or however you like to deploy your app.
    - name: Create and expose app
      run: |
        export IMAGE="${{ steps.push-to-registry.outputs.registry-path }}"
        export PORT=${{ env.APP_PORT }}

        export SELECTOR="app=${{ env.APP_NAME }}"
        echo "SELECTOR=$SELECTOR" >> $GITHUB_ENV

        set -x
        # Take down any old deployment
        oc delete all --selector="$SELECTOR"
        oc new-app --name $APP_NAME --docker-image="$IMAGE"

        # Make sure the app port is exposed
        oc patch svc $APP_NAME -p "{ \"spec\": { \"ports\": [{ \"name\": \"$PORT-tcp\", \"port\": $PORT }] } }"
        oc expose service $APP_NAME --port=$PORT

        oc get all --selector="$SELECTOR"
        set +x

        export ROUTE="$(oc get route $APP_NAME -o jsonpath='{.spec.host}')"
        echo "$APP_NAME is exposed at $ROUTE"
        echo "ROUTE=$ROUTE" >> $GITHUB_ENV

    - name: View application route
      run: |
        [[ -n ${{ env.ROUTE }} ]] || (echo "Determining application route failed in previous step"; exit 1)
        echo "======================== Your application is available at: ========================"
        echo ${{ env.ROUTE }}
        echo "==================================================================================="
        echo
        echo "Your app can be taken down with: \"oc delete all --selector='${{ env.SELECTOR }}'\""

See also

+ \ No newline at end of file diff --git a/docs/workflows-introduction/index.html b/docs/workflows-introduction/index.html index eb2023daf..71d544fcd 100644 --- a/docs/workflows-introduction/index.html +++ b/docs/workflows-introduction/index.html @@ -16,13 +16,13 @@ - +
-

Introduction to workflows

Work in progress

Running workflows on the DSRI is a work in progress. It usually requires some knowledge about how to orchestrate containers.

Introduction

Multiple technologies are available to run workflows on OpenShift/Kubernetes clusters. Each has its strengths and weaknesses in different areas.

Use-case dependant

The technology to use needs to be chosen depending on your use case.

Current solutions on the DSRI

Those solutions can easily be deployed on the DSRI. Let us know if you need help deploying them.

GitHub Actions workflows

GitHub Actions allows you to automatically run containerized workflows, defined through a simple YAML file hosted in your GitHub repository.

See the page about GitHub Actions runners for more details, and to deploy runners on the DSRI.

Apache Airflow

Airflow is a platform to programmatically author, schedule and monitor workflows, aka. DAGs (directed acyclic graphs).

See the page about Airflow for more details, and to deploy Airflow on the DSRI.

Argo

Argo is a container native workflow engine for Kubernetes supporting both DAG and step based workflows.

  • Workflows easy to define using Kubernetes-like YAML files.
  • Easy to use if your workflow is composed of Docker containers to run with arguments.
Contact us

Contact us if you want to run Argo workflows on the DSRI

More options

Let us know if you are interested in deploying, and using, any of those workflows on the DSRI.

Kubeflow

Optimized for Tensorflow workflows on Kubernetes.

Pipelines written in Python.

Apache Airflow

Define, schedule and run workflows.

Can be deployed with OpenDataHub; see also this deployment for OpenShift.

See also: Airflow on Kubernetes blog, and Kubernetes in Airflow documentation.

Volcano

Run batch pipelines on Kubernetes with Volcano.

  • More a scheduler than a workflow engine.

  • Volcano can be used to run Spark, Kubeflow or KubeGene workflows.

Nextflow

Nextflow has been developed by the genomic research scientific community and is built to run bioinformatics pipelines.

Define your workflow in a Bash script fashion, providing inputs, outputs and the command to run, without the need to create and use Docker containers for Conda pipelines.

CWL

  • Developed by the genomic research scientific community.
  • Good support for provenance description (export as RDF).
  • Support on OpenShift is still in development
  • Proposes a GUI to build the workflows: Rabix Composer

KubeGene

KubeGene is a turn-key genome sequencing workflow management framework.

See the Workflow example, and how to define a tool.

Seldon

Open-source platform for rapidly deploying machine learning models on Kubernetes. Manage, serve and scale models built in any framework on Kubernetes.

Contact us

Feel free to contact us if you have any questions about running workflows on the DSRI, or to request support for a new technology.

+ \ No newline at end of file diff --git a/docs/workflows-nextflow/index.html b/docs/workflows-nextflow/index.html index b3e60a36b..ea3d8d9d1 100644 --- a/docs/workflows-nextflow/index.html +++ b/docs/workflows-nextflow/index.html @@ -16,13 +16,13 @@ - +
-

Run Nextflow workflows

Nextflow enables scalable and reproducible scientific workflows using software containers. It allows the adaptation of pipelines written in the most common scripting languages.

Nextflow has been developed by the genomic research scientific community and is built to run bioinformatics pipelines.

Define your workflow in a Bash script fashion, providing inputs, outputs and the command to run, without the need to create and use Docker containers for Conda pipelines.

Install Nextflow

Install the nextflow client on your computer:

wget -qO- https://get.nextflow.io | bash
Official documentation
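The installer drops a nextflow executable in the current directory. A minimal sketch to make it available everywhere and to verify the installation:

sudo mv nextflow /usr/local/bin/
nextflow -version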

Run workflow

Try the hello world workflow from Nextflow using an existing storage volume:

nextflow kuberun https://github.com/nextflow-io/hello -v pvc-mapr-projects-showcase:/data
Use Conda environments

You can easily define Conda environments and workflows with Nextflow.

+ \ No newline at end of file diff --git a/gpu-booking/index.html b/gpu-booking/index.html index 53f5acd34..630d77a06 100644 --- a/gpu-booking/index.html +++ b/gpu-booking/index.html @@ -16,13 +16,13 @@ - +

Book a GPU

Once you have booked a GPU, you will receive an email with more information, and the GPU will be enabled in your DSRI project for the period requested. You can book a GPU for a maximum of 7 days.

The DSRI has 7 GPUs: the number in the badge on a date indicates how many GPUs are already booked for that day, and greyed-out days are fully booked.

Your UM email:

Must end with @maastrichtuniversity.nl or @student.maastrichtuniversity.nl

The DSRI project ID where to enable GPU:

The project ID should only contain alphanumeric characters and -

The ID of the app deployed on the DSRI where we will enable GPU:

Make sure this value is right, as it will be used to automatically enable the GPU in this app. The app ID should only contain alphanumeric characters and -

✔️  GPU requested successfully, you will receive an email with more information to use the GPU on the DSRI once your booking starts.

⚠️ If you don't see any colored number on the calendar, please reload the page; sometimes ReactJS fails to initialize the page

🔎 You can see a more detailed view of the GPU schedule here

❌ If you want to cancel your reservation, please send an email to DSRI-SUPPORT-L@maastrichtuniversity.nl

- + \ No newline at end of file diff --git a/help/index.html b/help/index.html index c0618263c..11a9a7828 100644 --- a/help/index.html +++ b/help/index.html @@ -16,13 +16,13 @@ - +

Need help?

If you need help or have questions about the Data Science Research Infrastructure, try one of the mechanisms below:

📝 Submit a ticket

If you are having technical issues, such as "my pod does not restart anymore", and need help from the DSRI team, submit a ticket in the ICTS Self-Service Portal.

💬 Join the DSRI Slack

If you have a broader question for the DSRI community, such as "which kind of tools could I use to run imaging workflows on the DSRI?", ask it on a public Slack channel.

Contact us at dsri-support-l@maastrichtuniversity.nl to get an invitation to the DSRI Slack channel

🧹 Delete your data

You can request us to delete the data related to you in the DSRI user database, and in the DSRI cluster. Contact dsri-support-l@maastrichtuniversity.nl to request the deletion of your data.

- + \ No newline at end of file diff --git a/index.html b/index.html index 2d0a0170f..808e08fd0 100644 --- a/index.html +++ b/index.html @@ -16,13 +16,13 @@ - +

Data Science Research Infrastructure

A distributed and scalable infrastructure to run Data Science experiments at Maastricht University

Get started quickly

Easily deploy popular Data Science workspaces, such as RStudio, JupyterLab or VisualStudio Code, from the DSRI web interface in a few clicks.

Run any program in a container

Most programs can be installed and run in containers easily. Customize an existing workspace image, or create a new one from scratch to deploy exactly what you need.

Share computing environments

Share development environments and tips with other researchers at Maastricht University. Help improve those environments for you, and everyone in the community!

How do you deploy an application on the DSRI?

- + \ No newline at end of file diff --git a/register/index.html b/register/index.html index 8b236ddad..606b2c77e 100644 --- a/register/index.html +++ b/register/index.html @@ -16,13 +16,13 @@ - +

Register to access the DSRI

We need to know a bit about you and your project before granting you access to the Data Science Research Infrastructure at Maastricht University.

Your UM email:

Must end with @maastrichtuniversity.nl or @student.maastrichtuniversity.nl

Your first and last name:

Your employee or student ID at Maastricht University:

Your affiliation:

The type of project you will use the DSRI for:

A brief description of your project:

A short identifier slug for your project:

Provide a link to your code repository if possible (e.g. GitHub, GitLab):

Expected number of collaborators, if you organise a workshop with multiple users:

Until when do you expect to use the DSRI?

How did you hear about us?

The DSRI infrastructure is accessible to UM researchers and students. Although the DSRI has all relevant security and privacy measures in place to ensure a stable system, we want to emphasize that it is the user's responsibility to decide which data they use to perform their work on the DSRI.
GDPR compliance is the responsibility of the researcher. In case there are any questions, you can visit this website (https://library.maastrichtuniversity.nl/research-support/rdm/guide/#personal-data-privacy-security) or contact your faculty information manager.

If your project uses sensitive data, provide the AVG number for GDPR compliance:

✔️  User registered successfully, after manual review you will receive an email with more information on how to access and use the DSRI.
- + \ No newline at end of file diff --git a/search-index.json b/search-index.json index 3b3e93366..a28bb5e69 100644 --- a/search-index.json +++ b/search-index.json @@ -1 +1 @@ -[{"documents":[{"i":1,"t":"Blog Title","u":"/blog/2016/03/11/blog-post","b":[]},{"i":3,"t":"New Blog Post","u":"/blog/2017/04/10/blog-post-two","b":[]},{"i":5,"t":"Adding RSS Support - RSS Truncation Test","u":"/blog/2017/09/25/testing-rss","b":[]},{"i":7,"t":"Adding RSS Support","u":"/blog/2017/09/26/adding-rss","b":[]},{"i":9,"t":"New Version 1.0.0","u":"/blog/2017/10/24/new-version-1.0.0","b":[]},{"i":11,"t":"","u":"/blog/archive","b":[]},{"i":12,"t":"Access the DSRI","u":"/docs/access-dsri","b":["Documentation","Get started"]},{"i":26,"t":"Introduction","u":"/docs/","b":["Documentation","Get started"]},{"i":42,"t":"Data streaming","u":"/docs/catalog-data-streaming","b":["Documentation"]},{"i":46,"t":"Imaging softwares","u":"/docs/catalog-imaging","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":50,"t":"Genomics","u":"/docs/catalog-genomics","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":54,"t":"OpenDataHub","u":"/docs/catalog-opendatahub","b":["Documentation"]},{"i":62,"t":"Access UM servers","u":"/docs/access-um-servers","b":["Documentation","Guides"]},{"i":66,"t":"Utilities","u":"/docs/catalog-utilities","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":77,"t":"Checkpointing Machine Learning Training","u":"/docs/checkpointing-ml-training","b":["Documentation","Guides"]},{"i":91,"t":"","u":"/docs/contribute","b":["Documentation","Guides"]},{"i":108,"t":"Deploy Dask Cluster","u":"/docs/dask-cluster","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":116,"t":"Parallelization using Dask","u":"/docs/dask-tutorial","b":["Documentation","Guides"]},{"i":132,"t":"Deploy from a Docker image","u":"/docs/deploy-from-docker","b":["Documentation","Deploy applications"]},{"i":146,"t":"deploy-gitlab-runner","u":"/docs/deploy-gitlab-runner","b":["Documentation"]},{"i":148,"t":"Databases","u":"/docs/deploy-database","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":168,"t":"Jupyter Notebooks","u":"/docs/deploy-jupyter","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":178,"t":"Matlab","u":"/docs/deploy-matlab","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":190,"t":"GPU applications","u":"/docs/deploy-on-gpu","b":["Documentation","Deploy applications"]},{"i":208,"t":"RStudio","u":"/docs/deploy-rstudio","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":218,"t":"JupyterHub","u":"/docs/deploy-jupyterhub","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":289,"t":"VisualStudio Code","u":"/docs/deploy-vscode","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":297,"t":"Enabling VPN access in WSL2","u":"/docs/enabling-vpn-wsl","b":["Documentation","Miscellaneous"]},{"i":301,"t":"Spark cluster","u":"/docs/deploy-spark","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":319,"t":"Glossary","u":"/docs/glossary","b":["Documentation","Guides"]},{"i":328,"t":"Anatomy of a DSRI application","u":"/docs/anatomy-of-an-application","b":["Documentation","Deploy applications"]},{"i":364,"t":"Deploy from a Dockerfile","u":"/docs/guide-dockerfile-to-openshift","b":["Documentation","Deploy applications"]},{"i":382,"t":"Install local 
OpenShift","u":"/docs/guide-local-install","b":["Documentation"]},{"i":416,"t":"Publish a Docker image","u":"/docs/guide-publish-image","b":["Documentation","Guides"]},{"i":435,"t":"Monitor your applications","u":"/docs/guide-monitoring","b":["Documentation","Get started"]},{"i":441,"t":"Install UM VPN","u":"/docs/guide-vpn","b":["Documentation","Guides"]},{"i":447,"t":"Install from Helm charts","u":"/docs/helm","b":["Documentation","Deploy applications"]},{"i":464,"t":"Increase your processes speed","u":"/docs/increase-process-speed","b":["Documentation","Miscellaneous"]},{"i":472,"t":"Prepare a workshop","u":"/docs/guide-workshop","b":["Documentation","Guides"]},{"i":484,"t":"Known Issues","u":"/docs/guide-known-issues","b":["Documentation","Guides"]},{"i":498,"t":"JupyterHub with Spark","u":"/docs/jupyterhub-spark","b":["Documentation"]},{"i":514,"t":"Run MPI jobs","u":"/docs/mpi-jobs","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":518,"t":"JupyterHub workspace","u":"/docs/jupyterhub-workspace","b":["Documentation"]},{"i":526,"t":"Neuroscience research","u":"/docs/neuroscience","b":["Documentation","Deploy applications","Data Science catalog"]},{"i":538,"t":"Command Line Interface","u":"/docs/openshift-commands","b":["Documentation","Guides"]},{"i":568,"t":"Install the client","u":"/docs/openshift-install","b":["Documentation","Get started"]},{"i":579,"t":"Delete an application","u":"/docs/openshift-delete-services","b":["Documentation","Get started"]},{"i":585,"t":"Login to Docker registries","u":"/docs/login-docker-registry","b":["Documentation","Guides"]},{"i":603,"t":"Upload data","u":"/docs/openshift-load-data","b":["Documentation","Get started"]},{"i":624,"t":"Prepare your project","u":"/docs/prepare-project-for-dsri","b":["Documentation","Get started"]},{"i":636,"t":"Install from Operators","u":"/docs/operators","b":["Documentation","Deploy applications"]},{"i":644,"t":"Data storage","u":"/docs/openshift-storage","b":["Documentation","Guides"]},{"i":656,"t":"Delete objects (advanced)","u":"/docs/openshift-delete-objects","b":["Documentation","Guides"]},{"i":671,"t":"Create a new Project","u":"/docs/project-management","b":["Documentation","Guides"]},{"i":683,"t":"PyTorch Profiling","u":"/docs/profile-pytorch-code","b":["Documentation","Miscellaneous"]},{"i":695,"t":"Tensorflow Optimization","u":"/docs/speeding-tensorflow-dl","b":["Documentation","Miscellaneous"]},{"i":715,"t":"Working with sensible data","u":"/docs/sensible-data","b":["Documentation"]},{"i":721,"t":"SURF Offerings","u":"/docs/surf-offerings","b":["Documentation","Miscellaneous"]},{"i":734,"t":"Start your workspace","u":"/docs/start-workspace","b":["Documentation","Get started"]},{"i":753,"t":"Deploy Airflow","u":"/docs/workflows-airflow","b":["Documentation","Guides","Workflows"]},{"i":764,"t":"Run Argo workflows","u":"/docs/workflows-argo","b":["Documentation","Guides","Workflows"]},{"i":800,"t":"Run CWL workflows","u":"/docs/workflows-cwl","b":["Documentation","Guides","Workflows"]},{"i":808,"t":"Libraries for Machine Learning","u":"/docs/tools-machine-learning","b":["Documentation","Guides"]},{"i":828,"t":"Introduction to workflows","u":"/docs/workflows-introduction","b":["Documentation","Guides","Workflows"]},{"i":856,"t":"Run Nextflow workflows","u":"/docs/workflows-nextflow","b":["Documentation","Guides","Workflows"]},{"i":862,"t":"Deploy GitHub 
Runners","u":"/docs/workflows-github-actions","b":["Documentation","Guides","Workflows"]}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/1",[0,3.475,1,4.015]],["t/3",[0,2.926,2,2.627,3,3.381]],["t/5",[4,1.985,5,3.115,6,1.985,7,2.293,8,2.293]],["t/7",[4,2.926,5,2.926,6,2.926]],["t/9",[2,2.627,9,3.381,10,3.381]],["t/11",[]],["t/12",[11,3.12,12,3.475]],["t/26",[13,4.279]],["t/42",[14,2.854,15,4.015]],["t/46",[16,3.12,17,4.015]],["t/50",[18,4.943]],["t/54",[19,4.943]],["t/62",[11,2.627,20,2.926,21,3.381]],["t/66",[22,4.943]],["t/77",[23,2.919,24,2.527,25,2.527,26,2.919]],["t/91",[]],["t/108",[27,2.076,28,2.926,29,2.926]],["t/116",[28,2.926,30,3.381,31,3.381]],["t/132",[16,2.627,27,2.076,32,2.627]],["t/146",[27,2.076,33,3.381,34,2.926]],["t/148",[35,4.943]],["t/168",[36,4.015,37,4.015]],["t/178",[38,4.943]],["t/190",[39,4.015,40,2.854]],["t/208",[41,4.943]],["t/218",[42,3.841]],["t/289",[43,4.015,44,4.015]],["t/297",[11,2.268,45,2.919,46,2.527,47,2.919]],["t/301",[29,3.475,48,3.475]],["t/319",[49,4.943]],["t/328",[12,2.926,40,2.403,50,3.381]],["t/364",[27,2.466,51,4.015]],["t/382",[52,2.225,53,3.381,54,3.381]],["t/416",[16,2.627,32,2.627,55,3.381]],["t/435",[40,2.854,56,4.015]],["t/441",[20,2.926,46,2.926,52,2.225]],["t/447",[52,2.225,57,3.381,58,3.381]],["t/464",[59,3.381,60,3.381,61,3.381]],["t/472",[62,3.475,63,4.015]],["t/484",[64,4.015,65,4.015]],["t/498",[42,3.12,48,3.475]],["t/514",[66,2.403,67,3.381,68,3.381]],["t/518",[42,3.12,69,3.475]],["t/526",[70,4.015,71,4.015]],["t/538",[72,3.381,73,3.381,74,3.381]],["t/568",[52,2.642,75,4.015]],["t/579",[40,2.854,76,3.475]],["t/585",[32,2.627,77,3.381,78,3.381]],["t/603",[14,2.854,79,4.015]],["t/624",[62,3.475,80,3.475]],["t/636",[52,2.642,81,4.015]],["t/644",[14,2.854,82,4.015]],["t/656",[76,2.926,83,3.381,84,3.381]],["t/671",[2,2.627,80,2.926,85,3.381]],["t/683",[86,4.015,87,4.015]],["t/695",[88,4.015,89,4.015]],["t/715",[14,2.403,90,3.381,91,3.381]],["t/721",[92,4.015,93,4.015]],["t/734",[69,3.475,94,4.015]],["t/753",[27,2.466,95,4.015]],["t/764",[66,2.403,96,3.381,97,2.403]],["t/800",[66,2.403,97,2.403,98,3.381]],["t/808",[24,2.926,25,2.926,99,3.381]],["t/828",[13,3.475,97,2.854]],["t/856",[66,2.403,97,2.403,100,3.381]],["t/862",[27,2.076,34,2.926,101,3.381]]],"invertedIndex":[["1.0.0",{"_index":10,"t":{"9":{"position":[[12,5]]}}}],["access",{"_index":11,"t":{"12":{"position":[[0,6]]},"62":{"position":[[0,6]]},"297":{"position":[[13,6]]}}}],["ad",{"_index":4,"t":{"5":{"position":[[0,6]]},"7":{"position":[[0,6]]}}}],["advanc",{"_index":84,"t":{"656":{"position":[[15,10]]}}}],["airflow",{"_index":95,"t":{"753":{"position":[[7,7]]}}}],["anatomi",{"_index":50,"t":{"328":{"position":[[0,7]]}}}],["applic",{"_index":40,"t":{"190":{"position":[[4,12]]},"328":{"position":[[18,11]]},"435":{"position":[[13,12]]},"579":{"position":[[10,11]]}}}],["argo",{"_index":96,"t":{"764":{"position":[[4,4]]}}}],["blog",{"_index":0,"t":{"1":{"position":[[0,4]]},"3":{"position":[[4,4]]}}}],["chart",{"_index":58,"t":{"447":{"position":[[18,6]]}}}],["checkpoint",{"_index":23,"t":{"77":{"position":[[0,13]]}}}],["client",{"_index":75,"t":{"568":{"position":[[12,6]]}}}],["cluster",{"_index":29,"t":{"108":{"position":[[12,7]]},"301":{"position":[[6,7]]}}}],["code",{"_index":44,"t":{"289":{"position":[[13,4]]}}}],["command",{"_index":72,"t":{"538":{"position":[[0,7]]}}}],["creat",{"_index":85,"t":{"671":{"position":[[0,6]]}}}],["cwl",{"_index":98,"t":{"800":{"position":[[4,3]]}}}],["dask",{"_index":28,"t":{"108":{"position":[[7,4]]},"116":{"position":[[22,4]]
}}}],["data",{"_index":14,"t":{"42":{"position":[[0,4]]},"603":{"position":[[7,4]]},"644":{"position":[[0,4]]},"715":{"position":[[22,4]]}}}],["databas",{"_index":35,"t":{"148":{"position":[[0,9]]}}}],["delet",{"_index":76,"t":{"579":{"position":[[0,6]]},"656":{"position":[[0,6]]}}}],["deploy",{"_index":27,"t":{"108":{"position":[[0,6]]},"132":{"position":[[0,6]]},"146":{"position":[[0,6]]},"364":{"position":[[0,6]]},"753":{"position":[[0,6]]},"862":{"position":[[0,6]]}}}],["docker",{"_index":32,"t":{"132":{"position":[[14,6]]},"416":{"position":[[10,6]]},"585":{"position":[[9,6]]}}}],["dockerfil",{"_index":51,"t":{"364":{"position":[[14,10]]}}}],["dsri",{"_index":12,"t":{"12":{"position":[[11,4]]},"328":{"position":[[13,4]]}}}],["enabl",{"_index":45,"t":{"297":{"position":[[0,8]]}}}],["genom",{"_index":18,"t":{"50":{"position":[[0,8]]}}}],["github",{"_index":101,"t":{"862":{"position":[[7,6]]}}}],["gitlab",{"_index":33,"t":{"146":{"position":[[7,6]]}}}],["glossari",{"_index":49,"t":{"319":{"position":[[0,8]]}}}],["gpu",{"_index":39,"t":{"190":{"position":[[0,3]]}}}],["helm",{"_index":57,"t":{"447":{"position":[[13,4]]}}}],["imag",{"_index":16,"t":{"46":{"position":[[0,7]]},"132":{"position":[[21,5]]},"416":{"position":[[17,5]]}}}],["increas",{"_index":59,"t":{"464":{"position":[[0,8]]}}}],["instal",{"_index":52,"t":{"382":{"position":[[0,7]]},"441":{"position":[[0,7]]},"447":{"position":[[0,7]]},"568":{"position":[[0,7]]},"636":{"position":[[0,7]]}}}],["interfac",{"_index":74,"t":{"538":{"position":[[13,9]]}}}],["introduct",{"_index":13,"t":{"26":{"position":[[0,12]]},"828":{"position":[[0,12]]}}}],["issu",{"_index":65,"t":{"484":{"position":[[6,6]]}}}],["job",{"_index":68,"t":{"514":{"position":[[8,4]]}}}],["jupyt",{"_index":36,"t":{"168":{"position":[[0,7]]}}}],["jupyterhub",{"_index":42,"t":{"218":{"position":[[0,10]]},"498":{"position":[[0,10]]},"518":{"position":[[0,10]]}}}],["known",{"_index":64,"t":{"484":{"position":[[0,5]]}}}],["learn",{"_index":25,"t":{"77":{"position":[[22,8]]},"808":{"position":[[22,8]]}}}],["librari",{"_index":99,"t":{"808":{"position":[[0,9]]}}}],["line",{"_index":73,"t":{"538":{"position":[[8,4]]}}}],["local",{"_index":53,"t":{"382":{"position":[[8,5]]}}}],["login",{"_index":77,"t":{"585":{"position":[[0,5]]}}}],["machin",{"_index":24,"t":{"77":{"position":[[14,7]]},"808":{"position":[[14,7]]}}}],["matlab",{"_index":38,"t":{"178":{"position":[[0,6]]}}}],["monitor",{"_index":56,"t":{"435":{"position":[[0,7]]}}}],["mpi",{"_index":67,"t":{"514":{"position":[[4,3]]}}}],["neurosci",{"_index":70,"t":{"526":{"position":[[0,12]]}}}],["new",{"_index":2,"t":{"3":{"position":[[0,3]]},"9":{"position":[[0,3]]},"671":{"position":[[9,3]]}}}],["nextflow",{"_index":100,"t":{"856":{"position":[[4,8]]}}}],["notebook",{"_index":37,"t":{"168":{"position":[[8,9]]}}}],["object",{"_index":83,"t":{"656":{"position":[[7,7]]}}}],["offer",{"_index":93,"t":{"721":{"position":[[5,9]]}}}],["opendatahub",{"_index":19,"t":{"54":{"position":[[0,11]]}}}],["openshift",{"_index":54,"t":{"382":{"position":[[14,9]]}}}],["oper",{"_index":81,"t":{"636":{"position":[[13,9]]}}}],["optim",{"_index":89,"t":{"695":{"position":[[11,12]]}}}],["parallel",{"_index":30,"t":{"116":{"position":[[0,15]]}}}],["post",{"_index":3,"t":{"3":{"position":[[9,4]]}}}],["prepar",{"_index":62,"t":{"472":{"position":[[0,7]]},"624":{"position":[[0,7]]}}}],["process",{"_index":60,"t":{"464":{"position":[[14,9]]}}}],["profil",{"_index":87,"t":{"683":{"position":[[8,9]]}}}],["project",{"_index":80,"t":{"624":{"position":[[13,7
]]},"671":{"position":[[13,7]]}}}],["publish",{"_index":55,"t":{"416":{"position":[[0,7]]}}}],["pytorch",{"_index":86,"t":{"683":{"position":[[0,7]]}}}],["registri",{"_index":78,"t":{"585":{"position":[[16,10]]}}}],["research",{"_index":71,"t":{"526":{"position":[[13,8]]}}}],["rss",{"_index":5,"t":{"5":{"position":[[7,3],[21,3]]},"7":{"position":[[7,3]]}}}],["rstudio",{"_index":41,"t":{"208":{"position":[[0,7]]}}}],["run",{"_index":66,"t":{"514":{"position":[[0,3]]},"764":{"position":[[0,3]]},"800":{"position":[[0,3]]},"856":{"position":[[0,3]]}}}],["runner",{"_index":34,"t":{"146":{"position":[[14,6]]},"862":{"position":[[14,7]]}}}],["sensibl",{"_index":91,"t":{"715":{"position":[[13,8]]}}}],["server",{"_index":21,"t":{"62":{"position":[[10,7]]}}}],["softwar",{"_index":17,"t":{"46":{"position":[[8,9]]}}}],["spark",{"_index":48,"t":{"301":{"position":[[0,5]]},"498":{"position":[[16,5]]}}}],["speed",{"_index":61,"t":{"464":{"position":[[24,5]]}}}],["start",{"_index":94,"t":{"734":{"position":[[0,5]]}}}],["storag",{"_index":82,"t":{"644":{"position":[[5,7]]}}}],["stream",{"_index":15,"t":{"42":{"position":[[5,9]]}}}],["support",{"_index":6,"t":{"5":{"position":[[11,7]]},"7":{"position":[[11,7]]}}}],["surf",{"_index":92,"t":{"721":{"position":[[0,4]]}}}],["tensorflow",{"_index":88,"t":{"695":{"position":[[0,10]]}}}],["test",{"_index":8,"t":{"5":{"position":[[36,4]]}}}],["titl",{"_index":1,"t":{"1":{"position":[[5,5]]}}}],["train",{"_index":26,"t":{"77":{"position":[[31,8]]}}}],["truncat",{"_index":7,"t":{"5":{"position":[[25,10]]}}}],["um",{"_index":20,"t":{"62":{"position":[[7,2]]},"441":{"position":[[8,2]]}}}],["upload",{"_index":79,"t":{"603":{"position":[[0,6]]}}}],["us",{"_index":31,"t":{"116":{"position":[[16,5]]}}}],["util",{"_index":22,"t":{"66":{"position":[[0,9]]}}}],["version",{"_index":9,"t":{"9":{"position":[[4,7]]}}}],["visualstudio",{"_index":43,"t":{"289":{"position":[[0,12]]}}}],["vpn",{"_index":46,"t":{"297":{"position":[[9,3]]},"441":{"position":[[11,3]]}}}],["work",{"_index":90,"t":{"715":{"position":[[0,7]]}}}],["workflow",{"_index":97,"t":{"764":{"position":[[9,9]]},"800":{"position":[[8,9]]},"828":{"position":[[16,9]]},"856":{"position":[[13,9]]}}}],["workshop",{"_index":63,"t":{"472":{"position":[[10,8]]}}}],["workspac",{"_index":69,"t":{"518":{"position":[[11,9]]},"734":{"position":[[11,9]]}}}],["wsl2",{"_index":47,"t":{"297":{"position":[[23,4]]}}}]],"pipeline":["stemmer"]}},{"documents":[{"i":14,"t":"Request an account","u":"/docs/access-dsri","h":"#request-an-account","p":12},{"i":16,"t":"Connect to the UM network","u":"/docs/access-dsri","h":"#connect-to-the-um-network","p":12},{"i":18,"t":"Access the web UI","u":"/docs/access-dsri","h":"#access-the-web-ui","p":12},{"i":20,"t":"Access your project","u":"/docs/access-dsri","h":"#access-your-project","p":12},{"i":22,"t":"About the web UI","u":"/docs/access-dsri","h":"#about-the-web-ui","p":12},{"i":24,"t":"Accessing the Developer perspective","u":"/docs/access-dsri","h":"#accessing-the-developer-perspective","p":12},{"i":28,"t":"Getting started","u":"/docs/","h":"#getting-started","p":26},{"i":29,"t":"✅ What can be done on the DSRI","u":"/docs/","h":"#-what-can-be-done-on-the-dsri","p":26},{"i":31,"t":"❌ What cannot be done","u":"/docs/","h":"#-what-cannot-be-done","p":26},{"i":33,"t":"The DSRI architecture","u":"/docs/","h":"#the-dsri-architecture","p":26},{"i":35,"t":"The DSRI 
specifications","u":"/docs/","h":"#the-dsri-specifications","p":26},{"i":36,"t":"Software","u":"/docs/","h":"#software","p":26},{"i":38,"t":"Hardware","u":"/docs/","h":"#hardware","p":26},{"i":40,"t":"Learn more about DSRI","u":"/docs/","h":"#learn-more-about-dsri","p":26},{"i":44,"t":"Apache Flink","u":"/docs/catalog-data-streaming","h":"#apache-flink","p":42},{"i":48,"t":"CellProfiler","u":"/docs/catalog-imaging","h":"#cellprofiler","p":46},{"i":52,"t":"Trinity RNA Seq","u":"/docs/catalog-genomics","h":"#trinity-rna-seq","p":50},{"i":56,"t":"Components available on DSRI","u":"/docs/catalog-opendatahub","h":"#components-available-on-dsri","p":54},{"i":58,"t":"Start Spark with JupyterHub","u":"/docs/catalog-opendatahub","h":"#start-spark-with-jupyterhub","p":54},{"i":60,"t":"All components","u":"/docs/catalog-opendatahub","h":"#all-components","p":54},{"i":64,"t":"Request access to internal UM servers","u":"/docs/access-um-servers","h":"#request-access-to-internal-um-servers","p":62},{"i":68,"t":"Ubuntu","u":"/docs/catalog-utilities","h":"#ubuntu","p":66},{"i":69,"t":"With the terminal","u":"/docs/catalog-utilities","h":"#with-the-terminal","p":66},{"i":71,"t":"With a web UI","u":"/docs/catalog-utilities","h":"#with-a-web-ui","p":66},{"i":73,"t":"File browser","u":"/docs/catalog-utilities","h":"#file-browser","p":66},{"i":75,"t":"Creating or Connecting an Existing Persistent Storage","u":"/docs/catalog-utilities","h":"#creating-or-connecting-an-existing-persistent-storage","p":66},{"i":79,"t":"What is Checkpointing?","u":"/docs/checkpointing-ml-training","h":"#what-is-checkpointing","p":77},{"i":81,"t":"Checkpointing fequency?","u":"/docs/checkpointing-ml-training","h":"#checkpointing-fequency","p":77},{"i":83,"t":"Support for Checkpointing in Tensorflow/Keras and PyTorch ?","u":"/docs/checkpointing-ml-training","h":"#support-for-checkpointing-in-tensorflowkeras-and-pytorch-","p":77},{"i":85,"t":"Example of Tensorflow/Keras based checkpointing:","u":"/docs/checkpointing-ml-training","h":"#example-of-tensorflowkeras-based-checkpointing","p":77},{"i":87,"t":"Example of PyTorch based checkpointing:","u":"/docs/checkpointing-ml-training","h":"#example-of-pytorch-based-checkpointing","p":77},{"i":89,"t":"External Resources","u":"/docs/checkpointing-ml-training","h":"#external-resources","p":77},{"i":92,"t":"Contribute","u":"/docs/contribute","h":"","p":91},{"i":94,"t":"⚡ Quick edit on GitHub","u":"/docs/contribute","h":"#-quick-edit-on-github","p":91},{"i":96,"t":"🏗️ Larger changes locally","u":"/docs/contribute","h":"#️-larger-changes-locally","p":91},{"i":98,"t":"🔄 Automated deployment","u":"/docs/contribute","h":"#-automated-deployment","p":91},{"i":100,"t":"📝 Help","u":"/docs/contribute","h":"#-help","p":91},{"i":102,"t":"🔎 Files locations","u":"/docs/contribute","h":"#-files-locations","p":91},{"i":104,"t":"🦄 Markdown tip","u":"/docs/contribute","h":"#-markdown-tip","p":91},{"i":106,"t":"✔️ Pull Request process","u":"/docs/contribute","h":"#️-pull-request-process","p":91},{"i":110,"t":"🧊 Installation with Helm","u":"/docs/dask-cluster","h":"#-installation-with-helm","p":108},{"i":112,"t":"🪐 Configure a Route for the Cluster","u":"/docs/dask-cluster","h":"#-configure-a-route-for-the-cluster","p":108},{"i":114,"t":"🪐 Access the Jupyter Password/Token","u":"/docs/dask-cluster","h":"#-access-the-jupyter-passwordtoken","p":108},{"i":118,"t":"🧊 Installation","u":"/docs/dask-tutorial","h":"#-installation","p":116},{"i":120,"t":"🪐 Basic Concepts of 
Dask","u":"/docs/dask-tutorial","h":"#-basic-concepts-of-dask","p":116},{"i":122,"t":"✨ Selecting columns and element-wise operations","u":"/docs/dask-tutorial","h":"#-selecting-columns-and-element-wise-operations","p":116},{"i":124,"t":"⚡️ Conditional filtering","u":"/docs/dask-tutorial","h":"#️-conditional-filtering","p":116},{"i":126,"t":"✨ Common summary statistics","u":"/docs/dask-tutorial","h":"#-common-summary-statistics","p":116},{"i":128,"t":"✨ Groupby","u":"/docs/dask-tutorial","h":"#-groupby","p":116},{"i":130,"t":"⚡️ Lazy evaluation","u":"/docs/dask-tutorial","h":"#️-lazy-evaluation","p":116},{"i":134,"t":"Find an image for your service","u":"/docs/deploy-from-docker","h":"#find-an-image-for-your-service","p":132},{"i":136,"t":"Deploy the image on DSRI","u":"/docs/deploy-from-docker","h":"#deploy-the-image-on-dsri","p":132},{"i":138,"t":"Build and push a new Docker image","u":"/docs/deploy-from-docker","h":"#build-and-push-a-new-docker-image","p":132},{"i":140,"t":"Define a Dockerfile","u":"/docs/deploy-from-docker","h":"#define-a-dockerfile","p":132},{"i":142,"t":"Build the image","u":"/docs/deploy-from-docker","h":"#build-the-image","p":132},{"i":144,"t":"Push to DockerHub","u":"/docs/deploy-from-docker","h":"#push-to-dockerhub","p":132},{"i":150,"t":"SQL databases","u":"/docs/deploy-database","h":"#sql-databases","p":148},{"i":152,"t":"Start PostgreSQL 🐘","u":"/docs/deploy-database","h":"#start-postgresql-","p":148},{"i":154,"t":"Start MySQL 🐬","u":"/docs/deploy-database","h":"#start-mysql-","p":148},{"i":156,"t":"NoSQL databases","u":"/docs/deploy-database","h":"#nosql-databases","p":148},{"i":157,"t":"MongoDB 🌿","u":"/docs/deploy-database","h":"#mongodb-","p":148},{"i":159,"t":"Redis 🎲","u":"/docs/deploy-database","h":"#redis-","p":148},{"i":161,"t":"Graph databases","u":"/docs/deploy-database","h":"#graph-databases","p":148},{"i":162,"t":"OpenLink Virtuoso triplestore","u":"/docs/deploy-database","h":"#openlink-virtuoso-triplestore","p":148},{"i":164,"t":"Ontotext GraphDB triplestore","u":"/docs/deploy-database","h":"#ontotext-graphdb-triplestore","p":148},{"i":166,"t":"AllegroGraph","u":"/docs/deploy-database","h":"#allegrograph","p":148},{"i":170,"t":"🪐 Start JupyterLab","u":"/docs/deploy-jupyter","h":"#-start-jupyterlab","p":168},{"i":172,"t":"📦️ Manage dependencies with Conda","u":"/docs/deploy-jupyter","h":"#️-manage-dependencies-with-conda","p":168},{"i":174,"t":"🐙 Use git in JupyterLab","u":"/docs/deploy-jupyter","h":"#-use-git-in-jupyterlab","p":168},{"i":176,"t":"🐶 Example","u":"/docs/deploy-jupyter","h":"#-example","p":168},{"i":180,"t":"Use the official Matlab image","u":"/docs/deploy-matlab","h":"#use-the-official-matlab-image","p":178},{"i":182,"t":"Use a stable Matlab image","u":"/docs/deploy-matlab","h":"#use-a-stable-matlab-image","p":178},{"i":184,"t":"Use Matlab in Jupyter","u":"/docs/deploy-matlab","h":"#use-matlab-in-jupyter","p":178},{"i":186,"t":"Deploy Matlab on GPU","u":"/docs/deploy-matlab","h":"#deploy-matlab-on-gpu","p":178},{"i":188,"t":"Build your own Matlab image","u":"/docs/deploy-matlab","h":"#build-your-own-matlab-image","p":178},{"i":192,"t":"Prepare your GPU workspace","u":"/docs/deploy-on-gpu","h":"#prepare-your-gpu-workspace","p":190},{"i":194,"t":"About the docker images","u":"/docs/deploy-on-gpu","h":"#about-the-docker-images","p":190},{"i":196,"t":"Deploy the workspace","u":"/docs/deploy-on-gpu","h":"#deploy-the-workspace","p":190},{"i":198,"t":"Prepare the 
workspace","u":"/docs/deploy-on-gpu","h":"#prepare-the-workspace","p":190},{"i":200,"t":"Enable the GPU","u":"/docs/deploy-on-gpu","h":"#enable-the-gpu","p":190},{"i":202,"t":"Disable the GPU","u":"/docs/deploy-on-gpu","h":"#disable-the-gpu","p":190},{"i":204,"t":"Increase the number of GPUs","u":"/docs/deploy-on-gpu","h":"#increase-the-number-of-gpus","p":190},{"i":206,"t":"Install GPU drivers in any image","u":"/docs/deploy-on-gpu","h":"#install-gpu-drivers-in-any-image","p":190},{"i":210,"t":"Start RStudio","u":"/docs/deploy-rstudio","h":"#start-rstudio","p":208},{"i":212,"t":"Restricted RStudio with Shiny server","u":"/docs/deploy-rstudio","h":"#restricted-rstudio-with-shiny-server","p":208},{"i":214,"t":"Use Git in RStudio","u":"/docs/deploy-rstudio","h":"#use-git-in-rstudio","p":208},{"i":216,"t":"Run R jobs","u":"/docs/deploy-rstudio","h":"#run-r-jobs","p":208},{"i":220,"t":"Downloading and adjusting the config.yaml","u":"/docs/deploy-jupyterhub","h":"#downloading-and-adjusting-the-configyaml","p":218},{"i":222,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":223,"t":"Setting user's default persistent volume size","u":"/docs/deploy-jupyterhub","h":"#setting-users-default-persistent-volume-size","p":218},{"i":225,"t":"Configuring an authentication method","u":"/docs/deploy-jupyterhub","h":"#configuring-an-authentication-method","p":218},{"i":227,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":229,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":230,"t":"Deploying JupyterHub using the DSRI website 🪐","u":"/docs/deploy-jupyterhub","h":"#deploying-jupyterhub-using-the-dsri-website-","p":218},{"i":232,"t":"Installing the JupyterHub Helm Chart repository","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart-repository","p":218},{"i":234,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":236,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":238,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":240,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":241,"t":"Installing the JupyterHub Helm Chart","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart","p":218},{"i":243,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":245,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":247,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":249,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":251,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":253,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":255,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":256,"t":"Creating a secured route","u":"/docs/deploy-jupyterhub","h":"#creating-a-secured-route","p":218},{"i":258,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":260,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":262,"t":"Upgrading the config.yaml","u":"/docs/deploy-jupyterhub","h":"#upgrading-the-configyaml","p":218},{"i":264,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":266,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":268,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":270,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":272,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":273,"t":"Deploying JupyterHub using the Command Line Interface (CLI) 🪐","u":"/docs/deploy-jupyterhub","h":"#deploying-jupyterhub-using-the-command-line-interface-cli-","p":218},{"i":275,"t":"Installing the JupyterHub Helm Chart 
repository","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart-repository-1","p":218},{"i":277,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":279,"t":"Installing the JupyterHub Helm Chart","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart-1","p":218},{"i":281,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":283,"t":"Creating a secured route","u":"/docs/deploy-jupyterhub","h":"#creating-a-secured-route-1","p":218},{"i":285,"t":"Upgrading the config.yaml","u":"/docs/deploy-jupyterhub","h":"#upgrading-the-configyaml-1","p":218},{"i":287,"t":"","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":291,"t":"Start VisualStudio Code server","u":"/docs/deploy-vscode","h":"#start-visualstudio-code-server","p":289},{"i":293,"t":"Use Git in VSCode","u":"/docs/deploy-vscode","h":"#use-git-in-vscode","p":289},{"i":295,"t":"VSCode for GPU","u":"/docs/deploy-vscode","h":"#vscode-for-gpu","p":289},{"i":299,"t":"Follow these steps in the WSL2 environment:","u":"/docs/enabling-vpn-wsl","h":"#follow-these-steps-in-the-wsl2-environment","p":297},{"i":303,"t":"Deploy a Spark cluster","u":"/docs/deploy-spark","h":"#deploy-a-spark-cluster","p":301},{"i":305,"t":"Deploy the cluster from the catalog","u":"/docs/deploy-spark","h":"#deploy-the-cluster-from-the-catalog","p":301},{"i":307,"t":"Create a route to the Spark dashboard","u":"/docs/deploy-spark","h":"#create-a-route-to-the-spark-dashboard","p":301},{"i":309,"t":"Run on Spark","u":"/docs/deploy-spark","h":"#run-on-spark","p":301},{"i":311,"t":"Using PySpark","u":"/docs/deploy-spark","h":"#using-pyspark","p":301},{"i":313,"t":"RDF analytics with SANSA and Zeppelin notebooks","u":"/docs/deploy-spark","h":"#rdf-analytics-with-sansa-and-zeppelin-notebooks","p":301},{"i":315,"t":"Connect Spark to the persistent storage","u":"/docs/deploy-spark","h":"#connect-spark-to-the-persistent-storage","p":301},{"i":317,"t":"Delete a running Spark cluster","u":"/docs/deploy-spark","h":"#delete-a-running-spark-cluster","p":301},{"i":321,"t":"Docker","u":"/docs/glossary","h":"#docker","p":319},{"i":322,"t":"Kubernetes","u":"/docs/glossary","h":"#kubernetes","p":319},{"i":324,"t":"OpenShift","u":"/docs/glossary","h":"#openshift","p":319},{"i":326,"t":"OKD","u":"/docs/glossary","h":"#okd","p":319},{"i":330,"t":"Application walkthrough","u":"/docs/anatomy-of-an-application","h":"#application-walkthrough","p":328},{"i":332,"t":"Parameters","u":"/docs/anatomy-of-an-application","h":"#parameters","p":328},{"i":334,"t":"Image","u":"/docs/anatomy-of-an-application","h":"#image","p":328},{"i":336,"t":"Create storage","u":"/docs/anatomy-of-an-application","h":"#create-storage","p":328},{"i":338,"t":"Secret","u":"/docs/anatomy-of-an-application","h":"#secret","p":328},{"i":340,"t":"Deployment","u":"/docs/anatomy-of-an-application","h":"#deployment","p":328},{"i":342,"t":"Pod spec","u":"/docs/anatomy-of-an-application","h":"#pod-spec","p":328},{"i":344,"t":"Environment variables in the container","u":"/docs/anatomy-of-an-application","h":"#environment-variables-in-the-container","p":328},{"i":346,"t":"Mount storage","u":"/docs/anatomy-of-an-application","h":"#mount-storage","p":328},{"i":348,"t":"Security context","u":"/docs/anatomy-of-an-application","h":"#security-context","p":328},{"i":350,"t":"Service","u":"/docs/anatomy-of-an-application","h":"#service","p":328},{"i":352,"t":"Route","u":"/docs/anatomy-of-an-application","h":"#route","p":328},{"i":354,"t":"The complete 
application","u":"/docs/anatomy-of-an-application","h":"#the-complete-application","p":328},{"i":356,"t":"Add a configuration file","u":"/docs/anatomy-of-an-application","h":"#add-a-configuration-file","p":328},{"i":358,"t":"Add automated health checks","u":"/docs/anatomy-of-an-application","h":"#add-automated-health-checks","p":328},{"i":360,"t":"Define resource limits","u":"/docs/anatomy-of-an-application","h":"#define-resource-limits","p":328},{"i":362,"t":"Build your own application template","u":"/docs/anatomy-of-an-application","h":"#build-your-own-application-template","p":328},{"i":366,"t":"Build from local Dockerfile","u":"/docs/guide-dockerfile-to-openshift","h":"#build-from-local-dockerfile","p":364},{"i":368,"t":"Create new build configuration.","u":"/docs/guide-dockerfile-to-openshift","h":"#create-new-build-configuration","p":364},{"i":370,"t":"Build the image","u":"/docs/guide-dockerfile-to-openshift","h":"#build-the-image","p":364},{"i":372,"t":"Create your app","u":"/docs/guide-dockerfile-to-openshift","h":"#create-your-app","p":364},{"i":374,"t":"Expose app","u":"/docs/guide-dockerfile-to-openshift","h":"#expose-app","p":364},{"i":376,"t":"Delete the created build","u":"/docs/guide-dockerfile-to-openshift","h":"#delete-the-created-build","p":364},{"i":378,"t":"Deploy from a local docker image","u":"/docs/guide-dockerfile-to-openshift","h":"#deploy-from-a-local-docker-image","p":364},{"i":380,"t":"Deploy from a Git repository","u":"/docs/guide-dockerfile-to-openshift","h":"#deploy-from-a-git-repository","p":364},{"i":384,"t":"Install MiniShift","u":"/docs/guide-local-install","h":"#install-minishift","p":382},{"i":386,"t":"Start MiniShift","u":"/docs/guide-local-install","h":"#start-minishift","p":382},{"i":388,"t":"Login","u":"/docs/guide-local-install","h":"#login","p":382},{"i":390,"t":"Stop","u":"/docs/guide-local-install","h":"#stop","p":382},{"i":392,"t":"Reset","u":"/docs/guide-local-install","h":"#reset","p":382},{"i":394,"t":"Install kubectl","u":"/docs/guide-local-install","h":"#install-kubectl","p":382},{"i":395,"t":"kubectl on Ubuntu","u":"/docs/guide-local-install","h":"#kubectl-on-ubuntu","p":382},{"i":397,"t":"kubectl on MacOS & Windows","u":"/docs/guide-local-install","h":"#kubectl-on-macos--windows","p":382},{"i":399,"t":"Install the Dashboard UI","u":"/docs/guide-local-install","h":"#install-the-dashboard-ui","p":382},{"i":401,"t":"Run kubectl","u":"/docs/guide-local-install","h":"#run-kubectl","p":382},{"i":403,"t":"Enable internet","u":"/docs/guide-local-install","h":"#enable-internet","p":382},{"i":405,"t":"Create persistent volume","u":"/docs/guide-local-install","h":"#create-persistent-volume","p":382},{"i":407,"t":"Uninstall","u":"/docs/guide-local-install","h":"#uninstall","p":382},{"i":409,"t":"Install Argo workflows","u":"/docs/guide-local-install","h":"#install-argo-workflows","p":382},{"i":410,"t":"Install on your local Kubernetes","u":"/docs/guide-local-install","h":"#install-on-your-local-kubernetes","p":382},{"i":412,"t":"Install the client","u":"/docs/guide-local-install","h":"#install-the-client","p":382},{"i":414,"t":"Expose the UI","u":"/docs/guide-local-install","h":"#expose-the-ui","p":382},{"i":418,"t":"Login to Container Registries 🔑","u":"/docs/guide-publish-image","h":"#login-to-container-registries-","p":416},{"i":419,"t":"Login to GitHub Container Registry","u":"/docs/guide-publish-image","h":"#login-to-github-container-registry","p":416},{"i":421,"t":"Login to 
quay.io","u":"/docs/guide-publish-image","h":"#login-to-quayio","p":416},{"i":423,"t":"Login to DockerHub","u":"/docs/guide-publish-image","h":"#login-to-dockerhub","p":416},{"i":425,"t":"Publish your image 📢","u":"/docs/guide-publish-image","h":"#publish-your-image-","p":416},{"i":427,"t":"Publish to GitHub Container Registry","u":"/docs/guide-publish-image","h":"#publish-to-github-container-registry","p":416},{"i":429,"t":"Publish to Quay.io","u":"/docs/guide-publish-image","h":"#publish-to-quayio","p":416},{"i":431,"t":"Publish to DockerHub","u":"/docs/guide-publish-image","h":"#publish-to-dockerhub","p":416},{"i":433,"t":"Use automated workflows","u":"/docs/guide-publish-image","h":"#use-automated-workflows","p":416},{"i":437,"t":"Monitor your application resources use","u":"/docs/guide-monitoring","h":"#monitor-your-application-resources-use","p":435},{"i":439,"t":"Debug an application deployment","u":"/docs/guide-monitoring","h":"#debug-an-application-deployment","p":435},{"i":443,"t":"Request an account","u":"/docs/guide-vpn","h":"#request-an-account","p":441},{"i":445,"t":"Connect to the UM network","u":"/docs/guide-vpn","h":"#connect-to-the-um-network","p":441},{"i":449,"t":"Install the Helm client","u":"/docs/helm","h":"#install-the-helm-client","p":447},{"i":450,"t":"Install Golang","u":"/docs/helm","h":"#install-golang","p":447},{"i":452,"t":"Install Helm","u":"/docs/helm","h":"#install-helm","p":447},{"i":454,"t":"Check Helm installation","u":"/docs/helm","h":"#check-helm-installation","p":447},{"i":456,"t":"Install a Helm chart","u":"/docs/helm","h":"#install-a-helm-chart","p":447},{"i":458,"t":"Start a MySQL database with Helm","u":"/docs/helm","h":"#start-a-mysql-database-with-helm","p":447},{"i":460,"t":"Uninstall the application","u":"/docs/helm","h":"#uninstall-the-application","p":447},{"i":462,"t":"Set deployment parameters","u":"/docs/helm","h":"#set-deployment-parameters","p":447},{"i":466,"t":"The good","u":"/docs/increase-process-speed","h":"#the-good","p":464},{"i":468,"t":"The bad","u":"/docs/increase-process-speed","h":"#the-bad","p":464},{"i":470,"t":"The solution","u":"/docs/increase-process-speed","h":"#the-solution","p":464},{"i":474,"t":"Request VPN accounts for users","u":"/docs/guide-workshop","h":"#request-vpn-accounts-for-users","p":472},{"i":476,"t":"Fill a form","u":"/docs/guide-workshop","h":"#fill-a-form","p":472},{"i":478,"t":"Prepare you workshop","u":"/docs/guide-workshop","h":"#prepare-you-workshop","p":472},{"i":480,"t":"Publish an image for your training","u":"/docs/guide-workshop","h":"#publish-an-image-for-your-training","p":472},{"i":482,"t":"Show your users how to start a workspace","u":"/docs/guide-workshop","h":"#show-your-users-how-to-start-a-workspace","p":472},{"i":486,"t":"Cannot access your data in the persistent folder","u":"/docs/guide-known-issues","h":"#cannot-access-your-data-in-the-persistent-folder","p":484},{"i":488,"t":"Large volumes","u":"/docs/guide-known-issues","h":"#large-volumes","p":484},{"i":490,"t":"DockerHub pull limitations","u":"/docs/guide-known-issues","h":"#dockerhub-pull-limitations","p":484},{"i":492,"t":"How to run function within a container ''in the background'","u":"/docs/guide-known-issues","h":"#how-to-run-function-within-a-container-in-the-background","p":484},{"i":494,"t":"Git authentication issue","u":"/docs/guide-known-issues","h":"#git-authentication-issue","p":484},{"i":496,"t":"Filebrowser 403 forbidden","u":"/docs/guide-known-issues","h":"#filebrowser-403-forbidden","p":484},{"i":500,"t":"🧊 
Install kfctl","u":"/docs/jupyterhub-spark","h":"#-install-kfctl","p":498},{"i":502,"t":"🪐 Deploy JupyterHub and Spark","u":"/docs/jupyterhub-spark","h":"#-deploy-jupyterhub-and-spark","p":498},{"i":504,"t":"✨ Use the Spark cluster","u":"/docs/jupyterhub-spark","h":"#-use-the-spark-cluster","p":498},{"i":506,"t":"Match the version","u":"/docs/jupyterhub-spark","h":"#match-the-version","p":498},{"i":508,"t":"Spark UI","u":"/docs/jupyterhub-spark","h":"#spark-ui","p":498},{"i":510,"t":"New Spark cluster","u":"/docs/jupyterhub-spark","h":"#new-spark-cluster","p":498},{"i":512,"t":"🗑️ Delete the deployment","u":"/docs/jupyterhub-spark","h":"#️-delete-the-deployment","p":498},{"i":516,"t":"Run MPI jobs on CPU","u":"/docs/mpi-jobs","h":"#run-mpi-jobs-on-cpu","p":514},{"i":520,"t":"🪐 Start your workspace","u":"/docs/jupyterhub-workspace","h":"#-start-your-workspace","p":518},{"i":522,"t":"📦️ Manage dependencies with Conda","u":"/docs/jupyterhub-workspace","h":"#️-manage-dependencies-with-conda","p":518},{"i":524,"t":"🐙 Use git in JupyterLab","u":"/docs/jupyterhub-workspace","h":"#-use-git-in-jupyterlab","p":518},{"i":528,"t":"JupyterLab with FreeSurfer","u":"/docs/neuroscience","h":"#jupyterlab-with-freesurfer","p":526},{"i":530,"t":"FreeSurfer and FSL","u":"/docs/neuroscience","h":"#freesurfer-and-fsl","p":526},{"i":532,"t":"FreeSurfer and AFNI","u":"/docs/neuroscience","h":"#freesurfer-and-afni","p":526},{"i":534,"t":"Deploy the generated Dockerfile","u":"/docs/neuroscience","h":"#deploy-the-generated-dockerfile","p":526},{"i":536,"t":"Use the GPUs","u":"/docs/neuroscience","h":"#use-the-gpus","p":526},{"i":540,"t":"Overview","u":"/docs/openshift-commands","h":"#overview","p":538},{"i":542,"t":"Projects","u":"/docs/openshift-commands","h":"#projects","p":538},{"i":543,"t":"List projects","u":"/docs/openshift-commands","h":"#list-projects","p":538},{"i":545,"t":"Connect to project","u":"/docs/openshift-commands","h":"#connect-to-project","p":538},{"i":547,"t":"ImageStreams","u":"/docs/openshift-commands","h":"#imagestreams","p":538},{"i":549,"t":"Pods","u":"/docs/openshift-commands","h":"#pods","p":538},{"i":550,"t":"Create pod from YAML","u":"/docs/openshift-commands","h":"#create-pod-from-yaml","p":538},{"i":552,"t":"List pods","u":"/docs/openshift-commands","h":"#list-pods","p":538},{"i":554,"t":"Get specific pod","u":"/docs/openshift-commands","h":"#get-specific-pod","p":538},{"i":556,"t":"Remote Shell connection","u":"/docs/openshift-commands","h":"#remote-shell-connection","p":538},{"i":558,"t":"Execute command in pod","u":"/docs/openshift-commands","h":"#execute-command-in-pod","p":538},{"i":560,"t":"Delete pod","u":"/docs/openshift-commands","h":"#delete-pod","p":538},{"i":562,"t":"Get pod logs","u":"/docs/openshift-commands","h":"#get-pod-logs","p":538},{"i":564,"t":"Create app from template","u":"/docs/openshift-commands","h":"#create-app-from-template","p":538},{"i":566,"t":"Copy files","u":"/docs/openshift-commands","h":"#copy-files","p":538},{"i":570,"t":"Install the oc client","u":"/docs/openshift-install","h":"#install-the-oc-client","p":568},{"i":571,"t":"On Linux","u":"/docs/openshift-install","h":"#on-linux","p":568},{"i":573,"t":"On Mac","u":"/docs/openshift-install","h":"#on-mac","p":568},{"i":575,"t":"On Windows","u":"/docs/openshift-install","h":"#on-windows","p":568},{"i":577,"t":"Login in the terminal with oc","u":"/docs/openshift-install","h":"#login-in-the-terminal-with-oc","p":568},{"i":581,"t":"From the 
terminal","u":"/docs/openshift-delete-services","h":"#from-the-terminal","p":579},{"i":583,"t":"From the web UI","u":"/docs/openshift-delete-services","h":"#from-the-web-ui","p":579},{"i":587,"t":"UM Container registry","u":"/docs/login-docker-registry","h":"#um-container-registry","p":585},{"i":589,"t":"Logging in with Docker CLI","u":"/docs/login-docker-registry","h":"#logging-in-with-docker-cli","p":585},{"i":591,"t":"Using a Proxy Cache","u":"/docs/login-docker-registry","h":"#using-a-proxy-cache","p":585},{"i":593,"t":"Creating your own project","u":"/docs/login-docker-registry","h":"#creating-your-own-project","p":585},{"i":595,"t":"Using your own user","u":"/docs/login-docker-registry","h":"#using-your-own-user","p":585},{"i":597,"t":"Using a robot account","u":"/docs/login-docker-registry","h":"#using-a-robot-account","p":585},{"i":599,"t":"GitHub Container Registry","u":"/docs/login-docker-registry","h":"#github-container-registry","p":585},{"i":601,"t":"DockerHub","u":"/docs/login-docker-registry","h":"#dockerhub","p":585},{"i":605,"t":"In RStudio, JupyterLab and VSCode","u":"/docs/openshift-load-data","h":"#in-rstudio-jupyterlab-and-vscode","p":603},{"i":607,"t":"Copy large files with the terminal","u":"/docs/openshift-load-data","h":"#copy-large-files-with-the-terminal","p":603},{"i":609,"t":"Copy from local to pod","u":"/docs/openshift-load-data","h":"#copy-from-local-to-pod","p":603},{"i":611,"t":"Copy from pod to local","u":"/docs/openshift-load-data","h":"#copy-from-pod-to-local","p":603},{"i":613,"t":"Download data from SURFdrive","u":"/docs/openshift-load-data","h":"#download-data-from-surfdrive","p":603},{"i":615,"t":"Synchronizes files with oc rsync","u":"/docs/openshift-load-data","h":"#synchronizes-files-with-oc-rsync","p":603},{"i":617,"t":"Sync local to pod","u":"/docs/openshift-load-data","h":"#sync-local-to-pod","p":603},{"i":619,"t":"Sync pod to local","u":"/docs/openshift-load-data","h":"#sync-pod-to-local","p":603},{"i":621,"t":"More options","u":"/docs/openshift-load-data","h":"#more-options","p":603},{"i":623,"t":"One-liner","u":"/docs/openshift-load-data","h":"#one-liner","p":603},{"i":626,"t":"Code in a git repository","u":"/docs/prepare-project-for-dsri","h":"#code-in-a-git-repository","p":624},{"i":628,"t":"Get your data ready","u":"/docs/prepare-project-for-dsri","h":"#get-your-data-ready","p":624},{"i":630,"t":"Data is on your local machine","u":"/docs/prepare-project-for-dsri","h":"#data-is-on-your-local-machine","p":624},{"i":632,"t":"Data is on a server","u":"/docs/prepare-project-for-dsri","h":"#data-is-on-a-server","p":624},{"i":634,"t":"Request access to internal UM servers","u":"/docs/prepare-project-for-dsri","h":"#request-access-to-internal-um-servers","p":624},{"i":638,"t":"Install existing Operators","u":"/docs/operators","h":"#install-existing-operators","p":636},{"i":640,"t":"Build Operators","u":"/docs/operators","h":"#build-operators","p":636},{"i":642,"t":"External resources","u":"/docs/operators","h":"#external-resources","p":636},{"i":646,"t":"Create the Persistent Storage","u":"/docs/openshift-storage","h":"#create-the-persistent-storage","p":644},{"i":648,"t":"Connect the Existing Persistent Storage","u":"/docs/openshift-storage","h":"#connect-the-existing-persistent-storage","p":644},{"i":650,"t":"Expand existing Persistent Storage","u":"/docs/openshift-storage","h":"#expand-existing-persistent-storage","p":644},{"i":652,"t":"Use the dynamic 
storage","u":"/docs/openshift-storage","h":"#use-the-dynamic-storage","p":644},{"i":654,"t":"Use the ephemeral storage","u":"/docs/openshift-storage","h":"#use-the-ephemeral-storage","p":644},{"i":658,"t":"Delete an application","u":"/docs/openshift-delete-objects","h":"#delete-an-application","p":656},{"i":660,"t":"Delete pod","u":"/docs/openshift-delete-objects","h":"#delete-pod","p":656},{"i":662,"t":"Delete a project","u":"/docs/openshift-delete-objects","h":"#delete-a-project","p":656},{"i":664,"t":"Delete persistent storage","u":"/docs/openshift-delete-objects","h":"#delete-persistent-storage","p":656},{"i":666,"t":"Fix stuck deletions","u":"/docs/openshift-delete-objects","h":"#fix-stuck-deletions","p":656},{"i":667,"t":"Stuck provisioned service","u":"/docs/openshift-delete-objects","h":"#stuck-provisioned-service","p":656},{"i":669,"t":"Delete stuck project","u":"/docs/openshift-delete-objects","h":"#delete-stuck-project","p":656},{"i":673,"t":"Create a project using the web UI","u":"/docs/project-management","h":"#create-a-project-using-the-web-ui","p":671},{"i":675,"t":"Create a project using the CLI","u":"/docs/project-management","h":"#create-a-project-using-the-cli","p":671},{"i":677,"t":"Access permissions for developers to your project","u":"/docs/project-management","h":"#access-permissions-for-developers-to-your-project","p":671},{"i":679,"t":"Delete a project using the web UI","u":"/docs/project-management","h":"#delete-a-project-using-the-web-ui","p":671},{"i":681,"t":"Delete a project using the CLI","u":"/docs/project-management","h":"#delete-a-project-using-the-cli","p":671},{"i":685,"t":"What is profiling?","u":"/docs/profile-pytorch-code","h":"#what-is-profiling","p":683},{"i":687,"t":"Why should I care about profiling?","u":"/docs/profile-pytorch-code","h":"#why-should-i-care-about-profiling","p":683},{"i":689,"t":"When should I care about profiling?","u":"/docs/profile-pytorch-code","h":"#when-should-i-care-about-profiling","p":683},{"i":691,"t":"How DSRI team can help you?","u":"/docs/profile-pytorch-code","h":"#how-dsri-team-can-help-you","p":683},{"i":693,"t":"External Resources and references","u":"/docs/profile-pytorch-code","h":"#external-resources-and-references","p":683},{"i":697,"t":"🔶 Speeding up Tensorflow based deep learning pipelines","u":"/docs/speeding-tensorflow-dl","h":"#-speeding-up-tensorflow-based-deep-learning-pipelines","p":695},{"i":699,"t":"A possible checklist for speeding up your deep learning pipeline in Tensorflow?","u":"/docs/speeding-tensorflow-dl","h":"#a-possible-checklist-for-speeding-up-your-deep-learning-pipeline-in-tensorflow","p":695},{"i":701,"t":"Data Preparation","u":"/docs/speeding-tensorflow-dl","h":"#data-preparation","p":695},{"i":703,"t":"Data Reading","u":"/docs/speeding-tensorflow-dl","h":"#data-reading","p":695},{"i":705,"t":"Data Augmentation","u":"/docs/speeding-tensorflow-dl","h":"#data-augmentation","p":695},{"i":707,"t":"Training","u":"/docs/speeding-tensorflow-dl","h":"#training","p":695},{"i":709,"t":"Inference","u":"/docs/speeding-tensorflow-dl","h":"#inference","p":695},{"i":711,"t":"How DSRI team can help you?","u":"/docs/speeding-tensorflow-dl","h":"#how-dsri-team-can-help-you","p":695},{"i":713,"t":"External Resources and references","u":"/docs/speeding-tensorflow-dl","h":"#external-resources-and-references","p":695},{"i":717,"t":"Reminder: DSRI 
restrictions","u":"/docs/sensible-data","h":"#reminder-dsri-restrictions","p":715},{"i":719,"t":"Disclaimer","u":"/docs/sensible-data","h":"#disclaimer","p":715},{"i":723,"t":"SURF's Digital Services for Research and Development","u":"/docs/surf-offerings","h":"#surfs-digital-services-for-research-and-development","p":721},{"i":724,"t":"What is SURF?","u":"/docs/surf-offerings","h":"#what-is-surf","p":721},{"i":726,"t":"What is a cluster computer?","u":"/docs/surf-offerings","h":"#what-is-a-cluster-computer","p":721},{"i":728,"t":"Different types of Services provided by SURF:","u":"/docs/surf-offerings","h":"#different-types-of-services-provided-by-surf","p":721},{"i":730,"t":"How to Get Started with SURF Services?","u":"/docs/surf-offerings","h":"#how-to-get-started-with-surf-services","p":721},{"i":732,"t":"External Resources and references","u":"/docs/surf-offerings","h":"#external-resources-and-references","p":721},{"i":736,"t":"Introduction to containers","u":"/docs/start-workspace","h":"#introduction-to-containers","p":734},{"i":738,"t":"Choose your interface","u":"/docs/start-workspace","h":"#choose-your-interface","p":734},{"i":741,"t":"Upload your code and data","u":"/docs/start-workspace","h":"#upload-your-code-and-data","p":734},{"i":743,"t":"Install your dependencies","u":"/docs/start-workspace","h":"#install-your-dependencies","p":734},{"i":745,"t":"Run your code","u":"/docs/start-workspace","h":"#run-your-code","p":734},{"i":747,"t":"Stop your application","u":"/docs/start-workspace","h":"#stop-your-application","p":734},{"i":749,"t":"Start your application","u":"/docs/start-workspace","h":"#start-your-application","p":734},{"i":751,"t":"Optional: define a docker image","u":"/docs/start-workspace","h":"#optional-define-a-docker-image","p":734},{"i":755,"t":"Install the chart","u":"/docs/workflows-airflow","h":"#install-the-chart","p":753},{"i":758,"t":"Example workflows","u":"/docs/workflows-airflow","h":"#example-workflows","p":753},{"i":760,"t":"Delete the chart","u":"/docs/workflows-airflow","h":"#delete-the-chart","p":753},{"i":762,"t":"See also","u":"/docs/workflows-airflow","h":"#see-also","p":753},{"i":766,"t":"Install the argo client","u":"/docs/workflows-argo","h":"#install-the-argo-client","p":764},{"i":768,"t":"On Ubuntu","u":"/docs/workflows-argo","h":"#on-ubuntu","p":764},{"i":770,"t":"On MacOS","u":"/docs/workflows-argo","h":"#on-macos","p":764},{"i":772,"t":"On Windows","u":"/docs/workflows-argo","h":"#on-windows","p":764},{"i":774,"t":"Test Argo","u":"/docs/workflows-argo","h":"#test-argo","p":764},{"i":776,"t":"Install Argo in your project","u":"/docs/workflows-argo","h":"#install-argo-in-your-project","p":764},{"i":777,"t":"Argo workflows with Helm","u":"/docs/workflows-argo","h":"#argo-workflows-with-helm","p":764},{"i":779,"t":"ArgoCD Operator","u":"/docs/workflows-argo","h":"#argocd-operator","p":764},{"i":781,"t":"Uninstall argo","u":"/docs/workflows-argo","h":"#uninstall-argo","p":764},{"i":783,"t":"Run workflows to convert structured data to RDF","u":"/docs/workflows-argo","h":"#run-workflows-to-convert-structured-data-to-rdf","p":764},{"i":785,"t":"Clone the repository","u":"/docs/workflows-argo","h":"#clone-the-repository","p":764},{"i":787,"t":"Workflow to convert XML files to RDF","u":"/docs/workflows-argo","h":"#workflow-to-convert-xml-files-to-rdf","p":764},{"i":789,"t":"Workflow to convert CSV files to RDF","u":"/docs/workflows-argo","h":"#workflow-to-convert-csv-files-to-rdf","p":764},{"i":791,"t":"Argo 
commands","u":"/docs/workflows-argo","h":"#argo-commands","p":764},{"i":792,"t":"List running Argo workflows","u":"/docs/workflows-argo","h":"#list-running-argo-workflows","p":764},{"i":794,"t":"Stop a workflow","u":"/docs/workflows-argo","h":"#stop-a-workflow","p":764},{"i":796,"t":"Delete a workflow","u":"/docs/workflows-argo","h":"#delete-a-workflow","p":764},{"i":798,"t":"Debug a workflow","u":"/docs/workflows-argo","h":"#debug-a-workflow","p":764},{"i":802,"t":"Clone the repository","u":"/docs/workflows-cwl","h":"#clone-the-repository","p":800},{"i":804,"t":"Start pod","u":"/docs/workflows-cwl","h":"#start-pod","p":800},{"i":806,"t":"Delete created pod","u":"/docs/workflows-cwl","h":"#delete-created-pod","p":800},{"i":810,"t":"Machine Learning libraries","u":"/docs/tools-machine-learning","h":"#machine-learning-libraries","p":808},{"i":812,"t":"SciKit Learn","u":"/docs/tools-machine-learning","h":"#scikit-learn","p":808},{"i":814,"t":"Deep Learning libraries","u":"/docs/tools-machine-learning","h":"#deep-learning-libraries","p":808},{"i":816,"t":"Tensorflow","u":"/docs/tools-machine-learning","h":"#tensorflow","p":808},{"i":818,"t":"PyTorch","u":"/docs/tools-machine-learning","h":"#pytorch","p":808},{"i":820,"t":"Deep Java Library","u":"/docs/tools-machine-learning","h":"#deep-java-library","p":808},{"i":822,"t":"Sonnet","u":"/docs/tools-machine-learning","h":"#sonnet","p":808},{"i":824,"t":"Keras","u":"/docs/tools-machine-learning","h":"#keras","p":808},{"i":826,"t":"Metaflow","u":"/docs/tools-machine-learning","h":"#metaflow","p":808},{"i":830,"t":"Introduction","u":"/docs/workflows-introduction","h":"#introduction","p":828},{"i":832,"t":"Current solutions on the DSRI","u":"/docs/workflows-introduction","h":"#current-solutions-on-the-dsri","p":828},{"i":834,"t":"GitHub Actions workflows","u":"/docs/workflows-introduction","h":"#github-actions-workflows","p":828},{"i":836,"t":"Apache Airflow","u":"/docs/workflows-introduction","h":"#apache-airflow","p":828},{"i":838,"t":"Argo","u":"/docs/workflows-introduction","h":"#argo","p":828},{"i":840,"t":"More options","u":"/docs/workflows-introduction","h":"#more-options","p":828},{"i":842,"t":"Kubeflow","u":"/docs/workflows-introduction","h":"#kubeflow","p":828},{"i":844,"t":"Apache Airflow","u":"/docs/workflows-introduction","h":"#apache-airflow-1","p":828},{"i":846,"t":"Volcano","u":"/docs/workflows-introduction","h":"#volcano","p":828},{"i":848,"t":"Nextflow","u":"/docs/workflows-introduction","h":"#nextflow","p":828},{"i":850,"t":"CWL","u":"/docs/workflows-introduction","h":"#cwl","p":828},{"i":852,"t":"KubeGene","u":"/docs/workflows-introduction","h":"#kubegene","p":828},{"i":854,"t":"Seldon","u":"/docs/workflows-introduction","h":"#seldon","p":828},{"i":858,"t":"Install Nextflow","u":"/docs/workflows-nextflow","h":"#install-nextflow","p":856},{"i":860,"t":"Run workflow","u":"/docs/workflows-nextflow","h":"#run-workflow","p":856},{"i":864,"t":"Install the chart","u":"/docs/workflows-github-actions","h":"#install-the-chart","p":862},{"i":866,"t":"Deploy a Runner","u":"/docs/workflows-github-actions","h":"#deploy-a-runner","p":862},{"i":868,"t":"For an organization","u":"/docs/workflows-github-actions","h":"#for-an-organization","p":862},{"i":870,"t":"For a repository","u":"/docs/workflows-github-actions","h":"#for-a-repository","p":862},{"i":872,"t":"Define Actions to run on DSRI","u":"/docs/workflows-github-actions","h":"#define-actions-to-run-on-dsri","p":862},{"i":874,"t":"Uninstall the 
runner","u":"/docs/workflows-github-actions","h":"#uninstall-the-runner","p":862},{"i":876,"t":"Deploy using GitHub Actions workflows","u":"/docs/workflows-github-actions","h":"#deploy-using-github-actions-workflows","p":862},{"i":878,"t":"See also","u":"/docs/workflows-github-actions","h":"#see-also","p":862}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/14",[0,4.426,1,4.824]],["t/16",[2,3.62,3,3.904,4,4.629]],["t/18",[5,3.505,6,3.751,7,3.402]],["t/20",[5,4.135,8,3.634]],["t/22",[6,4.426,7,4.014]],["t/24",[5,3.505,9,4.319,10,5.098]],["t/28",[11,6.014,12,3.556]],["t/29",[13,2.048,14,4.629,15,3.15]],["t/31",[13,2.417,14,5.461]],["t/33",[15,3.717,16,6.014]],["t/35",[15,3.717,17,5.461]],["t/36",[18,7.333]],["t/38",[19,7.333]],["t/40",[15,3.15,20,3.751,21,4.319]],["t/44",[22,5.096,23,6.014]],["t/48",[24,7.333]],["t/52",[25,5.098,26,5.098,27,5.098]],["t/56",[15,3.15,28,4.629,29,5.098]],["t/58",[12,3.014,30,3.31,31,3.505]],["t/60",[28,6.658]],["t/64",[0,2.875,3,2.992,5,2.686,32,3.547,33,2.992]],["t/68",[34,6.214]],["t/69",[35,5.882]],["t/71",[6,4.426,7,4.014]],["t/73",[36,4.135,37,6.014]],["t/75",[2,2.774,38,2.219,39,3.133,40,2.607,41,2.537]],["t/79",[42,5.617]],["t/81",[42,4.607,43,6.014]],["t/83",[13,1.57,42,2.992,44,3.907,45,3.547,46,3.31]],["t/85",[42,3.388,45,4.016,47,3.548,48,3.748]],["t/87",[42,3.388,46,3.748,47,3.548,48,3.748]],["t/89",[49,4.607,50,4.27]],["t/92",[51,7.333]],["t/94",[13,1.777,52,4.423,53,4.423,54,3.255]],["t/96",[13,1.777,55,4.423,56,4.423,57,2.952]],["t/98",[13,2.048,58,4.319,59,2.79]],["t/100",[13,2.417,60,5.096]],["t/102",[13,2.048,36,3.505,61,5.098]],["t/104",[13,2.048,62,5.098,63,5.098]],["t/106",[0,3.255,13,1.777,64,4.016,65,4.423]],["t/110",[13,2.048,66,2.426,67,3.227]],["t/112",[13,1.777,68,3.548,69,3.388,70,3.141]],["t/114",[5,3.041,13,1.777,71,4.016,72,4.423]],["t/118",[13,2.417,66,2.863]],["t/120",[13,1.777,73,4.423,74,4.423,75,4.423]],["t/122",[13,1.406,76,3.498,77,3.498,78,3.498,79,3.498,80,2.806]],["t/124",[13,2.048,81,5.098,82,5.098]],["t/126",[13,1.777,83,4.423,84,4.423,85,4.423]],["t/128",[13,2.417,86,6.014]],["t/130",[13,2.048,87,5.098,88,5.098]],["t/134",[89,5.098,90,2.953,91,3.751]],["t/136",[15,3.15,59,2.79,90,2.953]],["t/138",[90,2.263,92,2.607,93,3.547,94,3.31,95,2.875]],["t/140",[96,4.824,97,5.096]],["t/142",[90,3.484,92,4.014]],["t/144",[93,5.461,98,4.607]],["t/150",[99,6.014,100,4.824]],["t/152",[12,3.014,13,2.048,101,5.098]],["t/154",[12,3.014,13,2.048,102,4.629]],["t/156",[100,4.824,103,6.014]],["t/157",[13,2.417,104,6.014]],["t/159",[13,2.417,105,6.014]],["t/161",[100,4.824,106,6.014]],["t/162",[107,5.098,108,5.098,109,4.629]],["t/164",[109,4.629,110,5.098,111,5.098]],["t/166",[112,7.333]],["t/170",[12,3.014,13,2.048,113,3.904]],["t/172",[13,1.777,114,4.016,115,3.748,116,4.016]],["t/174",[13,1.777,113,3.388,117,2.197,118,3.141]],["t/176",[13,2.417,47,4.824]],["t/180",[90,2.562,117,2.197,119,4.423,120,3.388]],["t/182",[90,2.562,117,2.197,120,3.388,121,4.423]],["t/184",[71,4.629,117,2.532,120,3.904]],["t/186",[59,2.79,120,3.904,122,3.505]],["t/188",[90,2.953,92,3.402,120,3.904]],["t/192",[122,3.505,123,4.089,124,3.904]],["t/194",[90,3.484,95,4.426]],["t/196",[59,3.292,124,4.607]],["t/198",[123,4.824,124,4.607]],["t/200",[122,4.135,125,5.461]],["t/202",[122,4.135,126,6.014]],["t/204",[122,3.505,127,5.098,128,5.098]],["t/206",[66,2.105,90,2.562,122,3.041,129,4.423]],["t/210",[12,3.556,130,4.824]],["t/212",[33,3.388,130,3.548,131,4.016,132,4.423]],["t/214",[117,2.532,118,3.62,130,4.089]],["t/216",[133,3.227,134,5.098,135,4.629]],["
t/220",[136,4.629,137,5.098,138,4.319]],["t/222",[]],["t/223",[40,2.335,139,3.176,140,3.498,141,3.498,142,2.964,143,3.498]],["t/225",[68,4.089,144,4.629,145,5.098]],["t/227",[]],["t/229",[]],["t/230",[13,1.406,15,2.162,31,2.405,59,1.915,117,1.738,146,3.498]],["t/232",[31,2.686,66,1.859,67,2.473,147,2.686,148,2.774]],["t/234",[]],["t/236",[]],["t/238",[]],["t/240",[]],["t/241",[31,3.041,66,2.105,67,2.8,147,3.041]],["t/243",[]],["t/245",[]],["t/247",[]],["t/249",[]],["t/251",[]],["t/253",[]],["t/255",[]],["t/256",[38,2.895,69,3.904,149,4.319]],["t/258",[]],["t/260",[]],["t/262",[138,5.096,150,5.461]],["t/264",[]],["t/266",[]],["t/268",[]],["t/270",[]],["t/272",[]],["t/273",[13,1.162,31,1.989,59,1.583,117,1.437,151,2.451,152,2.893,153,2.627,154,2.32]],["t/275",[31,2.686,66,1.859,67,2.473,147,2.686,148,2.774]],["t/277",[]],["t/279",[31,3.041,66,2.105,67,2.8,147,3.041]],["t/281",[]],["t/283",[38,2.895,69,3.904,149,4.319]],["t/285",[138,5.096,150,5.461]],["t/287",[]],["t/291",[12,2.615,33,3.388,155,4.423,156,3.548]],["t/293",[117,2.532,118,3.62,157,4.319]],["t/295",[122,4.135,157,5.096]],["t/299",[158,4.423,159,4.423,160,4.423,161,4.016]],["t/303",[30,3.31,59,2.79,70,3.62]],["t/305",[59,2.79,70,3.62,162,5.098]],["t/307",[30,2.873,38,2.512,69,3.388,163,4.016]],["t/309",[30,3.906,133,3.807]],["t/311",[117,2.988,164,6.014]],["t/313",[165,3.133,166,3.907,167,3.907,168,3.907,169,3.907]],["t/315",[2,3.141,30,2.873,40,2.952,41,2.873]],["t/317",[30,2.873,70,3.141,133,2.8,170,2.562]],["t/321",[95,5.396]],["t/322",[171,6.658]],["t/324",[172,7.333]],["t/326",[173,7.333]],["t/330",[174,4.014,175,6.014]],["t/332",[176,6.658]],["t/334",[90,4.248]],["t/336",[38,3.416,41,3.906]],["t/338",[177,7.333]],["t/340",[59,4.014]],["t/342",[178,3.484,179,6.014]],["t/344",[161,4.629,180,5.098,181,3.505]],["t/346",[41,3.906,182,6.014]],["t/348",[149,5.096,183,6.014]],["t/350",[91,5.396]],["t/352",[69,5.617]],["t/354",[174,4.014,184,6.014]],["t/356",[36,3.505,68,4.089,185,4.629]],["t/358",[58,3.748,185,4.016,186,4.423,187,4.016]],["t/360",[50,3.62,96,4.089,188,4.629]],["t/362",[92,3.402,174,3.402,189,4.629]],["t/366",[57,3.402,92,3.402,97,4.319]],["t/368",[38,2.512,68,3.548,92,2.952,94,3.748]],["t/370",[90,3.484,92,4.014]],["t/372",[38,3.416,190,5.096]],["t/374",[190,5.096,191,5.461]],["t/376",[38,2.895,92,3.402,170,2.953]],["t/378",[57,2.952,59,2.421,90,2.562,95,3.255]],["t/380",[59,2.79,118,3.62,148,3.62]],["t/384",[66,2.863,192,5.461]],["t/386",[12,3.556,192,5.461]],["t/388",[193,5.396]],["t/390",[194,6.214]],["t/392",[195,7.333]],["t/394",[66,2.863,196,4.824]],["t/395",[34,5.096,196,4.824]],["t/397",[13,1.777,196,3.548,197,4.016,198,3.748]],["t/399",[7,3.402,66,2.426,163,4.629]],["t/401",[133,3.807,196,4.824]],["t/403",[125,5.461,199,6.014]],["t/405",[38,2.895,40,3.402,142,4.319]],["t/407",[200,5.882]],["t/409",[66,2.426,201,3.402,202,3.014]],["t/410",[57,3.402,66,2.426,171,4.629]],["t/412",[66,2.863,203,4.824]],["t/414",[7,4.014,191,5.461]],["t/418",[13,1.777,181,3.041,193,3.255,204,3.388]],["t/419",[54,3.255,181,3.041,193,3.255,204,3.388]],["t/421",[193,4.426,205,5.461]],["t/423",[98,4.607,193,4.426]],["t/425",[13,2.048,90,2.953,206,3.904]],["t/427",[54,3.255,181,3.041,204,3.388,206,3.388]],["t/429",[205,5.461,206,4.607]],["t/431",[98,4.607,206,4.607]],["t/433",[58,4.319,117,2.532,202,3.014]],["t/437",[50,3.141,117,2.197,174,2.952,207,4.423]],["t/439",[59,2.79,174,3.402,208,4.629]],["t/443",[0,4.426,1,4.824]],["t/445",[2,3.62,3,3.904,4,4.629]],["t/449",[66,2.426,67,3.227,203,4.089]],["t/450",[66,2.863,209,6.014]],["t/4
52",[66,2.863,67,3.807]],["t/454",[66,2.426,67,3.227,187,4.629]],["t/456",[66,2.426,67,3.227,147,3.505]],["t/458",[12,2.615,67,2.8,100,3.548,102,4.016]],["t/460",[174,4.014,200,4.824]],["t/462",[59,2.79,139,4.629,176,4.629]],["t/466",[210,7.333]],["t/468",[211,7.333]],["t/470",[212,6.658]],["t/474",[0,3.255,1,3.548,213,4.423,214,3.748]],["t/476",[215,6.014,216,6.014]],["t/478",[123,4.824,217,6.014]],["t/480",[90,2.953,206,3.904,218,4.629]],["t/482",[12,2.615,124,3.388,214,3.748,219,4.423]],["t/486",[5,3.041,40,2.952,220,2.873,221,4.423]],["t/488",[142,5.096,222,5.461]],["t/490",[64,4.629,98,3.904,188,4.629]],["t/492",[133,2.473,181,2.686,223,3.907,224,3.907,225,3.907]],["t/494",[118,3.62,144,4.629,226,5.098]],["t/496",[227,5.098,228,5.098,229,5.098]],["t/500",[13,2.048,66,2.426,230,5.098]],["t/502",[13,1.777,30,2.873,31,3.041,59,2.421]],["t/504",[13,1.777,30,2.873,70,3.141,117,2.197]],["t/506",[231,6.014,232,6.014]],["t/508",[7,4.014,30,3.906]],["t/510",[30,3.31,70,3.62,94,4.319]],["t/512",[13,2.048,59,2.79,170,2.953]],["t/516",[133,2.8,135,4.016,233,4.423,234,4.423]],["t/520",[12,3.014,13,2.048,124,3.904]],["t/522",[13,1.777,114,4.016,115,3.748,116,4.016]],["t/524",[13,1.777,113,3.388,117,2.197,118,3.141]],["t/528",[113,4.607,235,5.096]],["t/530",[235,5.096,236,6.014]],["t/532",[235,5.096,237,6.014]],["t/534",[59,2.79,97,4.319,238,5.098]],["t/536",[117,2.988,122,4.135]],["t/540",[239,7.333]],["t/542",[8,4.43]],["t/543",[8,3.634,240,5.096]],["t/545",[2,4.27,8,3.634]],["t/547",[241,7.333]],["t/549",[178,4.248]],["t/550",[38,2.895,178,2.953,242,5.098]],["t/552",[178,3.484,240,5.096]],["t/554",[17,5.461,178,3.484]],["t/556",[2,3.62,243,5.098,244,5.098]],["t/558",[151,4.319,178,2.953,245,5.098]],["t/560",[170,3.484,178,3.484]],["t/562",[178,3.484,246,5.461]],["t/564",[38,2.895,189,4.629,190,4.319]],["t/566",[36,4.135,247,4.824]],["t/570",[66,2.426,203,4.089,248,4.319]],["t/571",[249,7.333]],["t/573",[250,7.333]],["t/575",[198,6.214]],["t/577",[35,4.089,193,3.751,248,4.319]],["t/581",[35,5.882]],["t/583",[6,4.426,7,4.014]],["t/587",[3,3.904,181,3.505,204,3.904]],["t/589",[95,3.751,154,4.089,246,4.629]],["t/591",[117,2.532,251,5.098,252,5.098]],["t/593",[8,3.634,38,3.416]],["t/595",[117,2.988,214,5.096]],["t/597",[1,4.089,117,2.532,253,5.098]],["t/599",[54,3.751,181,3.505,204,3.904]],["t/601",[98,5.617]],["t/605",[113,3.904,130,4.089,157,4.319]],["t/607",[35,3.548,36,3.041,222,4.016,247,3.548]],["t/609",[57,3.402,178,2.953,247,4.089]],["t/611",[57,3.402,178,2.953,247,4.089]],["t/613",[136,4.629,220,3.31,254,5.098]],["t/615",[36,3.041,248,3.748,255,4.423,256,4.423]],["t/617",[57,3.402,178,2.953,257,4.629]],["t/619",[57,3.402,178,2.953,257,4.629]],["t/621",[21,5.096,258,5.096]],["t/623",[259,6.014,260,6.014]],["t/626",[118,3.62,148,3.62,156,4.089]],["t/628",[220,3.906,261,6.014]],["t/630",[57,3.402,220,3.31,262,4.629]],["t/632",[33,4.607,220,3.906]],["t/634",[0,2.875,3,2.992,5,2.686,32,3.547,33,2.992]],["t/638",[39,4.089,66,2.426,80,4.089]],["t/640",[80,4.824,92,4.014]],["t/642",[49,4.607,50,4.27]],["t/646",[38,2.895,40,3.402,41,3.31]],["t/648",[2,3.141,39,3.548,40,2.952,41,2.873]],["t/650",[39,3.548,40,2.952,41,2.873,263,4.423]],["t/652",[41,3.31,117,2.532,264,5.098]],["t/654",[41,3.31,117,2.532,265,5.098]],["t/658",[170,3.484,174,4.014]],["t/660",[170,3.484,178,3.484]],["t/662",[8,3.634,170,3.484]],["t/664",[40,3.402,41,3.31,170,2.953]],["t/666",[170,2.953,266,5.098,267,4.319]],["t/667",[91,3.751,267,4.319,268,5.098]],["t/669",[8,3.08,170,2.953,267,4.319]],["t/673",[6,2.875,7,2.607,8,2.36,38,2.21
9,117,1.941]],["t/675",[8,2.672,38,2.512,117,2.197,154,3.548]],["t/677",[5,3.041,8,2.672,9,3.748,269,4.423]],["t/679",[6,2.875,7,2.607,8,2.36,117,1.941,170,2.263]],["t/681",[8,2.672,117,2.197,154,3.548,170,2.562]],["t/685",[270,6.214]],["t/687",[270,5.096,271,5.461]],["t/689",[270,5.096,271,5.461]],["t/691",[15,3.15,60,4.319,272,4.629]],["t/693",[49,3.904,50,3.62,273,4.319]],["t/697",[13,1.162,20,2.129,48,2.451,274,2.627,275,2.627,276,2.451,277,2.32,278,2.627]],["t/699",[20,2.129,274,2.627,275,2.627,276,2.451,277,2.32,278,2.627,279,2.893,280,2.893]],["t/701",[123,4.824,220,3.906]],["t/703",[220,3.906,281,6.014]],["t/705",[220,3.906,282,6.014]],["t/707",[218,6.658]],["t/709",[283,7.333]],["t/711",[15,3.15,60,4.319,272,4.629]],["t/713",[49,3.904,50,3.62,273,4.319]],["t/717",[15,3.15,131,4.629,284,5.098]],["t/719",[285,7.333]],["t/723",[9,3.31,91,2.875,286,3.907,287,3.907,288,3.907]],["t/724",[289,6.214]],["t/726",[70,4.27,290,6.014]],["t/728",[91,2.875,289,3.31,291,3.907,292,3.907,293,3.907]],["t/730",[12,3.014,91,3.751,289,4.319]],["t/732",[49,3.904,50,3.62,273,4.319]],["t/736",[181,4.135,294,5.461]],["t/738",[153,5.461,295,6.014]],["t/741",[156,4.089,220,3.31,296,5.098]],["t/743",[66,2.863,115,5.096]],["t/745",[133,3.807,156,4.824]],["t/747",[174,4.014,194,5.096]],["t/749",[12,3.556,174,4.014]],["t/751",[90,2.562,95,3.255,96,3.548,258,3.748]],["t/755",[66,2.863,147,4.135]],["t/758",[47,4.824,202,3.556]],["t/760",[147,4.135,170,3.484]],["t/762",[297,6.658]],["t/766",[66,2.426,201,3.402,203,4.089]],["t/768",[34,6.214]],["t/770",[197,6.658]],["t/772",[198,6.214]],["t/774",[201,4.014,298,6.014]],["t/776",[8,3.08,66,2.426,201,3.402]],["t/777",[67,3.227,201,3.402,202,3.014]],["t/779",[80,4.824,299,6.014]],["t/781",[200,4.824,201,4.014]],["t/783",[133,2.214,165,2.806,202,2.068,220,2.272,300,2.964,301,3.498]],["t/785",[148,4.27,302,5.461]],["t/787",[36,2.686,165,3.133,202,2.31,300,3.31,303,3.907]],["t/789",[36,2.686,165,3.133,202,2.31,300,3.31,304,3.907]],["t/791",[151,5.096,201,4.014]],["t/792",[133,2.8,201,2.952,202,2.615,240,3.748]],["t/794",[194,5.096,202,3.556]],["t/796",[170,3.484,202,3.556]],["t/798",[202,3.556,208,5.461]],["t/802",[148,4.27,302,5.461]],["t/804",[12,3.556,178,3.484]],["t/806",[38,2.895,170,2.953,178,2.953]],["t/810",[20,3.751,262,4.629,305,4.319]],["t/812",[20,4.426,306,6.014]],["t/814",[20,3.751,277,4.089,305,4.319]],["t/816",[276,6.214]],["t/818",[46,6.214]],["t/820",[277,4.089,305,4.319,307,5.098]],["t/822",[308,7.333]],["t/824",[309,7.333]],["t/826",[310,7.333]],["t/830",[294,6.658]],["t/832",[15,3.15,212,4.629,311,5.098]],["t/834",[54,3.751,202,3.014,312,4.319]],["t/836",[22,5.096,313,5.461]],["t/838",[201,4.895]],["t/840",[21,5.096,258,5.096]],["t/842",[314,7.333]],["t/844",[22,5.096,313,5.461]],["t/846",[315,7.333]],["t/848",[316,6.658]],["t/850",[317,7.333]],["t/852",[318,7.333]],["t/854",[319,7.333]],["t/858",[66,2.863,316,5.461]],["t/860",[133,3.807,202,3.556]],["t/864",[66,2.863,147,4.135]],["t/866",[59,3.292,320,5.461]],["t/868",[321,7.333]],["t/870",[148,5.207]],["t/872",[15,2.734,96,3.548,133,2.8,312,3.748]],["t/874",[200,4.824,320,5.461]],["t/876",[54,2.875,59,2.138,117,1.941,202,2.31,312,3.31]],["t/878",[297,6.658]]],"invertedIndex":[["",{"_index":13,"t":{"29":{"position":[[0,1]]},"31":{"position":[[0,1]]},"83":{"position":[[58,1]]},"94":{"position":[[0,1]]},"96":{"position":[[0,3]]},"98":{"position":[[0,2]]},"100":{"position":[[0,2]]},"102":{"position":[[0,2]]},"104":{"position":[[0,2]]},"106":{"position":[[0,2]]},"110":{"position":[[0,2]]},"112":{"position"
:[[0,2]]},"114":{"position":[[0,2]]},"118":{"position":[[0,2]]},"120":{"position":[[0,2]]},"122":{"position":[[0,1]]},"124":{"position":[[0,2]]},"126":{"position":[[0,1]]},"128":{"position":[[0,1]]},"130":{"position":[[0,2]]},"152":{"position":[[17,2]]},"154":{"position":[[12,2]]},"157":{"position":[[8,2]]},"159":{"position":[[6,2]]},"170":{"position":[[0,2]]},"172":{"position":[[0,3]]},"174":{"position":[[0,2]]},"176":{"position":[[0,2]]},"230":{"position":[[44,2]]},"273":{"position":[[60,2]]},"397":{"position":[[17,1]]},"418":{"position":[[30,2]]},"425":{"position":[[19,2]]},"500":{"position":[[0,2]]},"502":{"position":[[0,2]]},"504":{"position":[[0,1]]},"512":{"position":[[0,3]]},"520":{"position":[[0,2]]},"522":{"position":[[0,3]]},"524":{"position":[[0,2]]},"697":{"position":[[0,2]]}}}],["403",{"_index":228,"t":{"496":{"position":[[12,3]]}}}],["access",{"_index":5,"t":{"18":{"position":[[0,6]]},"20":{"position":[[0,6]]},"24":{"position":[[0,9]]},"64":{"position":[[8,6]]},"114":{"position":[[3,6]]},"486":{"position":[[7,6]]},"634":{"position":[[8,6]]},"677":{"position":[[0,6]]}}}],["account",{"_index":1,"t":{"14":{"position":[[11,7]]},"443":{"position":[[11,7]]},"474":{"position":[[12,8]]},"597":{"position":[[14,7]]}}}],["action",{"_index":312,"t":{"834":{"position":[[7,7]]},"872":{"position":[[7,7]]},"876":{"position":[[20,7]]}}}],["add",{"_index":185,"t":{"356":{"position":[[0,3]]},"358":{"position":[[0,3]]}}}],["adjust",{"_index":137,"t":{"220":{"position":[[16,9]]}}}],["afni",{"_index":237,"t":{"532":{"position":[[15,4]]}}}],["airflow",{"_index":313,"t":{"836":{"position":[[7,7]]},"844":{"position":[[7,7]]}}}],["allegrograph",{"_index":112,"t":{"166":{"position":[[0,12]]}}}],["analyt",{"_index":166,"t":{"313":{"position":[[4,9]]}}}],["apach",{"_index":22,"t":{"44":{"position":[[0,6]]},"836":{"position":[[0,6]]},"844":{"position":[[0,6]]}}}],["app",{"_index":190,"t":{"372":{"position":[[12,3]]},"374":{"position":[[7,3]]},"564":{"position":[[7,3]]}}}],["applic",{"_index":174,"t":{"330":{"position":[[0,11]]},"354":{"position":[[13,11]]},"362":{"position":[[15,11]]},"437":{"position":[[13,11]]},"439":{"position":[[9,11]]},"460":{"position":[[14,11]]},"658":{"position":[[10,11]]},"747":{"position":[[10,11]]},"749":{"position":[[11,11]]}}}],["architectur",{"_index":16,"t":{"33":{"position":[[9,12]]}}}],["argo",{"_index":201,"t":{"409":{"position":[[8,4]]},"766":{"position":[[12,4]]},"774":{"position":[[5,4]]},"776":{"position":[[8,4]]},"777":{"position":[[0,4]]},"781":{"position":[[10,4]]},"791":{"position":[[0,4]]},"792":{"position":[[13,4]]},"838":{"position":[[0,4]]}}}],["argocd",{"_index":299,"t":{"779":{"position":[[0,6]]}}}],["augment",{"_index":282,"t":{"705":{"position":[[5,12]]}}}],["authent",{"_index":144,"t":{"225":{"position":[[15,14]]},"494":{"position":[[4,14]]}}}],["autom",{"_index":58,"t":{"98":{"position":[[3,9]]},"358":{"position":[[4,9]]},"433":{"position":[[4,9]]}}}],["avail",{"_index":29,"t":{"56":{"position":[[11,9]]}}}],["background",{"_index":225,"t":{"492":{"position":[[48,11]]}}}],["bad",{"_index":211,"t":{"468":{"position":[[4,3]]}}}],["base",{"_index":48,"t":{"85":{"position":[[28,5]]},"87":{"position":[[19,5]]},"697":{"position":[[26,5]]}}}],["basic",{"_index":73,"t":{"120":{"position":[[3,5]]}}}],["browser",{"_index":37,"t":{"73":{"position":[[5,7]]}}}],["build",{"_index":92,"t":{"138":{"position":[[0,5]]},"142":{"position":[[0,5]]},"188":{"position":[[0,5]]},"362":{"position":[[0,5]]},"366":{"position":[[0,5]]},"368":{"position":[[11,5]]},"370":{"position":[
[0,5]]},"376":{"position":[[19,5]]},"640":{"position":[[0,5]]}}}],["cach",{"_index":252,"t":{"591":{"position":[[14,5]]}}}],["care",{"_index":271,"t":{"687":{"position":[[13,4]]},"689":{"position":[[14,4]]}}}],["catalog",{"_index":162,"t":{"305":{"position":[[28,7]]}}}],["cellprofil",{"_index":24,"t":{"48":{"position":[[0,12]]}}}],["chang",{"_index":56,"t":{"96":{"position":[[11,7]]}}}],["chart",{"_index":147,"t":{"232":{"position":[[31,5]]},"241":{"position":[[31,5]]},"275":{"position":[[31,5]]},"279":{"position":[[31,5]]},"456":{"position":[[15,5]]},"755":{"position":[[12,5]]},"760":{"position":[[11,5]]},"864":{"position":[[12,5]]}}}],["check",{"_index":187,"t":{"358":{"position":[[21,6]]},"454":{"position":[[0,5]]}}}],["checklist",{"_index":280,"t":{"699":{"position":[[11,9]]}}}],["checkpoint",{"_index":42,"t":{"79":{"position":[[8,14]]},"81":{"position":[[0,13]]},"83":{"position":[[12,13]]},"85":{"position":[[34,14]]},"87":{"position":[[25,14]]}}}],["choos",{"_index":295,"t":{"738":{"position":[[0,6]]}}}],["cli",{"_index":154,"t":{"273":{"position":[[54,5]]},"589":{"position":[[23,3]]},"675":{"position":[[27,3]]},"681":{"position":[[27,3]]}}}],["client",{"_index":203,"t":{"412":{"position":[[12,6]]},"449":{"position":[[17,6]]},"570":{"position":[[15,6]]},"766":{"position":[[17,6]]}}}],["clone",{"_index":302,"t":{"785":{"position":[[0,5]]},"802":{"position":[[0,5]]}}}],["cluster",{"_index":70,"t":{"112":{"position":[[29,7]]},"303":{"position":[[15,7]]},"305":{"position":[[11,7]]},"317":{"position":[[23,7]]},"504":{"position":[[16,7]]},"510":{"position":[[10,7]]},"726":{"position":[[10,7]]}}}],["code",{"_index":156,"t":{"291":{"position":[[19,4]]},"626":{"position":[[0,4]]},"741":{"position":[[12,4]]},"745":{"position":[[9,4]]}}}],["column",{"_index":77,"t":{"122":{"position":[[12,7]]}}}],["command",{"_index":151,"t":{"273":{"position":[[31,7]]},"558":{"position":[[8,7]]},"791":{"position":[[5,8]]}}}],["common",{"_index":83,"t":{"126":{"position":[[2,6]]}}}],["complet",{"_index":184,"t":{"354":{"position":[[4,8]]}}}],["compon",{"_index":28,"t":{"56":{"position":[[0,10]]},"60":{"position":[[4,10]]}}}],["comput",{"_index":290,"t":{"726":{"position":[[18,9]]}}}],["concept",{"_index":74,"t":{"120":{"position":[[9,8]]}}}],["conda",{"_index":116,"t":{"172":{"position":[[29,5]]},"522":{"position":[[29,5]]}}}],["condit",{"_index":81,"t":{"124":{"position":[[3,11]]}}}],["config.yaml",{"_index":138,"t":{"220":{"position":[[30,11]]},"262":{"position":[[14,11]]},"285":{"position":[[14,11]]}}}],["configur",{"_index":68,"t":{"112":{"position":[[3,9]]},"225":{"position":[[0,11]]},"356":{"position":[[6,13]]},"368":{"position":[[17,14]]}}}],["connect",{"_index":2,"t":{"16":{"position":[[0,7]]},"75":{"position":[[12,10]]},"315":{"position":[[0,7]]},"445":{"position":[[0,7]]},"545":{"position":[[0,7]]},"556":{"position":[[13,10]]},"648":{"position":[[0,7]]}}}],["contain",{"_index":181,"t":{"344":{"position":[[29,9]]},"418":{"position":[[9,9]]},"419":{"position":[[16,9]]},"427":{"position":[[18,9]]},"492":{"position":[[29,9]]},"587":{"position":[[3,9]]},"599":{"position":[[7,9]]},"736":{"position":[[16,10]]}}}],["context",{"_index":183,"t":{"348":{"position":[[9,7]]}}}],["contribut",{"_index":51,"t":{"92":{"position":[[0,10]]}}}],["convert",{"_index":300,"t":{"783":{"position":[[17,7]]},"787":{"position":[[12,7]]},"789":{"position":[[12,7]]}}}],["copi",{"_index":247,"t":{"566":{"position":[[0,4]]},"607":{"position":[[0,4]]},"609":{"position":[[0,4]]},"611":{"position":[[0,4]]}}}],["cpu",{"_index":234,"t":{"5
16":{"position":[[16,3]]}}}],["creat",{"_index":38,"t":{"75":{"position":[[0,8]]},"256":{"position":[[0,8]]},"283":{"position":[[0,8]]},"307":{"position":[[0,6]]},"336":{"position":[[0,6]]},"368":{"position":[[0,6]]},"372":{"position":[[0,6]]},"376":{"position":[[11,7]]},"405":{"position":[[0,6]]},"550":{"position":[[0,6]]},"564":{"position":[[0,6]]},"593":{"position":[[0,8]]},"646":{"position":[[0,6]]},"673":{"position":[[0,6]]},"675":{"position":[[0,6]]},"806":{"position":[[7,7]]}}}],["csv",{"_index":304,"t":{"789":{"position":[[20,3]]}}}],["current",{"_index":311,"t":{"832":{"position":[[0,7]]}}}],["cwl",{"_index":317,"t":{"850":{"position":[[0,3]]}}}],["dashboard",{"_index":163,"t":{"307":{"position":[[28,9]]},"399":{"position":[[12,9]]}}}],["dask",{"_index":75,"t":{"120":{"position":[[21,4]]}}}],["data",{"_index":220,"t":{"486":{"position":[[19,4]]},"613":{"position":[[9,4]]},"628":{"position":[[9,4]]},"630":{"position":[[0,4]]},"632":{"position":[[0,4]]},"701":{"position":[[0,4]]},"703":{"position":[[0,4]]},"705":{"position":[[0,4]]},"741":{"position":[[21,4]]},"783":{"position":[[36,4]]}}}],["databas",{"_index":100,"t":{"150":{"position":[[4,9]]},"156":{"position":[[6,9]]},"161":{"position":[[6,9]]},"458":{"position":[[14,8]]}}}],["debug",{"_index":208,"t":{"439":{"position":[[0,5]]},"798":{"position":[[0,5]]}}}],["deep",{"_index":277,"t":{"697":{"position":[[32,4]]},"699":{"position":[[42,4]]},"814":{"position":[[0,4]]},"820":{"position":[[0,4]]}}}],["default",{"_index":141,"t":{"223":{"position":[[15,7]]}}}],["defin",{"_index":96,"t":{"140":{"position":[[0,6]]},"360":{"position":[[0,6]]},"751":{"position":[[10,6]]},"872":{"position":[[0,6]]}}}],["delet",{"_index":170,"t":{"317":{"position":[[0,6]]},"376":{"position":[[0,6]]},"512":{"position":[[4,6]]},"560":{"position":[[0,6]]},"658":{"position":[[0,6]]},"660":{"position":[[0,6]]},"662":{"position":[[0,6]]},"664":{"position":[[0,6]]},"666":{"position":[[10,9]]},"669":{"position":[[0,6]]},"679":{"position":[[0,6]]},"681":{"position":[[0,6]]},"760":{"position":[[0,6]]},"796":{"position":[[0,6]]},"806":{"position":[[0,6]]}}}],["depend",{"_index":115,"t":{"172":{"position":[[11,12]]},"522":{"position":[[11,12]]},"743":{"position":[[13,12]]}}}],["deploy",{"_index":59,"t":{"98":{"position":[[13,10]]},"136":{"position":[[0,6]]},"186":{"position":[[0,6]]},"196":{"position":[[0,6]]},"230":{"position":[[0,9]]},"273":{"position":[[0,9]]},"303":{"position":[[0,6]]},"305":{"position":[[0,6]]},"340":{"position":[[0,10]]},"378":{"position":[[0,6]]},"380":{"position":[[0,6]]},"439":{"position":[[21,10]]},"462":{"position":[[4,10]]},"502":{"position":[[3,6]]},"512":{"position":[[15,10]]},"534":{"position":[[0,6]]},"866":{"position":[[0,6]]},"876":{"position":[[0,6]]}}}],["develop",{"_index":9,"t":{"24":{"position":[[14,9]]},"677":{"position":[[23,10]]},"723":{"position":[[41,11]]}}}],["differ",{"_index":291,"t":{"728":{"position":[[0,9]]}}}],["digit",{"_index":287,"t":{"723":{"position":[[7,7]]}}}],["disabl",{"_index":126,"t":{"202":{"position":[[0,7]]}}}],["disclaim",{"_index":285,"t":{"719":{"position":[[0,10]]}}}],["docker",{"_index":95,"t":{"138":{"position":[[21,6]]},"194":{"position":[[10,6]]},"321":{"position":[[0,6]]},"378":{"position":[[20,6]]},"589":{"position":[[16,6]]},"751":{"position":[[19,6]]}}}],["dockerfil",{"_index":97,"t":{"140":{"position":[[9,10]]},"366":{"position":[[17,10]]},"534":{"position":[[21,10]]}}}],["dockerhub",{"_index":98,"t":{"144":{"position":[[8,9]]},"423":{"position":[[9,9]]},"431":{"position":[[11,9]]},"490":{"
position":[[0,9]]},"601":{"position":[[0,9]]}}}],["done",{"_index":14,"t":{"29":{"position":[[14,4]]},"31":{"position":[[17,4]]}}}],["download",{"_index":136,"t":{"220":{"position":[[0,11]]},"613":{"position":[[0,8]]}}}],["driver",{"_index":129,"t":{"206":{"position":[[12,7]]}}}],["dsri",{"_index":15,"t":{"29":{"position":[[26,4]]},"33":{"position":[[4,4]]},"35":{"position":[[4,4]]},"40":{"position":[[17,4]]},"56":{"position":[[24,4]]},"136":{"position":[[20,4]]},"230":{"position":[[31,4]]},"691":{"position":[[4,4]]},"711":{"position":[[4,4]]},"717":{"position":[[10,4]]},"832":{"position":[[25,4]]},"872":{"position":[[25,4]]}}}],["dynam",{"_index":264,"t":{"652":{"position":[[8,7]]}}}],["edit",{"_index":53,"t":{"94":{"position":[[8,4]]}}}],["element",{"_index":78,"t":{"122":{"position":[[24,7]]}}}],["enabl",{"_index":125,"t":{"200":{"position":[[0,6]]},"403":{"position":[[0,6]]}}}],["environ",{"_index":161,"t":{"299":{"position":[[31,12]]},"344":{"position":[[0,11]]}}}],["ephemer",{"_index":265,"t":{"654":{"position":[[8,9]]}}}],["evalu",{"_index":88,"t":{"130":{"position":[[8,10]]}}}],["exampl",{"_index":47,"t":{"85":{"position":[[0,7]]},"87":{"position":[[0,7]]},"176":{"position":[[3,7]]},"758":{"position":[[0,7]]}}}],["execut",{"_index":245,"t":{"558":{"position":[[0,7]]}}}],["exist",{"_index":39,"t":{"75":{"position":[[26,8]]},"638":{"position":[[8,8]]},"648":{"position":[[12,8]]},"650":{"position":[[7,8]]}}}],["expand",{"_index":263,"t":{"650":{"position":[[0,6]]}}}],["expos",{"_index":191,"t":{"374":{"position":[[0,6]]},"414":{"position":[[0,6]]}}}],["extern",{"_index":49,"t":{"89":{"position":[[0,8]]},"642":{"position":[[0,8]]},"693":{"position":[[0,8]]},"713":{"position":[[0,8]]},"732":{"position":[[0,8]]}}}],["fequenc",{"_index":43,"t":{"81":{"position":[[14,9]]}}}],["file",{"_index":36,"t":{"73":{"position":[[0,4]]},"102":{"position":[[3,5]]},"356":{"position":[[20,4]]},"566":{"position":[[5,5]]},"607":{"position":[[11,5]]},"615":{"position":[[13,5]]},"787":{"position":[[24,5]]},"789":{"position":[[24,5]]}}}],["filebrows",{"_index":227,"t":{"496":{"position":[[0,11]]}}}],["fill",{"_index":215,"t":{"476":{"position":[[0,4]]}}}],["filter",{"_index":82,"t":{"124":{"position":[[15,9]]}}}],["find",{"_index":89,"t":{"134":{"position":[[0,4]]}}}],["fix",{"_index":266,"t":{"666":{"position":[[0,3]]}}}],["flink",{"_index":23,"t":{"44":{"position":[[7,5]]}}}],["folder",{"_index":221,"t":{"486":{"position":[[42,6]]}}}],["follow",{"_index":158,"t":{"299":{"position":[[0,6]]}}}],["forbidden",{"_index":229,"t":{"496":{"position":[[16,9]]}}}],["form",{"_index":216,"t":{"476":{"position":[[7,4]]}}}],["freesurf",{"_index":235,"t":{"528":{"position":[[16,10]]},"530":{"position":[[0,10]]},"532":{"position":[[0,10]]}}}],["fsl",{"_index":236,"t":{"530":{"position":[[15,3]]}}}],["function",{"_index":223,"t":{"492":{"position":[[11,8]]}}}],["gener",{"_index":238,"t":{"534":{"position":[[11,9]]}}}],["get",{"_index":11,"t":{"28":{"position":[[0,7]]}}}],["git",{"_index":118,"t":{"174":{"position":[[7,3]]},"214":{"position":[[4,3]]},"293":{"position":[[4,3]]},"380":{"position":[[14,3]]},"494":{"position":[[0,3]]},"524":{"position":[[7,3]]},"626":{"position":[[10,3]]}}}],["github",{"_index":54,"t":{"94":{"position":[[16,6]]},"419":{"position":[[9,6]]},"427":{"position":[[11,6]]},"599":{"position":[[0,6]]},"834":{"position":[[0,6]]},"876":{"position":[[13,6]]}}}],["golang",{"_index":209,"t":{"450":{"position":[[8,6]]}}}],["good",{"_index":210,"t":{"466":{"position":[[4,4]]}}}],["gpu",{"_index":122,"t":{"186":
{"position":[[17,3]]},"192":{"position":[[13,3]]},"200":{"position":[[11,3]]},"202":{"position":[[12,3]]},"204":{"position":[[23,4]]},"206":{"position":[[8,3]]},"295":{"position":[[11,3]]},"536":{"position":[[8,4]]}}}],["graph",{"_index":106,"t":{"161":{"position":[[0,5]]}}}],["graphdb",{"_index":111,"t":{"164":{"position":[[9,7]]}}}],["groupbi",{"_index":86,"t":{"128":{"position":[[2,7]]}}}],["hardwar",{"_index":19,"t":{"38":{"position":[[0,8]]}}}],["health",{"_index":186,"t":{"358":{"position":[[14,6]]}}}],["helm",{"_index":67,"t":{"110":{"position":[[21,4]]},"232":{"position":[[26,4]]},"241":{"position":[[26,4]]},"275":{"position":[[26,4]]},"279":{"position":[[26,4]]},"449":{"position":[[12,4]]},"452":{"position":[[8,4]]},"454":{"position":[[6,4]]},"456":{"position":[[10,4]]},"458":{"position":[[28,4]]},"777":{"position":[[20,4]]}}}],["help",{"_index":60,"t":{"100":{"position":[[3,4]]},"691":{"position":[[18,4]]},"711":{"position":[[18,4]]}}}],["imag",{"_index":90,"t":{"134":{"position":[[8,5]]},"136":{"position":[[11,5]]},"138":{"position":[[28,5]]},"142":{"position":[[10,5]]},"180":{"position":[[24,5]]},"182":{"position":[[20,5]]},"188":{"position":[[22,5]]},"194":{"position":[[17,6]]},"206":{"position":[[27,5]]},"334":{"position":[[0,5]]},"370":{"position":[[10,5]]},"378":{"position":[[27,5]]},"425":{"position":[[13,5]]},"480":{"position":[[11,5]]},"751":{"position":[[26,5]]}}}],["imagestream",{"_index":241,"t":{"547":{"position":[[0,12]]}}}],["increas",{"_index":127,"t":{"204":{"position":[[0,8]]}}}],["infer",{"_index":283,"t":{"709":{"position":[[0,9]]}}}],["instal",{"_index":66,"t":{"110":{"position":[[3,12]]},"118":{"position":[[3,12]]},"206":{"position":[[0,7]]},"232":{"position":[[0,10]]},"241":{"position":[[0,10]]},"275":{"position":[[0,10]]},"279":{"position":[[0,10]]},"384":{"position":[[0,7]]},"394":{"position":[[0,7]]},"399":{"position":[[0,7]]},"409":{"position":[[0,7]]},"410":{"position":[[0,7]]},"412":{"position":[[0,7]]},"449":{"position":[[0,7]]},"450":{"position":[[0,7]]},"452":{"position":[[0,7]]},"454":{"position":[[11,12]]},"456":{"position":[[0,7]]},"500":{"position":[[3,7]]},"570":{"position":[[0,7]]},"638":{"position":[[0,7]]},"743":{"position":[[0,7]]},"755":{"position":[[0,7]]},"766":{"position":[[0,7]]},"776":{"position":[[0,7]]},"858":{"position":[[0,7]]},"864":{"position":[[0,7]]}}}],["interfac",{"_index":153,"t":{"273":{"position":[[44,9]]},"738":{"position":[[12,9]]}}}],["intern",{"_index":32,"t":{"64":{"position":[[18,8]]},"634":{"position":[[18,8]]}}}],["internet",{"_index":199,"t":{"403":{"position":[[7,8]]}}}],["introduct",{"_index":294,"t":{"736":{"position":[[0,12]]},"830":{"position":[[0,12]]}}}],["issu",{"_index":226,"t":{"494":{"position":[[19,5]]}}}],["java",{"_index":307,"t":{"820":{"position":[[5,4]]}}}],["job",{"_index":135,"t":{"216":{"position":[[6,4]]},"516":{"position":[[8,4]]}}}],["jupyt",{"_index":71,"t":{"114":{"position":[[14,7]]},"184":{"position":[[14,7]]}}}],["jupyterhub",{"_index":31,"t":{"58":{"position":[[17,10]]},"230":{"position":[[10,10]]},"232":{"position":[[15,10]]},"241":{"position":[[15,10]]},"273":{"position":[[10,10]]},"275":{"position":[[15,10]]},"279":{"position":[[15,10]]},"502":{"position":[[10,10]]}}}],["jupyterlab",{"_index":113,"t":{"170":{"position":[[9,10]]},"174":{"position":[[14,10]]},"524":{"position":[[14,10]]},"528":{"position":[[0,10]]},"605":{"position":[[12,10]]}}}],["kera",{"_index":309,"t":{"824":{"position":[[0,5]]}}}],["kfctl",{"_index":230,"t":{"500":{"position":[[11,5]]}}}],["kubectl",{"_index":19
6,"t":{"394":{"position":[[8,7]]},"395":{"position":[[0,7]]},"397":{"position":[[0,7]]},"401":{"position":[[4,7]]}}}],["kubeflow",{"_index":314,"t":{"842":{"position":[[0,8]]}}}],["kubegen",{"_index":318,"t":{"852":{"position":[[0,8]]}}}],["kubernet",{"_index":171,"t":{"322":{"position":[[0,10]]},"410":{"position":[[22,10]]}}}],["larg",{"_index":222,"t":{"488":{"position":[[0,5]]},"607":{"position":[[5,5]]}}}],["larger",{"_index":55,"t":{"96":{"position":[[4,6]]}}}],["lazi",{"_index":87,"t":{"130":{"position":[[3,4]]}}}],["learn",{"_index":20,"t":{"40":{"position":[[0,5]]},"697":{"position":[[37,8]]},"699":{"position":[[47,8]]},"810":{"position":[[8,8]]},"812":{"position":[[7,5]]},"814":{"position":[[5,8]]}}}],["librari",{"_index":305,"t":{"810":{"position":[[17,9]]},"814":{"position":[[14,9]]},"820":{"position":[[10,7]]}}}],["limit",{"_index":188,"t":{"360":{"position":[[16,6]]},"490":{"position":[[15,11]]}}}],["line",{"_index":152,"t":{"273":{"position":[[39,4]]}}}],["liner",{"_index":260,"t":{"623":{"position":[[4,5]]}}}],["linux",{"_index":249,"t":{"571":{"position":[[3,5]]}}}],["list",{"_index":240,"t":{"543":{"position":[[0,4]]},"552":{"position":[[0,4]]},"792":{"position":[[0,4]]}}}],["local",{"_index":57,"t":{"96":{"position":[[19,7]]},"366":{"position":[[11,5]]},"378":{"position":[[14,5]]},"410":{"position":[[16,5]]},"609":{"position":[[10,5]]},"611":{"position":[[17,5]]},"617":{"position":[[5,5]]},"619":{"position":[[12,5]]},"630":{"position":[[16,5]]}}}],["locat",{"_index":61,"t":{"102":{"position":[[9,9]]}}}],["log",{"_index":246,"t":{"562":{"position":[[8,4]]},"589":{"position":[[0,7]]}}}],["login",{"_index":193,"t":{"388":{"position":[[0,5]]},"418":{"position":[[0,5]]},"419":{"position":[[0,5]]},"421":{"position":[[0,5]]},"423":{"position":[[0,5]]},"577":{"position":[[0,5]]}}}],["mac",{"_index":250,"t":{"573":{"position":[[3,3]]}}}],["machin",{"_index":262,"t":{"630":{"position":[[22,7]]},"810":{"position":[[0,7]]}}}],["maco",{"_index":197,"t":{"397":{"position":[[11,5]]},"770":{"position":[[3,5]]}}}],["manag",{"_index":114,"t":{"172":{"position":[[4,6]]},"522":{"position":[[4,6]]}}}],["markdown",{"_index":62,"t":{"104":{"position":[[3,8]]}}}],["match",{"_index":231,"t":{"506":{"position":[[0,5]]}}}],["matlab",{"_index":120,"t":{"180":{"position":[[17,6]]},"182":{"position":[[13,6]]},"184":{"position":[[4,6]]},"186":{"position":[[7,6]]},"188":{"position":[[15,6]]}}}],["metaflow",{"_index":310,"t":{"826":{"position":[[0,8]]}}}],["method",{"_index":145,"t":{"225":{"position":[[30,6]]}}}],["minishift",{"_index":192,"t":{"384":{"position":[[8,9]]},"386":{"position":[[6,9]]}}}],["mongodb",{"_index":104,"t":{"157":{"position":[[0,7]]}}}],["monitor",{"_index":207,"t":{"437":{"position":[[0,7]]}}}],["more",{"_index":21,"t":{"40":{"position":[[6,4]]},"621":{"position":[[0,4]]},"840":{"position":[[0,4]]}}}],["mount",{"_index":182,"t":{"346":{"position":[[0,5]]}}}],["mpi",{"_index":233,"t":{"516":{"position":[[4,3]]}}}],["mysql",{"_index":102,"t":{"154":{"position":[[6,5]]},"458":{"position":[[8,5]]}}}],["network",{"_index":4,"t":{"16":{"position":[[18,7]]},"445":{"position":[[18,7]]}}}],["new",{"_index":94,"t":{"138":{"position":[[17,3]]},"368":{"position":[[7,3]]},"510":{"position":[[0,3]]}}}],["nextflow",{"_index":316,"t":{"848":{"position":[[0,8]]},"858":{"position":[[8,8]]}}}],["nosql",{"_index":103,"t":{"156":{"position":[[0,5]]}}}],["notebook",{"_index":169,"t":{"313":{"position":[[38,9]]}}}],["number",{"_index":128,"t":{"204":{"position":[[13,6]]}}}],["oc",{"_index":248,"t":{"570
":{"position":[[12,2]]},"577":{"position":[[27,2]]},"615":{"position":[[24,2]]}}}],["offici",{"_index":119,"t":{"180":{"position":[[8,8]]}}}],["okd",{"_index":173,"t":{"326":{"position":[[0,3]]}}}],["on",{"_index":259,"t":{"623":{"position":[[0,3]]}}}],["ontotext",{"_index":110,"t":{"164":{"position":[[0,8]]}}}],["openlink",{"_index":107,"t":{"162":{"position":[[0,8]]}}}],["openshift",{"_index":172,"t":{"324":{"position":[[0,9]]}}}],["oper",{"_index":80,"t":{"122":{"position":[[37,10]]},"638":{"position":[[17,9]]},"640":{"position":[[6,9]]},"779":{"position":[[7,8]]}}}],["option",{"_index":258,"t":{"621":{"position":[[5,7]]},"751":{"position":[[0,9]]},"840":{"position":[[5,7]]}}}],["organ",{"_index":321,"t":{"868":{"position":[[7,12]]}}}],["overview",{"_index":239,"t":{"540":{"position":[[0,8]]}}}],["paramet",{"_index":176,"t":{"332":{"position":[[0,10]]},"462":{"position":[[15,10]]}}}],["password/token",{"_index":72,"t":{"114":{"position":[[22,14]]}}}],["permiss",{"_index":269,"t":{"677":{"position":[[7,11]]}}}],["persist",{"_index":40,"t":{"75":{"position":[[35,10]]},"223":{"position":[[23,10]]},"315":{"position":[[21,10]]},"405":{"position":[[7,10]]},"486":{"position":[[31,10]]},"646":{"position":[[11,10]]},"648":{"position":[[21,10]]},"650":{"position":[[16,10]]},"664":{"position":[[7,10]]}}}],["perspect",{"_index":10,"t":{"24":{"position":[[24,11]]}}}],["pipelin",{"_index":278,"t":{"697":{"position":[[46,9]]},"699":{"position":[[56,8]]}}}],["pod",{"_index":178,"t":{"342":{"position":[[0,3]]},"549":{"position":[[0,4]]},"550":{"position":[[7,3]]},"552":{"position":[[5,4]]},"554":{"position":[[13,3]]},"558":{"position":[[19,3]]},"560":{"position":[[7,3]]},"562":{"position":[[4,3]]},"609":{"position":[[19,3]]},"611":{"position":[[10,3]]},"617":{"position":[[14,3]]},"619":{"position":[[5,3]]},"660":{"position":[[7,3]]},"804":{"position":[[6,3]]},"806":{"position":[[15,3]]}}}],["possibl",{"_index":279,"t":{"699":{"position":[[2,8]]}}}],["postgresql",{"_index":101,"t":{"152":{"position":[[6,10]]}}}],["prepar",{"_index":123,"t":{"192":{"position":[[0,7]]},"198":{"position":[[0,7]]},"478":{"position":[[0,7]]},"701":{"position":[[5,11]]}}}],["process",{"_index":65,"t":{"106":{"position":[[16,7]]}}}],["profil",{"_index":270,"t":{"685":{"position":[[8,10]]},"687":{"position":[[24,10]]},"689":{"position":[[25,10]]}}}],["project",{"_index":8,"t":{"20":{"position":[[12,7]]},"542":{"position":[[0,8]]},"543":{"position":[[5,8]]},"545":{"position":[[11,7]]},"593":{"position":[[18,7]]},"662":{"position":[[9,7]]},"669":{"position":[[13,7]]},"673":{"position":[[9,7]]},"675":{"position":[[9,7]]},"677":{"position":[[42,7]]},"679":{"position":[[9,7]]},"681":{"position":[[9,7]]},"776":{"position":[[21,7]]}}}],["provid",{"_index":293,"t":{"728":{"position":[[28,8]]}}}],["provis",{"_index":268,"t":{"667":{"position":[[6,11]]}}}],["proxi",{"_index":251,"t":{"591":{"position":[[8,5]]}}}],["publish",{"_index":206,"t":{"425":{"position":[[0,7]]},"427":{"position":[[0,7]]},"429":{"position":[[0,7]]},"431":{"position":[[0,7]]},"480":{"position":[[0,7]]}}}],["pull",{"_index":64,"t":{"106":{"position":[[3,4]]},"490":{"position":[[10,4]]}}}],["push",{"_index":93,"t":{"138":{"position":[[10,4]]},"144":{"position":[[0,4]]}}}],["pyspark",{"_index":164,"t":{"311":{"position":[[6,7]]}}}],["pytorch",{"_index":46,"t":{"83":{"position":[[50,7]]},"87":{"position":[[11,7]]},"818":{"position":[[0,7]]}}}],["quay.io",{"_index":205,"t":{"421":{"position":[[9,7]]},"429":{"position":[[11,7]]}}}],["quick",{"_index":52,"t":{"94":{"positio
n":[[2,5]]}}}],["r",{"_index":134,"t":{"216":{"position":[[4,1]]}}}],["rdf",{"_index":165,"t":{"313":{"position":[[0,3]]},"783":{"position":[[44,3]]},"787":{"position":[[33,3]]},"789":{"position":[[33,3]]}}}],["read",{"_index":281,"t":{"703":{"position":[[5,7]]}}}],["readi",{"_index":261,"t":{"628":{"position":[[14,5]]}}}],["redi",{"_index":105,"t":{"159":{"position":[[0,5]]}}}],["refer",{"_index":273,"t":{"693":{"position":[[23,10]]},"713":{"position":[[23,10]]},"732":{"position":[[23,10]]}}}],["registri",{"_index":204,"t":{"418":{"position":[[19,10]]},"419":{"position":[[26,8]]},"427":{"position":[[28,8]]},"587":{"position":[[13,8]]},"599":{"position":[[17,8]]}}}],["remind",{"_index":284,"t":{"717":{"position":[[0,9]]}}}],["remot",{"_index":243,"t":{"556":{"position":[[0,6]]}}}],["repositori",{"_index":148,"t":{"232":{"position":[[37,10]]},"275":{"position":[[37,10]]},"380":{"position":[[18,10]]},"626":{"position":[[14,10]]},"785":{"position":[[10,10]]},"802":{"position":[[10,10]]},"870":{"position":[[6,10]]}}}],["request",{"_index":0,"t":{"14":{"position":[[0,7]]},"64":{"position":[[0,7]]},"106":{"position":[[8,7]]},"443":{"position":[[0,7]]},"474":{"position":[[0,7]]},"634":{"position":[[0,7]]}}}],["research",{"_index":288,"t":{"723":{"position":[[28,8]]}}}],["reset",{"_index":195,"t":{"392":{"position":[[0,5]]}}}],["resourc",{"_index":50,"t":{"89":{"position":[[9,9]]},"360":{"position":[[7,8]]},"437":{"position":[[25,9]]},"642":{"position":[[9,9]]},"693":{"position":[[9,9]]},"713":{"position":[[9,9]]},"732":{"position":[[9,9]]}}}],["restrict",{"_index":131,"t":{"212":{"position":[[0,10]]},"717":{"position":[[15,12]]}}}],["rna",{"_index":26,"t":{"52":{"position":[[8,3]]}}}],["robot",{"_index":253,"t":{"597":{"position":[[8,5]]}}}],["rout",{"_index":69,"t":{"112":{"position":[[15,5]]},"256":{"position":[[19,5]]},"283":{"position":[[19,5]]},"307":{"position":[[9,5]]},"352":{"position":[[0,5]]}}}],["rstudio",{"_index":130,"t":{"210":{"position":[[6,7]]},"212":{"position":[[11,7]]},"214":{"position":[[11,7]]},"605":{"position":[[3,8]]}}}],["rsync",{"_index":256,"t":{"615":{"position":[[27,5]]}}}],["run",{"_index":133,"t":{"216":{"position":[[0,3]]},"309":{"position":[[0,3]]},"317":{"position":[[9,7]]},"401":{"position":[[0,3]]},"492":{"position":[[7,3]]},"516":{"position":[[0,3]]},"745":{"position":[[0,3]]},"783":{"position":[[0,3]]},"792":{"position":[[5,7]]},"860":{"position":[[0,3]]},"872":{"position":[[18,3]]}}}],["runner",{"_index":320,"t":{"866":{"position":[[9,6]]},"874":{"position":[[14,6]]}}}],["sansa",{"_index":167,"t":{"313":{"position":[[19,5]]}}}],["scikit",{"_index":306,"t":{"812":{"position":[[0,6]]}}}],["secret",{"_index":177,"t":{"338":{"position":[[0,6]]}}}],["secur",{"_index":149,"t":{"256":{"position":[[11,7]]},"283":{"position":[[11,7]]},"348":{"position":[[0,8]]}}}],["see",{"_index":297,"t":{"762":{"position":[[0,3]]},"878":{"position":[[0,3]]}}}],["seldon",{"_index":319,"t":{"854":{"position":[[0,6]]}}}],["select",{"_index":76,"t":{"122":{"position":[[2,9]]}}}],["seq",{"_index":27,"t":{"52":{"position":[[12,3]]}}}],["server",{"_index":33,"t":{"64":{"position":[[30,7]]},"212":{"position":[[30,6]]},"291":{"position":[[24,6]]},"632":{"position":[[13,6]]},"634":{"position":[[30,7]]}}}],["servic",{"_index":91,"t":{"134":{"position":[[23,7]]},"350":{"position":[[0,7]]},"667":{"position":[[18,7]]},"723":{"position":[[15,8]]},"728":{"position":[[19,8]]},"730":{"position":[[29,9]]}}}],["set",{"_index":139,"t":{"223":{"position":[[0,7]]},"462":{"position":[[0,3]]}}}],["shell",{
"_index":244,"t":{"556":{"position":[[7,5]]}}}],["shini",{"_index":132,"t":{"212":{"position":[[24,5]]}}}],["show",{"_index":219,"t":{"482":{"position":[[0,4]]}}}],["size",{"_index":143,"t":{"223":{"position":[[41,4]]}}}],["softwar",{"_index":18,"t":{"36":{"position":[[0,8]]}}}],["solut",{"_index":212,"t":{"470":{"position":[[4,8]]},"832":{"position":[[8,9]]}}}],["sonnet",{"_index":308,"t":{"822":{"position":[[0,6]]}}}],["spark",{"_index":30,"t":{"58":{"position":[[6,5]]},"303":{"position":[[9,5]]},"307":{"position":[[22,5]]},"309":{"position":[[7,5]]},"315":{"position":[[8,5]]},"317":{"position":[[17,5]]},"502":{"position":[[25,5]]},"504":{"position":[[10,5]]},"508":{"position":[[0,5]]},"510":{"position":[[4,5]]}}}],["spec",{"_index":179,"t":{"342":{"position":[[4,4]]}}}],["specif",{"_index":17,"t":{"35":{"position":[[9,14]]},"554":{"position":[[4,8]]}}}],["speed",{"_index":274,"t":{"697":{"position":[[3,8]]},"699":{"position":[[25,8]]}}}],["sql",{"_index":99,"t":{"150":{"position":[[0,3]]}}}],["stabl",{"_index":121,"t":{"182":{"position":[[6,6]]}}}],["start",{"_index":12,"t":{"28":{"position":[[8,7]]},"58":{"position":[[0,5]]},"152":{"position":[[0,5]]},"154":{"position":[[0,5]]},"170":{"position":[[3,5]]},"210":{"position":[[0,5]]},"291":{"position":[[0,5]]},"386":{"position":[[0,5]]},"458":{"position":[[0,5]]},"482":{"position":[[23,5]]},"520":{"position":[[3,5]]},"730":{"position":[[11,7]]},"749":{"position":[[0,5]]},"804":{"position":[[0,5]]}}}],["statist",{"_index":85,"t":{"126":{"position":[[17,10]]}}}],["step",{"_index":159,"t":{"299":{"position":[[13,5]]}}}],["stop",{"_index":194,"t":{"390":{"position":[[0,4]]},"747":{"position":[[0,4]]},"794":{"position":[[0,4]]}}}],["storag",{"_index":41,"t":{"75":{"position":[[46,7]]},"315":{"position":[[32,7]]},"336":{"position":[[7,7]]},"346":{"position":[[6,7]]},"646":{"position":[[22,7]]},"648":{"position":[[32,7]]},"650":{"position":[[27,7]]},"652":{"position":[[16,7]]},"654":{"position":[[18,7]]},"664":{"position":[[18,7]]}}}],["structur",{"_index":301,"t":{"783":{"position":[[25,10]]}}}],["stuck",{"_index":267,"t":{"666":{"position":[[4,5]]},"667":{"position":[[0,5]]},"669":{"position":[[7,5]]}}}],["summari",{"_index":84,"t":{"126":{"position":[[9,7]]}}}],["support",{"_index":44,"t":{"83":{"position":[[0,7]]}}}],["surf",{"_index":289,"t":{"724":{"position":[[8,5]]},"728":{"position":[[40,5]]},"730":{"position":[[24,4]]}}}],["surf'",{"_index":286,"t":{"723":{"position":[[0,6]]}}}],["surfdriv",{"_index":254,"t":{"613":{"position":[[19,9]]}}}],["sync",{"_index":257,"t":{"617":{"position":[[0,4]]},"619":{"position":[[0,4]]}}}],["synchron",{"_index":255,"t":{"615":{"position":[[0,12]]}}}],["team",{"_index":272,"t":{"691":{"position":[[9,4]]},"711":{"position":[[9,4]]}}}],["templat",{"_index":189,"t":{"362":{"position":[[27,8]]},"564":{"position":[[16,8]]}}}],["tensorflow",{"_index":276,"t":{"697":{"position":[[15,10]]},"699":{"position":[[68,11]]},"816":{"position":[[0,10]]}}}],["tensorflow/kera",{"_index":45,"t":{"83":{"position":[[29,16]]},"85":{"position":[[11,16]]}}}],["termin",{"_index":35,"t":{"69":{"position":[[9,8]]},"577":{"position":[[13,8]]},"581":{"position":[[9,8]]},"607":{"position":[[26,8]]}}}],["test",{"_index":298,"t":{"774":{"position":[[0,4]]}}}],["tip",{"_index":63,"t":{"104":{"position":[[12,3]]}}}],["train",{"_index":218,"t":{"480":{"position":[[26,8]]},"707":{"position":[[0,8]]}}}],["triniti",{"_index":25,"t":{"52":{"position":[[0,7]]}}}],["triplestor",{"_index":109,"t":{"162":{"position":[[18,11]]},"164":{"position":[[
17,11]]}}}],["type",{"_index":292,"t":{"728":{"position":[[10,5]]}}}],["ubuntu",{"_index":34,"t":{"68":{"position":[[0,6]]},"395":{"position":[[11,6]]},"768":{"position":[[3,6]]}}}],["ui",{"_index":7,"t":{"18":{"position":[[15,2]]},"22":{"position":[[14,2]]},"71":{"position":[[11,2]]},"399":{"position":[[22,2]]},"414":{"position":[[11,2]]},"508":{"position":[[6,2]]},"583":{"position":[[13,2]]},"673":{"position":[[31,2]]},"679":{"position":[[31,2]]}}}],["um",{"_index":3,"t":{"16":{"position":[[15,2]]},"64":{"position":[[27,2]]},"445":{"position":[[15,2]]},"587":{"position":[[0,2]]},"634":{"position":[[27,2]]}}}],["uninstal",{"_index":200,"t":{"407":{"position":[[0,9]]},"460":{"position":[[0,9]]},"781":{"position":[[0,9]]},"874":{"position":[[0,9]]}}}],["up",{"_index":275,"t":{"697":{"position":[[12,2]]},"699":{"position":[[34,2]]}}}],["upgrad",{"_index":150,"t":{"262":{"position":[[0,9]]},"285":{"position":[[0,9]]}}}],["upload",{"_index":296,"t":{"741":{"position":[[0,6]]}}}],["us",{"_index":117,"t":{"174":{"position":[[3,3]]},"180":{"position":[[0,3]]},"182":{"position":[[0,3]]},"184":{"position":[[0,3]]},"214":{"position":[[0,3]]},"230":{"position":[[21,5]]},"273":{"position":[[21,5]]},"293":{"position":[[0,3]]},"311":{"position":[[0,5]]},"433":{"position":[[0,3]]},"437":{"position":[[35,3]]},"504":{"position":[[2,3]]},"524":{"position":[[3,3]]},"536":{"position":[[0,3]]},"591":{"position":[[0,5]]},"595":{"position":[[0,5]]},"597":{"position":[[0,5]]},"652":{"position":[[0,3]]},"654":{"position":[[0,3]]},"673":{"position":[[17,5]]},"675":{"position":[[17,5]]},"679":{"position":[[17,5]]},"681":{"position":[[17,5]]},"876":{"position":[[7,5]]}}}],["user",{"_index":214,"t":{"474":{"position":[[25,5]]},"482":{"position":[[10,5]]},"595":{"position":[[15,4]]}}}],["user'",{"_index":140,"t":{"223":{"position":[[8,6]]}}}],["variabl",{"_index":180,"t":{"344":{"position":[[12,9]]}}}],["version",{"_index":232,"t":{"506":{"position":[[10,7]]}}}],["virtuoso",{"_index":108,"t":{"162":{"position":[[9,8]]}}}],["visualstudio",{"_index":155,"t":{"291":{"position":[[6,12]]}}}],["volcano",{"_index":315,"t":{"846":{"position":[[0,7]]}}}],["volum",{"_index":142,"t":{"223":{"position":[[34,6]]},"405":{"position":[[18,6]]},"488":{"position":[[6,7]]}}}],["vpn",{"_index":213,"t":{"474":{"position":[[8,3]]}}}],["vscode",{"_index":157,"t":{"293":{"position":[[11,6]]},"295":{"position":[[0,6]]},"605":{"position":[[27,6]]}}}],["walkthrough",{"_index":175,"t":{"330":{"position":[[12,11]]}}}],["web",{"_index":6,"t":{"18":{"position":[[11,3]]},"22":{"position":[[10,3]]},"71":{"position":[[7,3]]},"583":{"position":[[9,3]]},"673":{"position":[[27,3]]},"679":{"position":[[27,3]]}}}],["websit",{"_index":146,"t":{"230":{"position":[[36,7]]}}}],["window",{"_index":198,"t":{"397":{"position":[[19,7]]},"575":{"position":[[3,7]]},"772":{"position":[[3,7]]}}}],["wise",{"_index":79,"t":{"122":{"position":[[32,4]]}}}],["within",{"_index":224,"t":{"492":{"position":[[20,6]]}}}],["workflow",{"_index":202,"t":{"409":{"position":[[13,9]]},"433":{"position":[[14,9]]},"758":{"position":[[8,9]]},"777":{"position":[[5,9]]},"783":{"position":[[4,9]]},"787":{"position":[[0,8]]},"789":{"position":[[0,8]]},"792":{"position":[[18,9]]},"794":{"position":[[7,8]]},"796":{"position":[[9,8]]},"798":{"position":[[8,8]]},"834":{"position":[[15,9]]},"860":{"position":[[4,8]]},"876":{"position":[[28,9]]}}}],["workshop",{"_index":217,"t":{"478":{"position":[[12,8]]}}}],["workspac",{"_index":124,"t":{"192":{"position":[[17,9]]},"196":{"position":[[11,9]]},"19
8":{"position":[[12,9]]},"482":{"position":[[31,9]]},"520":{"position":[[14,9]]}}}],["wsl2",{"_index":160,"t":{"299":{"position":[[26,4]]}}}],["xml",{"_index":303,"t":{"787":{"position":[[20,3]]}}}],["yaml",{"_index":242,"t":{"550":{"position":[[16,4]]}}}],["zeppelin",{"_index":168,"t":{"313":{"position":[[29,8]]}}}]],"pipeline":["stemmer"]}},{"documents":[{"i":2,"t":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien. Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut. Nulla facilisi. Maecenas sodales nec purus eget posuere. Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum. Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. 
Donec dignissim ornare nisl, eget tempor risus malesuada quis.","s":"Blog Title","u":"/blog/2016/03/11/blog-post","h":"","p":1},{"i":4,"t":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien. Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut. Nulla facilisi. Maecenas sodales nec purus eget posuere. Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum. Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. 
Donec dignissim ornare nisl, eget tempor risus malesuada quis.","s":"New Blog Post","u":"/blog/2017/04/10/blog-post-two","h":"","p":3},{"i":6,"t":"1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 This should be truncated. This line should never render in XML.","s":"Adding RSS Support - RSS Truncation Test","u":"/blog/2017/09/25/testing-rss","h":"","p":5},{"i":8,"t":"This is a test post. A whole bunch of other information.","s":"Adding RSS Support","u":"/blog/2017/09/26/adding-rss","h":"","p":7},{"i":10,"t":"This blog post will test file name parsing issues when periods are present.","s":"New Version 1.0.0","u":"/blog/2017/10/24/new-version-1.0.0","h":"","p":9},{"i":13,"t":"Get started Access the DSRI","s":"Access the DSRI","u":"/docs/access-dsri","h":"","p":12},{"i":15,"t":"You will need to have an account at Maastricht University with an email ending with @maastrichtuniversity.nl or @student.maastrichtuniversity.nl. Request access to the DSRI for your account Please fill this form 📬 to provide us with some information on what you plan to do with the DSRI. Once you fill the form, you will receive an email with detailed instructions on how to log in.","s":"Request an account","u":"/docs/access-dsri","h":"#request-an-account","p":12},{"i":17,"t":"You need to be connected to the UM network to access the DSRI. 🐧 On Linux: use openconnect to connect to the UM VPN. You can easily install it on Ubuntu and Debian distributions with apt: sudo apt install openconnect ; sudo openconnect -u YOUR.USER --authgroup 01-Employees vpn.maastrichtuniversity.nl 🍎 On MacOS and Windows: download and install the Maastricht University VPN client available at vpn.maastrichtuniversity.nl ⚠️ If you are a student you will need to request access to the UM VPN first. You can try the Athena Student Desktop at athenadesktop.maastrichtuniversity.nl to access the VPN through a virtual desktop, or ask one of your teachers to request VPN access for you. You will need to send an email to the IT helpdesk of your department with the following information: the email of the student who will get VPN access; for which course (provide the course ID) or project the student needs the VPN; and until which date the student will need the VPN.","s":"Connect to the UM network","u":"/docs/access-dsri","h":"#connect-to-the-um-network","p":12},{"i":19,"t":"Access the DSRI web UI at https://console-openshift-console.apps.dsri2.unimaas.nl Password Use your general UM password. If you do not have access to the DSRI, contact us. You will be able to log in at https://console-openshift-console.apps.dsri2.unimaas.nl using the standard Maastricht portal upon clicking the login button: Command line interface We recommend installing the oc command line interface to perform additional operations on your applications, such as loading large amounts of data using oc cp, or deploying an application from a local Dockerfile. Instructions on installing the client can be found ➡ here","s":"Access the web UI","u":"/docs/access-dsri","h":"#access-the-web-ui","p":12},{"i":21,"t":"In the DSRI OpenShift web UI, applications are deployed in projects. Create a new project with a meaningful name describing what you are doing, such as workspace-yourname. Then go to your project. 
Reuse your project Only create a new project when it is really necessary; you can easily clean up your current project instead of creating a new one every time you want to try something. Access permissions for developers to your project You can use the Project view in the Developer perspective to grant or revoke access permissions for your project collaborators. For more info: Access permissions for developers to your project","s":"Access your project","u":"/docs/access-dsri","h":"#access-your-project","p":12},{"i":23,"t":"Developers can use the web console to visualize, browse, and manage the contents of projects in the new version of OKD 4. The OpenShift Container Platform web console provides two perspectives: the Administrator perspective and the Developer perspective. The Developer perspective provides workflows specific to developer use cases, such as the ability to: Create and deploy applications on OpenShift Container Platform by importing existing codebases, images, and dockerfiles. Visually interact with applications, components, and services associated with them within a project and monitor their deployment and build status. Group components within an application and connect the components within and across applications.","s":"About the web UI","u":"/docs/access-dsri","h":"#about-the-web-ui","p":12},{"i":25,"t":"You can access the Developer perspective from the web console as follows: Log in to the OpenShift Container Platform web console using your login credentials. The default view for the OpenShift Container Platform web console is the Administrator perspective. Use the perspective switcher to switch to the Developer perspective. The Topology view with a list of all the projects in your cluster is displayed. Select an existing project from the list or use the Project drop-down list to create a new project. info If you have no workloads or applications in the project, the Topology view displays the available options to create applications. If you have existing workloads, the Topology view graphically displays your workload nodes.","s":"Accessing the Developer perspective","u":"/docs/access-dsri","h":"#accessing-the-developer-perspective","p":12},{"i":27,"t":"Get started Introduction","s":"Introduction","u":"/docs/","h":"","p":26},{"i":30,"t":"The DSRI is particularly useful if you need to: Gain access to more computing resources (memory and CPUs), which enables you to load larger amounts of data, or use more threads for parallelized tasks Run jobs that take a long time to complete Deploy any database or service you need, and connect to it from your workspace easily Book and start a workspace that uses one of our GPUs The DSRI offers a number of popular workspaces to work with data: Multiple flavors of JupyterLab (scipy, tensorflow, all-spark, and more) VisualStudio Code server (also available within the JupyterLab workspaces) RStudio, with a complementary Shiny server Matlab Ubuntu Desktop You can then install anything you want in your workspace using conda, pip, or apt. Data storage DSRI is a computing infrastructure, built and used to run data science workloads. DSRI stores data in a persistent manner, but all data stored on the DSRI may be altered by the workloads you are running, and we cannot guarantee its immutability. Always keep a safe copy of your data outside the DSRI. 
And don't rely on the DSRI for long term storage.","s":"✅ What can be done on the DSRI","u":"/docs/","h":"#-what-can-be-done-on-the-dsri","p":26},{"i":32,"t":"Since DSRI can only be accessed when using the UM VPN, deployed services will not be available on the public Internet 🔒 All activities must have a legal basis. You must closely examine and abide by the terms and conditions of any data, software, or web service that you use as part of your work 📜 You cannot reach data or servers hosted at Maastricht University from the DSRI by default. You will need to request access in advance here 📬️ Right now it is not possible to reach the central UM fileservices (MFS) Request an account If you are working at Maastricht University, see this page to request an account, and run your services on the DSRI.","s":"❌ What cannot be done","u":"/docs/","h":"#-what-cannot-be-done","p":26},{"i":34,"t":"Here is a diagram providing a simplified explanation of how the DSRI works, using popular data science applications as examples (JupyterLab, RStudio, VSCode server).","s":"The DSRI architecture","u":"/docs/","h":"#the-dsri-architecture","p":26},{"i":37,"t":"We use OKD 4.11, the Origin Community Distribution of Kubernetes that powers RedHat OpenShift, a distribution of the Kubernetes container orchestration tool. Kubernetes takes care of deploying the Docker containers on the cluster of servers; the OKD distribution extends it to improve security and provide a user-friendly web UI to manage your applications. We use RedHat Ceph storage for the distributed storage.","s":"Software","u":"/docs/","h":"#software","p":26},{"i":39,"t":"16 CPU nodes. Node capacity: 512 GB RAM, 64 CPU cores (128 threads), 120 TB storage. Total capacity: 8 192 GB RAM, 1 024 CPU cores, 1 920 TB storage. 1 GPU node (Nvidia DGX1 with 8x Tesla V100 32 GB GPUs). GPU node capacity: 8 GPUs, 512 GB RAM, 40 CPU cores.","s":"Hardware","u":"/docs/","h":"#hardware","p":26},{"i":41,"t":"See the following presentation about the Data Science Research Infrastructure","s":"Learn more about DSRI","u":"/docs/","h":"#learn-more-about-dsri","p":26},{"i":43,"t":"On this page","s":"Data streaming","u":"/docs/catalog-data-streaming","h":"","p":42},{"i":45,"t":"Apache Flink enables processing of data streams using languages such as Java or Scala. Root permission required 🔒 You need root containers enabled (aka. anyuid) in your project to start this application. Create the Apache Flink template in your project using vemonet/flink-on-openshift: oc apply -f https://raw.githubusercontent.com/vemonet/flink-on-openshift/master/template-flink-dsri.yml Use the template to start the cluster from the catalog. Use this command to get the Flink Jobmanager pod ID and copy a file to the pod: oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name # Example creating the workspace folder and copying the RMLStreamer.jar to the pod: oc exec <pod_id> -- mkdir -p /mnt/workspace/resources ; oc cp workspace/resources/RMLStreamer.jar <pod_id>:/mnt/ Delete the Apache Flink cluster (change the application name): oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=flink-cluster","s":"Apache Flink","u":"/docs/catalog-data-streaming","h":"#apache-flink","p":42},{"i":47,"t":"Deploy applications Data Science catalog Imaging software","s":"Imaging software","u":"/docs/catalog-imaging","h":"","p":46},{"i":49,"t":"Cell image analysis software. See their website. 
You can start a container using the CellProfiler template in the Catalog web UI (make sure the Templates checkbox is checked). This template uses the official CellProfiler image hosted on DockerHub Persistent data folder 📂 Use the /usr/local/src/work folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. Once CellProfiler has been started you can access it through the pod terminal (in the DSRI web UI, or using oc rsh POD_ID): cellprofiler --help ; cellprofiler --run ; cellprofiler --run-headless Getting Started 🐬 For more information on using CellProfiler from the command line, see this post","s":"CellProfiler","u":"/docs/catalog-imaging","h":"#cellprofiler","p":46},{"i":51,"t":"Deploy applications Data Science catalog Genomics","s":"Genomics","u":"/docs/catalog-genomics","h":"","p":50},{"i":53,"t":"Trinity assembles transcript sequences from Illumina RNA-Seq data. It represents a novel method for the efficient and robust de novo reconstruction of transcriptomes from RNA-Seq data. See their documentation. You can start a container using the Trinity RNA-Seq template in the Catalog web UI (make sure the Templates checkbox is checked). This template uses the Trinity RNA-Seq image hosted in the UM IDS GitHub Container Registry Persistent data folder 📂 Use the /usr/local/src/work folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. We enabled port 8787 in the container in case you need to deploy applications.","s":"Trinity RNA Seq","u":"/docs/catalog-genomics","h":"#trinity-rna-seq","p":50},{"i":55,"t":"On this page","s":"OpenDataHub","u":"/docs/catalog-opendatahub","h":"","p":54},{"i":57,"t":"These components have been tested on the DSRI: JupyterHub Spark Operator from radanalytics","s":"Components available on DSRI","u":"/docs/catalog-opendatahub","h":"#components-available-on-dsri","p":54},{"i":59,"t":"Check out the official documentation to start an instance of OpenDataHub (note that the Operator has already been installed). Then visit the documentation to reach the Spark cluster from a Jupyter notebook.","s":"Start Spark with JupyterHub","u":"/docs/catalog-opendatahub","h":"#start-spark-with-jupyterhub","p":54},{"i":61,"t":"Here are all the components that can be deployed as part of an OpenDataHub: JupyterHub Airflow Argo Grafana & Prometheus for data/logs visualization Spark Operator from radanalytics Kafka/Strimzi for streaming applications Superset for data visualization AI Library (Seldon to publish AI models) Let us know if you need help deploying one of those components on the DSRI.","s":"All components","u":"/docs/catalog-opendatahub","h":"#all-components","p":54},{"i":63,"t":"Guides Access UM servers","s":"Access UM servers","u":"/docs/access-um-servers","h":"","p":62},{"i":65,"t":"In certain cases, UM servers are not accessible by default from the DSRI. This is even the case for servers that are normally publicly accessible. To be able to access these UM servers from the DSRI, we need to put in a request to open the connection. Please let us know either the server name and port you would like to access, or the URL (e.g. um-vm0057.unimaas.nl on port 443 or https://gitlab.maastrichtuniversity.nl). You can reach out to us either by mail or by Slack. 
UM services that are not accessible from the DSRI right now: central UM fileservices (MFS). The procedure is described in the diagram below:","s":"Request access to internal UM servers","u":"/docs/access-um-servers","h":"#request-access-to-internal-um-servers","p":62},{"i":67,"t":"Deploy applications Data Science catalog Utilities","s":"Utilities","u":"/docs/catalog-utilities","h":"","p":66},{"i":70,"t":"Start Ubuntu with the root user, which has sudo permissions to install anything. You can start the application using the Ubuntu template in the Catalog web UI (make sure the Templates checkbox is checked) Login Credentials Username: root Password: Template creation password This template uses the Ubuntu image hosted on DockerHub, see its documentation at https://hub.docker.com/r/ubuntu Persistent data folder 📂 Use the /root folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. We enabled port 8080 in the Ubuntu container in case you need to deploy applications. To quickly access it from the terminal you can use the Terminal tab in the pod page, or your local terminal: Get the Ubuntu pod ID: oc get pods. Connect to it: oc rsh POD_ID. Enable Bash in the Ubuntu container (if it starts with the shell): bash","s":"With the terminal","u":"/docs/catalog-utilities","h":"#with-the-terminal","p":66},{"i":72,"t":"Start Ubuntu with a web UI accessible via a URL (using VNC). You will be the root user, which has elevated permissions to install anything via apt install. Before you install a package, run apt update; this also solves the E: unable to locate package and E: no installation candidate errors. You can start the application using the Ubuntu with web UI template in the Catalog web UI (make sure the Templates checkbox is checked) Login Credentials Username: root Password: Template creation password This template uses the Docker image defined at https://github.com/fcwu/docker-ubuntu-vnc-desktop Less stable than the official image This image might be less stable than the original Ubuntu image. Let us know on Slack if you have any problem!","s":"With a web UI","u":"/docs/catalog-utilities","h":"#with-a-web-ui","p":66},{"i":74,"t":"Deploy a file browser on your persistent volume. This will provide a web UI to upload and download data to your DSRI persistent volume in case you need it (JupyterLab, RStudio and VisualStudio Code server already include a file browser). You can start a container using the File Browser for existing storage template in the Catalog web UI (make sure the Templates checkbox is checked). You can only deploy the file browser on an existing Persistent Volume Claim; this enables you to add a web UI to access this storage. The following parameters can be provided: Provide a unique Application name; it will be used to generate the application URL. Provide a Password; you will need to hash the password first for extra security, use this quick docker command to do it: docker run filebrowser/filebrowser hash mypassword The Storage name of the Persistent Volume Claim (PVC) that will be exposed by the filebrowser. Storage subpath in the Persistent Volume Claim that will be exposed by the filebrowser. Leave it empty to use the root folder of the persistent volume. 
You can find the Storage name if you go to the deployments page > Storage panel.","s":"File browser","u":"/docs/catalog-utilities","h":"#file-browser","p":66},{"i":76,"t":"Find more details about how to create persistent storage. info The DSRI uses OpenShift Container Storage (OCS), which is based on Ceph and offers the ReadWriteOnce and ReadWriteMany access modes. ReadWriteOnce (RWO) volumes cannot be mounted on multiple nodes. Use the ReadWriteMany (RWX) access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on a shut down or crashed node. Find more details about how to connect existing persistent storage. info You can try the above method if you want to connect more applications to the same storage. This deployment requires the root user to be enabled on your project. Contact the DSRI support team or create a new issue to request root access, or to create a persistent volume for your project if you don't have one. Credentials Default credentials will be username admin and password admin Change password Please change the password in the Filebrowser Web UI once it has been created.","s":"Creating or Connecting an Existing Persistent Storage","u":"/docs/catalog-utilities","h":"#creating-or-connecting-an-existing-persistent-storage","p":66},{"i":78,"t":"Guides Checkpointing Machine Learning Training","s":"Checkpointing Machine Learning Training","u":"/docs/checkpointing-ml-training","h":"","p":77},{"i":80,"t":"Checkpointing is periodically saving the learned model parameters and current hyperparameter values during training. It helps you resume training of a model where you left off, instead of restarting the training from the beginning. On the shared DSRI cluster, you might have access to a GPU node for a limited amount of time in one stretch, for example 24 hours. Therefore, whenever the training job fails (due to time limit expiry or otherwise), many hours of training can be lost. This problem is mitigated by frequent checkpoint saving. When the training is resumed, it will continue from the last saved checkpoint. If a failure occurred 12 hours after the last checkpoint was saved, 12 hours of training are lost and need to be re-done. This can be very expensive.","s":"What is Checkpointing?","u":"/docs/checkpointing-ml-training","h":"#what-is-checkpointing","p":77},
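To illustrate the resume-where-you-left-off pattern described above, here is a minimal PyTorch sketch that looks for the most recent checkpoint before training starts. It is only a sketch: it assumes checkpoints are written as checkpoint_*.pth dict files to a checkpoints folder on your persistent volume (as in the PyTorch example further below), and the function name resume_from_latest is an illustrative choice, not part of the DSRI docs or any library API.

import os
import glob
import torch

def resume_from_latest(model, optimizer, checkpoint_dir='checkpoints'):
    # Assumed layout: checkpoints saved as checkpoint_*.pth dicts with
    # 'epoch', 'model_state_dict' and 'optimizer_state_dict' keys
    paths = glob.glob(os.path.join(checkpoint_dir, 'checkpoint_*.pth'))
    if not paths:
        return 0  # No checkpoint yet: start training from the first epoch
    latest = max(paths, key=os.path.getmtime)  # Most recently written checkpoint
    checkpoint = torch.load(latest)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return checkpoint['epoch'] + 1  # Resume with the epoch after the last saved one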
allocated time on the cluster: x days 2) Time needed to save every checkpoint: y seconds 3) Checkpoint frequency: every z hours => Then, Total Number of Checkpoints during the complete training time (NCP) = (x × 24) / z => Total Time Spent on Checkpointing (TTSC) [in hours] = (NCP × y) / 3600 => % of training time spent on checkpointing = (TTSC / (TT × 24)) × 100 ------------------Example calculations------------------------------------ Training Time (TT or x): 7 days Time needed to save every checkpoint (y): 20 secs Checkpoint frequency (z): every 30 minutes, i.e., 0.5 hours Then, NCP = (7 × 24) / 0.5 = 336 TTSC = (336 × 20) / 3600 ≈ 1.87 hours % of training time spent on checkpointing = (1.87 / (7 × 24)) × 100 ≈ 1.1 %","s":"Checkpointing frequency?","u":"/docs/checkpointing-ml-training","h":"#checkpointing-fequency","p":77},{"i":84,"t":"Both PyTorch and TensorFlow/Keras support checkpointing. The following sections provide an example of how checkpointing can be done in these libraries.","s":"Support for Checkpointing in Tensorflow/Keras and PyTorch?","u":"/docs/checkpointing-ml-training","h":"#support-for-checkpointing-in-tensorflowkeras-and-pytorch-","p":77},{"i":86,"t":"import tensorflow as tf#Imports the ModelCheckpoint classfrom tensorflow.keras.callbacks import ModelCheckpoint# Create your model as you normally would and compile it:model = tf.keras.models.Sequential([ tf.keras.layers.Dense(64, activation='relu', input_shape=(32,)), tf.keras.layers.Dense(10, activation='softmax')])model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])# Create a Checkpoint Callbackcheckpoint_callback = ModelCheckpoint(#filepath should be a path to your persistent volume. Example, /home/jovyan path in your JupyterLab pod. filepath='model_checkpoint.h5', # You can use formats like .hdf5 or .ckpt. 
save_best_only=True, monitor='val_loss', mode='min', verbose=1)# Train the Model with the Checkpoint Callbackhistory = model.fit( x_train, y_train, validation_data=(x_val, y_val), epochs=10, callbacks=[checkpoint_callback])# Loading a Saved Checkpoint# Load the model architecture + weights if you saved the full modelmodel = tf.keras.models.load_model('model_checkpoint.h5')# If you saved only the weights, you would need to create the model architecture first, then load weights:model.load_weights('model_checkpoint.h5')# Optional Parameters for Checkpointing, Example with Custom Save Intervalscheckpoint_callback = ModelCheckpoint( filepath='model_checkpoint_epoch_{epoch:02d}.h5', save_freq='epoch', save_weights_only=True, verbose=1)","s":"Example of Tensorflow/Keras based checkpointing:","u":"/docs/checkpointing-ml-training","h":"#example-of-tensorflowkeras-based-checkpointing","p":77},{"i":88,"t":"import torch# Example modelmodel = torch.nn.Linear(10, 2)# Save the entire modeltorch.save(model, 'model.pth')# Loading the Entire Modelmodel = torch.load('model.pth')# Saving and Loading Optimizer State, i.e., To continue training exactly as before, you may want to save the optimizer state as well.optimizer = torch.optim.SGD(model.parameters(), lr=0.01)# Save model and optimizer state_dictscheckpoint = { 'epoch': 5, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': 0.5,}torch.save(checkpoint, 'checkpoint.pth')# Load checkpointcheckpoint = torch.load('checkpoint.pth')model.load_state_dict(checkpoint['model_state_dict'])optimizer.load_state_dict(checkpoint['optimizer_state_dict'])epoch = checkpoint['epoch']loss = checkpoint['loss']model.train() # Ensure model is in training mode if needed","s":"Example of PyTorch based checkpointing:","u":"/docs/checkpointing-ml-training","h":"#example-of-pytorch-based-checkpointing","p":77},{"i":90,"t":"PyTorch Documentation: https://pytorch.org/tutorials/beginner/saving_loading_models.html#save-on-gpu-load-on-cpu Tensorflow/Keras Documentation: https://www.digitalocean.com/community/tutorials/checkpointing-in-tensorflow https://keras.io/api/callbacks/model_checkpoint/ Machine Learning Engineering by stas bekman: https://stasosphere.com/machine-learning/","s":"External Resources","u":"/docs/checkpointing-ml-training","h":"#external-resources","p":77},{"i":93,"t":"Check if there are issues related to your contribution, or post a new issue to discuss improvement to the documentation. Fork this repository Otherwise you will need to first fork this repository, then send a pull request when your changes have been pushed. Direct change if permission If you are part of the MaastrichtU-IDS organization on GitHub you can directly create a new branch to make your change in the main repository.","s":"Contribute","u":"/docs/contribute","h":"","p":91},{"i":95,"t":"You can really easily make quick changes directly on the GitHub website by clicking the Edit this page button at the bottom left of each documentation page. Or browsing to your forked repository. For example to edit the introduction page you can go to https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/introduction.md","s":"⚡ Quick edit on GitHub","u":"/docs/contribute","h":"#-quick-edit-on-github","p":91},{"i":97,"t":"To edit the documentation it is easier to clone the repository on your laptop, and use a user-friendly markdown editor. Use a Markdown editor We strongly recommend you to use a markdown editor, such as Typora. 
It makes writing documentation much faster, and more enjoyable. Clone the repository on your machine: git clone https://github.com/MaastrichtU-IDS/dsri-documentation.gitcd dsri-documentation Create a new branch from the master branch 🕊️ git checkout -b my-branch Add your changes in this branch ✒️ Start the website on http://localhost:3000 to test it: cd websiteyarn installyarn start Send a pull request Send a pull request to the master branch when your changes are done Development documentation Read more about running the API in development at https://github.com/MaastrichtU-IDS/dsri-documentation#run-for-development","s":"🏗️ Larger changes locally","u":"/docs/contribute","h":"#️-larger-changes-locally","p":91},{"i":99,"t":"The documentation website is automatically updated and redeployed at each change to the main branch using a GitHub Actions workflow.","s":"🔄 Automated deployment","u":"/docs/contribute","h":"#-automated-deployment","p":91},{"i":101,"t":"Most pages of this website are written in Markdown, hence they are really easy to edit, especially when you are using a convenient markdown editor. Only the index.js page is written in React JavaScript.","s":"📝 Help","u":"/docs/contribute","h":"#-help","p":91},{"i":103,"t":"Main DSRI documentation markdown files in website/docs Left docs menu defined in website/sidebars.json Blog articles as markdown files in website/docs Index and contribute pages in website/src/pages Images in website/src/static/img Website configuration file in website/docusaurus.config.js","s":"🔎 Files locations","u":"/docs/contribute","h":"#-files-locations","p":91},{"i":105,"t":"Colorful boxes Use the following tags to create colorful boxes in markdown files: :::note You can specify an optional titleGrey box::::::tip Green boxThe content and title *can* include markdown.::::::info Blue boxUseful information.::::::caution Be careful!Yellow box::::::danger Fire red boxDanger danger, mayday!:::","s":"🦄 Markdown tip","u":"/docs/contribute","h":"#-markdown-tip","p":91},{"i":107,"t":"Before sending a pull request make sure the DSRI documentation website still work as expected with the new changes properly integrated: cd websiteyarn installyarn start Send a pull request to the master branch. 
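Put together, the local preview workflow described in this section looks like this in a terminal (a sketch of the commands already listed above):

```bash
# Clone the repository and create a working branch
git clone https://github.com/MaastrichtU-IDS/dsri-documentation.git
cd dsri-documentation
git checkout -b my-branch

# Start the website on http://localhost:3000 to test your changes
cd website
yarn install
yarn start
```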
Project contributors will review your change as soon as they can!","s":"✔️ Pull Request process","u":"/docs/contribute","h":"#️-pull-request-process","p":91},{"i":109,"t":"Deploy applications Data Science catalog Deploy Dask Cluster","s":"Deploy Dask Cluster","u":"/docs/dask-cluster","h":"","p":108},{"i":111,"t":"Go to the +Add page, and select to add Helm Chart Search and Select the Dask chart then click on Create Configure the Yaml file, while under the Jupyter section: Command: [\"jupyter\", \"lab\", \"--allow-root\", \"--ip=0.0.0.0\", \"--port=8888\", \"--no-browser\"] servicePort: 8888 Add Storage to the dask-jupyter pod as shown below Set up a new Persistent Volume Claim for the cluster as shown below","s":"🧊 Installation with Helm","u":"/docs/dask-cluster","h":"#-installation-with-helm","p":108},{"i":113,"t":"Switch to the Administrator view and navigate to Route Create a new route by clicking the button Create Route with the setup as shown below Navigate the provided link to access your local cluster","s":"🪐 Configure a Route for the Cluster","u":"/docs/dask-cluster","h":"#-configure-a-route-for-the-cluster","p":108},{"i":115,"t":"Start up the terminal Run oc get pods to find the full podname of the dask-jupyter Run oc logs and copy the token used to access the jupyter notebook","s":"🪐 Access the Jupyter Password/Token","u":"/docs/dask-cluster","h":"#-access-the-jupyter-passwordtoken","p":108},{"i":117,"t":"Guides Parallelization using Dask","s":"Parallelization using Dask","u":"/docs/dask-tutorial","h":"","p":116},{"i":119,"t":"!pip install \"dask[complete]\" import daskdask.__version__ '2023.5.0' import dask.array as daimport dask.bag as dbimport dask.dataframe as ddimport numpy as npimport pandas as pd","s":"🧊 Installation","u":"/docs/dask-tutorial","h":"#-installation","p":116},{"i":121,"t":"On a high-level, you can think of Dask as a wrapper that extends the capabilities of traditional tools like pandas, NumPy, and Spark to handle larger-than-memory datasets. When faced with large objects like larger-than-memory arrays (vectors) or matrices (dataframes), Dask breaks them up into chunks, also called partitions. For example, consider the array of 12 random numbers in both NumPy and Dask: narr = np.random.rand(12)narr array([0.44236558, 0.00504448, 0.87087911, 0.468925 , 0.37513511, 0.22607761, 0.83035297, 0.07772372, 0.61587933, 0.82861156, 0.66214299, 0.90979423]) darr = da.from_array(narr, chunks=3)darr The image above shows that the Dask array contains four chunks as we set chunks to 3. Under the hood, each chunk is a NumPy array in itself. To fully appreciate the benefits of Dask, we need a large dataset, preferably over 1 GB in size. 
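As a quick sanity check of the chunking behaviour described above, here is a minimal sketch assuming the narr/darr objects defined earlier:

```python
import numpy as np
import dask.array as da

narr = np.random.rand(12)             # 12 random numbers, as above
narr_sum = narr.sum()

darr = da.from_array(narr, chunks=3)  # split into chunks of 3 elements
print(darr.chunks)                    # ((3, 3, 3, 3),) -> four chunks
print(darr.sum().compute())           # computed chunk by chunk, matches narr_sum
```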
Consider the autogenerated data from the script below: import string# Set the desired number of rows and columnsnum_rows = 5_000_000num_cols = 10chunk_size = 100_000# Define an empty DataFrame to store the chunksdf_chunks = pd.DataFrame()# Generate and write the dataset in chunksfor i in range(0, num_rows, chunk_size): # Generate random numeric data numeric_data = np.random.rand(chunk_size, num_cols) # Generate random categorical data letters = list(string.ascii_uppercase) categorical_data = np.random.choice(letters, (chunk_size, num_cols)) # Combine numeric and categorical data into a Pandas DataFrame df_chunk = pd.DataFrame(np.concatenate([numeric_data, categorical_data], axis=1)) # Set column names for better understanding column_names = [f'Numeric_{i}' for i in range(num_cols)] + [f'Categorical_{i}' for i in range(num_cols)] df_chunk.columns = column_names # Append the current chunk to the DataFrame holding all chunks df_chunks = pd.concat([df_chunks, df_chunk], ignore_index=True) # Write the DataFrame chunk to a CSV file incrementally if (i + chunk_size) >= num_rows or (i // chunk_size) % 10 == 0: df_chunks.to_csv('large_dataset.csv', index=False, mode='a', header=(i == 0)) df_chunks = pd.DataFrame() dask_df = dd.read_csv(\"large_dataset.csv\")dask_df.head() Even though the file is large, you will notice that the result is fetched almost instantaneously. For even larger files, you can specify the blocksize parameter, which determines the number of bytes to break up the file into. Similar to how Dask Arrays contain chunks of small NumPy arrays, Dask is designed to handle multiple small Pandas DataFrames arranged along the row index.","s":"🪐 Basic Concepts of Dask","u":"/docs/dask-tutorial","h":"#-basic-concepts-of-dask","p":116},{"i":123,"t":"In this example, we're doing some pretty straightforward column operations on our Dask DataFrame, called dask_df. We're adding the values from the column Numeric_0 to the result of multiplying the values from Numeric_9 and Numeric_3. We store the outcome in a variable named result. result = ( dask_df[\"Numeric_0\"] + dask_df[\"Numeric_9\"] * dask_df[\"Numeric_3\"])result.compute().head() As we’ve mentioned, Dask is a bit different from traditional computing tools in that it doesn't immediately execute these operations. Instead, it creates a kind of 'plan' called a task graph to carry out these operations later on. This approach allows Dask to optimize the computations and parallelize them when needed. The compute() function triggers Dask to finally perform these computations, and head() just shows us the first few rows of the result.","s":"✨ Selecting columns and element-wise operations","u":"/docs/dask-tutorial","h":"#-selecting-columns-and-element-wise-operations","p":116},{"i":125,"t":"Now, let's look at how Dask can filter data. We're selecting rows from our DataFrame where the value in the \"Categorical_5\" column is \"A\". This filtering process is similar to how you'd do it in pandas, but with a twist - Dask does this operation lazily. It prepares the task graph for this operation but waits to execute it until we call compute(). When we run head(), we get to see the first few rows of our filtered DataFrame. dask_df[dask_df[\"Categorical_5\"] == \"A\"].compute().head()","s":"⚡️ Conditional filtering","u":"/docs/dask-tutorial","h":"#️-conditional-filtering","p":116},{"i":127,"t":"Next, we're going to generate some common summary statistics using Dask's describe() function. 
It gives us a handful of descriptive statistics for our DataFrame, including the mean, standard deviation, minimum, maximum, and so on. As with our previous examples, Dask prepares the task graph for this operation when we call describe(), but it waits to execute it until we call compute(). dask_df.describe().compute() dask_df[\"Categorical_3\"].value_counts().compute().head() We also use value_counts() to count the number of occurrences of each unique value in the \"Categorical_3\" column. We trigger the operation with compute(), and head() shows us the most common values.","s":"✨ Common summary statistics","u":"/docs/dask-tutorial","h":"#-common-summary-statistics","p":116},{"i":129,"t":"Finally, let's use the groupby() function to group our data based on values in the \"Categorical_8\" column. Then we select the \"Numeric_7\" column and calculate the mean for each group. This is similar to how you might use ‘groupby()’ in pandas, but as you might have guessed, Dask does this lazily. We trigger the operation with compute(), and head() displays the average of the \"Numeric_7\" column for the first few groups. dask_df.groupby(\"Categorical_8\")[\"Numeric_7\"].mean().compute().head()","s":"✨ Groupby","u":"/docs/dask-tutorial","h":"#-groupby","p":116},{"i":131,"t":"Now, let’s explore the use of the compute function at the end of each code block. Dask evaluates code blocks in lazy mode compared to Pandas’ eager mode, which returns results immediately. To draw a parallel in cooking, lazy evaluation is like preparing ingredients and chopping vegetables in advance but only combining them to cook when needed. The compute function serves that purpose. In contrast, eager evaluation is like throwing ingredients into the fire to cook as soon as they are ready. This approach ensures everything is ready to serve at once. Lazy evaluation is key to Dask’s excellent performance as it provides: Reduced computation. Expressions are evaluated only when needed (when compute is called), avoiding unnecessary intermediate results that may not be used in the final result. Optimal resource allocation. Lazy evaluation avoids allocating memory or processing power to intermediate results that may not be required. Support for large datasets. This method processes data elements on-the-fly or in smaller chunks, enabling efficient utilization of memory resources. When the results of compute are returned, they are given as Pandas Series/DataFrames or NumPy arrays instead of native Dask DataFrames. type(dask_df) dask.dataframe.core.DataFrame type( dask_df[[\"Numeric_5\", \"Numeric_6\", \"Numeric_7\"]].mean().compute()) pandas.core.series.Series The reason for this is that most data manipulation operations return only a subset of the original dataframe, taking up much smaller space. So, there won’t be any need to use parallelism of Dask, and you continue the rest of your workflow either in pandas or NumPy. 🪐 Dask Bags and Dask Delayed for Unstructured Data​ Dask Bags and Dask Delayed are two components of the Dask library that provide powerful tools for working with unstructured or semi-structured data and enabling lazy evaluation. While in the past, tabular data was the most common, today’s datasets often involve unstructured files such as images, text files, videos, and audio. Dask Bags provides the functionality and API to handle such unstructured files in a parallel and scalable manner. 
For example, let’s consider a simple illustration: # Create a Dask Bag from a list of stringsb = db.from_sequence([\"apple\", \"banana\", \"orange\", \"grape\", \"kiwi\"])# Filter the strings that start with the letter 'a'filtered_strings = b.filter(lambda x: x.startswith(\"a\"))# Map a function to convert each string to uppercaseuppercase_strings = filtered_strings.map(lambda x: x.upper())# Compute the result as a listresult = uppercase_strings.compute()print(result) ['APPLE'] In this example, we create a Dask Bag b from a list of strings. We then apply operations on the Bag to filter the strings that start with the letter 'a' and convert them to uppercase using the filter() and map() functions, respectively. Finally, we compute the result as a list using the compute() method and print the output. Now imagine that you can perform even more complex operations on billions of similar strings stored in a text file. Without the lazy evaluation and parallelism offered by Dask Bags, you would face significant challenges. As for Dask Delayed, it provides even more flexibility and introduces lazy evaluation and parallelism to various other scenarios. With Dask Delayed, you can convert any native Python function into a lazy object using the @dask.delayed decorator. Here is a simple example: %%timeimport time@dask.delayeddef process_data(x): # Simulate some computation time.sleep(1) return x**2# Generate a list of inputsinputs = range(1000)# Apply the delayed function to each inputresults = [process_data(x) for x in inputs]# Compute the results in parallelcomputed_results = dask.compute(*results) CPU times: user 260 ms, sys: 68.1 ms, total: 328 msWall time: 32.2 s In this example, we define a function process_data decorated with @dask.delayed. The function simulates some computational work by sleeping for 1 second and then returning the square of the input value. Without parallelism, performing this computation on 1000 inputs would have taken more than 1000 seconds. However, with Dask Delayed and parallel execution, the computation only took about 32 seconds (the wall time reported above). This example demonstrates the power of parallelism in reducing computation time by efficiently distributing the workload across multiple cores or workers. That’s what parallelism is all about. For more information, see https://docs.dask.org/en/stable/","s":"⚡️ Lazy evaluation","u":"/docs/dask-tutorial","h":"#️-lazy-evaluation","p":116},{"i":133,"t":"Deploy applications Deploy from a Docker image","s":"Deploy from a Docker image","u":"/docs/deploy-from-docker","h":"","p":132},{"i":135,"t":"The easiest way to deploy a service on the DSRI is to use a Docker image from DockerHub 🐳. Search for an image for your service published on DockerHub Google \"dockerhub my_service_name\" Sometimes multiple images can be found for your service. Take the official image when possible, or the one most relevant to your use-case. Deploy from a Dockerfile If no suitable image can be found on DockerHub, it can be deployed from a Dockerfile. See above to do so.","s":"Find an image for your service","u":"/docs/deploy-from-docker","h":"#find-an-image-for-your-service","p":132},{"i":137,"t":"Once you have a Docker image for your application you can deploy it using the DSRI web UI. Go to the Overview page of your project. Click the Add to Project button in the top right corner > Deploy Image Select to deploy from Image Name Provide your image name, e.g. umdsri/freesurfer Optionally change the Name; it needs to be unique per project. Click Deploy. 
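If you prefer the command line over the web UI, the same deployment can also be started with the oc client (a sketch using the example image name from above; adjust it to your own image):

```bash
# Deploy the DockerHub image and give the application a unique name
oc new-app umdsri/freesurfer --name=freesurfer
```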
Fix a common problem Once the application is deployed it will most probably fail because it has not been optimized to work with OpenShift's random user IDs. You will need to add an entry to the deployment to enable your image to run using any user ID. Go to Topology, click on your application node, click on the Actions button of your application details, and Edit deployment. In the deployment YAML search for spec: which has a containers: as child, and add the following under spec: spec: serviceAccountName: anyuid containers: ... Access the application You should now see your pod deployed on the Overview page of your project. You can expose routes to this pod in the Overview page: Create route","s":"Deploy the image on DSRI","u":"/docs/deploy-from-docker","h":"#deploy-the-image-on-dsri","p":132},{"i":139,"t":"In case there is no Docker image for your application you can build and push one. To build and push a Docker image you will need to have Docker installed. Install Docker See the official documentation to install Docker.","s":"Build and push a new Docker image","u":"/docs/deploy-from-docker","h":"#build-and-push-a-new-docker-image","p":132},{"i":141,"t":"If no images are available on DockerHub, it is still possible that the developers created the Dockerfile to build the image without pushing it to DockerHub. Go to the GitHub/GitLab source code repository and search for a Dockerfile; it can usually be found in the source code repository root folder, in a docker subfolder, or as instructions in the README.md If no Dockerfile is available, we will need to define one. Contact us Feel free to contact us to get help with this, especially if you are unfamiliar with Docker.","s":"Define a Dockerfile","u":"/docs/deploy-from-docker","h":"#define-a-dockerfile","p":132},{"i":143,"t":"Once a Dockerfile has been defined for the service you can build it by running the following command from the source code root folder, where the Dockerfile is: docker build -t username/my-service . Arguments can be provided when starting the build; they need to be defined in the Dockerfile to be used. docker build -t username/my-service --build-arg MY_ARG=my_value .","s":"Build the image","u":"/docs/deploy-from-docker","h":"#build-the-image","p":132},{"i":145,"t":"Before pushing it to DockerHub you will need to create a repository. To do so, click on Create Repository. DockerHub is free for public repositories Images can be published under your DockerHub user or an organization you belong to. Login to DockerHub, if not already done: docker login Push the image previously built to DockerHub: docker push username/my-service You can link DockerHub to your source code repository and ask it to build the Docker image automatically (from the Dockerfile in the root folder). It should take between 10 and 30 min for DockerHub to build your image. Deploy from a local Dockerfile You can also deploy a service on the DSRI directly from a local Dockerfile, to avoid using DockerHub. See this page to deploy a service from a local Dockerfile for more instructions","s":"Push to DockerHub","u":"/docs/deploy-from-docker","h":"#push-to-dockerhub","p":132},{"i":147,"t":"deploy-gitlab-runner First, obtain a GitLab Runner registration token via the GitLab web interface TODO: add screenshot Add \"GitLab Runner\" operator to your project from the Operators --> OperatorHub page. Make sure you choose the \"certified\" GitLab Runner (v1.4.0) The community runner (v1.10.0) is a bit more up to date, but currently does not work. 
Install in a specific namespace on the cluster. Choose your namespace in the dropdown. Create registration token secret: ---apiVersion: v1kind: Secretmetadata: name: gitlab-runner-secrettype: OpaquestringData: runner-registration-token: oc create -f gitlab-runner-secret.yaml Alternatively, this should also work: oc create secret generic gitlab-runner-secret --from-literal=runner-registration-token= Add the following to the ConfigMap of the GitLab Runner operator: [[runners]] executor = \"kubernetes\" [runners.kubernetes] [runners.kubernetes.volumes] [[runners.kubernetes.volumes.empty_dir]] name = \"empty-dir\" mount_path = \"/\" medium = \"Memory\" Create the configmap: oc create configmap custom-config-toml --from-file config.toml=/tmp/customconfig Create the GitLab Runner custom resource: apiVersion: apps.gitlab.com/v1beta2kind: Runnermetadata: name: gitlab-runnerspec: gitlabUrl: https://gitlab.maastrichtuniversity.nl token: gitlab-runner-secret config: custom-config-toml tags: openshift --- Alternative definition (do not use): apiVersion: apps.gitlab.com/v1beta2kind: Runnermetadata: name: gitlab-runnerspec: gitlabUrl: https://gitlab.maastrichtuniversity.nl buildImage: alpine token: gitlab-runner-secret tags: openshift","s":"deploy-gitlab-runner","u":"/docs/deploy-gitlab-runner","h":"","p":146},{"i":149,"t":"Deploy applications Data Science catalog Databases","s":"Databases","u":"/docs/deploy-database","h":"","p":148},{"i":151,"t":"You can easily create a database from the templates available in the DSRI OpenShift web UI catalog: You can connect to a database from another application in the same project by using the database service name as hostname: You can also use the oc CLI to get the services in your project: oc get services","s":"SQL databases","u":"/docs/deploy-database","h":"#sql-databases","p":148},{"i":153,"t":"Use the Postgresql template in the DSRI OpenShift web UI catalog to start a SQL database. Connect to the database When the database has been deployed, you can connect from another pod using your favorite language and connector. Example with the psql Command Line Interface: apt-get update && apt-get install postgresql-client -y Connect to the Postgresql database using the service name (change depending on the username and database name you chose): psql -h postgresql-db -U postgres db Checkout the dsri-demo repository for a quick demo for accessing and using a PostgreSQL database from a Jupyter notebook on the DSRI.","s":"Start PostgreSQL 🐘","u":"/docs/deploy-database","h":"#start-postgresql-","p":148},{"i":155,"t":"Use the MySQL template in the DSRI OpenShift web UI catalog. Connect to the database When the database has been deployed, you can connect from another pod using your favorite language and connector. Example with the mysql Command Line Interface: apt-get update && apt-get install mariadb-client -y Connect to the MySQL database using the service name: mysql -h example-mysql -p Checkout the dsri-demo repository for a quick demo for accessing and using a MySQL database from a Jupyter notebook on the DSRI. Alternatively, MySQL databases can be started using Helm, see the Helm documentation page for more details.","s":"Start MySQL 🐬","u":"/docs/deploy-database","h":"#start-mysql-","p":148},{"i":158,"t":"MongoDB is a general purpose, document-based, distributed database built for modern application developers and for the cloud era. Use the MongoDB template in the DSRI OpenShift web UI catalog. 
Connect to the database Use the service name as hostname to connect from another pod in the same project.","s":"MongoDB 🌿","u":"/docs/deploy-database","h":"#mongodb-","p":148},{"i":160,"t":"Redis is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. Use the Redis template in the DSRI OpenShift web UI catalog. Connect to the database Use the service name as hostname to connect from another pod in the same project.","s":"Redis 🎲","u":"/docs/deploy-database","h":"#redis-","p":148},{"i":163,"t":"Search for the Virtuoso triplestore template in the DSRI web UI catalog. Instantiate the template to create a Virtuoso triplestore in your project. The deployment is based on the latest open source version of Virtuoso: https://hub.docker.com/r/openlink/virtuoso-opensource-7 Connect to the database Use the service name as hostname to connect from another pod in the same project.","s":"OpenLink Virtuoso triplestore","u":"/docs/deploy-database","h":"#openlink-virtuoso-triplestore","p":148},{"i":165,"t":"Use the official DockerHub image if you have an enterprise license. Or build the GraphDB free edition image from graphdb-docker on GitHub. After copying the .zip file into the graphdb-docker/free-edition folder, go to the graphdb-docker folder in your terminal: cd graphdb-docker Before creating your GraphDB ImageStream, make sure you are in the right project: oc project my-project Create the ImageStream for GraphDB: oc new-build --name graphdb --binary Build the image on the DSRI and save it in the ImageStream: oc start-build graphdb --from-dir=free-edition --follow --wait You can now use the Ontotext GraphDB template to deploy a GraphDB instance on DSRI. Use the name of the ImageStream when instantiating the template; you can check if the image was properly built in Search > Filter Resources for ImageStreams Connect to the database Use the service name as hostname to connect from another pod in the same project.","s":"Ontotext GraphDB triplestore","u":"/docs/deploy-database","h":"#ontotext-graphdb-triplestore","p":148},{"i":167,"t":"AllegroGraph® is a modern, high-performance, persistent graph database. It supports SPARQL, RDFS++, and Prolog reasoning from numerous client applications. AllegroGraph has not been tested on DSRI yet, but it can be deployed on Kubernetes using Helm, cf. https://www.github.com/franzinc/agraph-examples/tree/master/clustering%2Fkubernetes%2Fmmr%2Fkubernetes-mmr.md","s":"AllegroGraph","u":"/docs/deploy-database","h":"#allegrograph","p":148},{"i":169,"t":"Deploy applications Data Science catalog Jupyter Notebooks","s":"Jupyter Notebooks","u":"/docs/deploy-jupyter","h":"","p":168},{"i":171,"t":"Start a JupyterLab container based on the official Jupyter docker stacks (debian), with sudo privileges to install anything you need (e.g. 
pip or apt packages) You can start a container using the JupyterLab template in the Catalog web UI (make sure the Templates checkbox is checked) When instantiating the template you can provide a few parameters, such as: Password to access the notebook Optionally you can provide a git repository to be automatically cloned in the JupyterLab (if there is a requirements.txt, packages will be automatically installed with pip) Docker image to use for the notebook (see below for more details on customizing the docker image) Your git username and email to automatically configure git The DSRI will automatically create a persistent volume to store data you will put in the /home/jovyan/work folder (the folder used by the notebook interface). You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. With this template you can use any image based on the official Jupyter docker stack: https://github.com/jupyter/docker-stacks ghcr.io/maastrichtu-ids/jupyterlab:latest: custom image for data science on the DSRI, with additional kernels (Java), conda integration, VisualStudio Code, and autocomplete for Python ghcr.io/maastrichtu-ids/jupyterlab:knowledge-graph: custom image for working with knowledge graphs on the DSRI, with SPARQL kernel and OpenRefine jupyter/scipy-notebook: some packages for science are preinstalled jupyter/datascience-notebook: with Julia kernel jupyter/tensorflow-notebook: with tensorflow package pre-installed jupyter/r-notebook: to work with R jupyter/pyspark-notebook: if you want to connect to a Spark cluster jupyter/all-spark-notebook: if you want to run Spark locally in the notebook You can also build your own image; we recommend using this repository as an example to extend a JupyterLab image: https://github.com/MaastrichtU-IDS/jupyterlab","s":"🪐 Start JupyterLab","u":"/docs/deploy-jupyter","h":"#-start-jupyterlab","p":168},{"i":173,"t":"With the ghcr.io/maastrichtu-ids/jupyterlab:latest image, you can easily start notebooks from the JupyterLab Launcher page using installed conda environments, on the condition that nb_conda_kernels and ipykernel are installed in those environments. You can pass a Git repository URL which contains an environment.yml file in the root folder when starting JupyterLab; the conda environment will automatically be installed at the start of your container, and available in the JupyterLab Launcher page. You can use this repository as an example: https://github.com/MaastrichtU-IDS/dsri-demo Or you can install it directly in a running JupyterLab (we use mamba which is like conda but faster): mamba env create -f environment.yml You'll need to wait for 1 or 2 minutes before the new conda environment becomes available on the JupyterLab Launcher page. You can easily install an environment with a different version of Python if you need it. 
Here is an example of an environment.yml file to create an environment with Python 3.9, install the minimal dependencies required to easily start notebooks in this environment with conda, and install a pip package: name: custom-envchannels: - defaults - conda-forge - anacondadependencies: - python=3.9 - ipykernel - nb_conda_kernels - pip - pip: - matplotlib ⚠️ You cannot use conda activate in a Docker container, so you will need to either open a notebook using the kernel for your conda env, or use conda run to run scripts in the new environment: conda run -n custom-env python --version","s":"📦️ Manage dependencies with Conda","u":"/docs/deploy-jupyter","h":"#️-manage-dependencies-with-conda","p":168},{"i":175,"t":"You can always use git from the terminal. Configure username Before pushing back to GitHub or GitLab, you will need to configure your username and email in the terminal: git config --global user.name \"Jean Dupont\"git config --global user.email jeandupont@gmail.com Save your password You can run this command to ask git to save your password for 15min: git config credential.helper cache Or store the password in a plain text file: git config --global credential.helper 'store --file ~/.git-credentials' Git tip We recommend using SSH instead of HTTPS connections when possible, checkout here how to generate SSH keys and use them with your GitHub account. You can also enable and use the JupyterLab Git extension to clone and manage your git repositories. It will prompt you for a username and password if the repository is private.","s":"🐙 Use git in JupyterLab","u":"/docs/deploy-jupyter","h":"#-use-git-in-jupyterlab","p":168},{"i":177,"t":"Initialize repository Include git details in DSRI project setup Verify automatic deployment","s":"🐶 Example","u":"/docs/deploy-jupyter","h":"#-example","p":168},{"i":179,"t":"Deploy applications Data Science catalog Matlab","s":"Matlab","u":"/docs/deploy-matlab","h":"","p":178},{"i":181,"t":"Start Matlab with a desktop UI accessible directly using your web browser at an automatically generated URL. Go to the Catalog, make sure Templates are displayed (box checked), search for Matlab, and provide the right parameters: You will need to provide the password you will use to access the Matlab UI when filling the template. You can also change the Matlab image version, see the latest version released in the official Matlab Docker image documentation Once Matlab starts you can access it through 2 routes (URLs), which can be accessed when clicking on the Matlab node in the Topology: The main matlab route to access the Matlab desktop UI directly in your web browser. It is recommended to use this route. The matlab-vnc route can be used to access Matlab using a VNC client (you will need to use the full URL to your Matlab VNC route). Only use it if you know what you're doing.","s":"Use the official Matlab image","u":"/docs/deploy-matlab","h":"#use-the-official-matlab-image","p":178},{"i":183,"t":"The official Matlab image is infamous for showing a black screen after a few hours of use, making it hard to use reliably. 
We have a solution if you need a more stable Matlab image, but it requires a few more manual operations: Use the Ubuntu with GUI template to set up an Ubuntu pod on the DSRI with the image ghcr.io/vemonet/docker-ubuntu-vnc-desktop:latest Start firefox and browse to https://nl.mathworks.com Log in with your personal Matlab account, create one if you don’t have it Choose Get MATLAB and download the Linux Matlab version Open a terminal window and run the following commands: sudo apt-get updatesudo apt-get install unzip# Unzip the previously downloaded matlab installation file# start the matlab installation with:sudo ./install You will then be guided through the Matlab installation process: Fill in your personal matlab account credentials ⚠️ Fill in the username as used in the Ubuntu environment, in your case it will most probably be root (Matlab gives a license error if this is not correct, check with whoami in the terminal when in doubt) Select the Matlab modules you want to be installed Check \"symbolic link\" and \"Improve……\"","s":"Use a stable Matlab image","u":"/docs/deploy-matlab","h":"#use-a-stable-matlab-image","p":178},{"i":185,"t":"You can also use mathworks/jupyter-matlab-proxy. You can easily install it in a JupyterLab image with pip: pip install jupyter-matlab-proxy Follow the instructions on the mathworks/jupyter-matlab-proxy repository to access it.","s":"Use Matlab in Jupyter","u":"/docs/deploy-matlab","h":"#use-matlab-in-jupyter","p":178},{"i":187,"t":"We use the Matlab template in the DSRI catalog to deploy a pre-built Nvidia Matlab Deep Learning Container on CPU or GPU nodes. See the official documentation from MathWorks for more details about this image. Request access to Matlab To be able to access the Matlab on GPU template you will need to ask the DSRI admins to enable it in your project. 2 options are available to connect to your running Matlab pod terminal: Go to the matlab pod page on the DSRI web UI Or connect from your terminal with oc rsh MATLAB_POD_ID Type bash when first accessing the terminal to have a better experience. Type cd /ContainerDeepLearningData to go to the persistent volume, and use this volume to store all data that should be preserved. Type matlab to access Matlab from the terminal It is possible to access the Matlab desktop UI through VNC and a web UI, but the script to start it in /bin/run.sh seems to face some errors, let us know if you have any luck with this. By default the image runs with the matlab user, which does not have sudo privileges; you can run the container as root if you need to install packages which require admin privileges.","s":"Deploy Matlab on GPU","u":"/docs/deploy-matlab","h":"#deploy-matlab-on-gpu","p":178},{"i":189,"t":"Follow the instructions at: https://github.com/mathworks-ref-arch/matlab-dockerfile This will require you to retrieve Matlab installation files to build your own container. Once all the files have been properly placed in the folder and the license server URL has been set, you can start the build on DSRI by following the documentation to deploy from a Dockerfile License server not available on your laptop If you try to build Matlab directly on your laptop it will most probably fail as your machine might not have access to the license server. You will need to build the Matlab container directly on DSRI with oc start-build Once Matlab is deployed, you will need to edit the matlab deployment YAML before it works. 
Go to Topology, click on the Matlab node, click on the Actions button of the matlab details, and Edit deployment. In the deployment YAML search for spec: which has a containers: as child, and add the following under spec: spec: serviceAccountName: anyuid containers: ... Your Matlab container should now be running! 2 options are available to connect to your running Matlab pod terminal: Go to the matlab pod page on the DSRI web UI Or connect from your terminal with oc rsh MATLAB_POD_ID You can access Matlab from the terminal by running matlab Unfortunately Matlab did not expect its users to need the graphical UI when using Matlab in containers. So only the command line is available by default. You can find more information to enable the Matlab UI in this issue.","s":"Build your own Matlab image","u":"/docs/deploy-matlab","h":"#build-your-own-matlab-image","p":178},{"i":191,"t":"Deploy applications GPU applications","s":"GPU applications","u":"/docs/deploy-on-gpu","h":"","p":190},{"i":193,"t":"You will first need to start your workspace without the GPU enabled; you can then prepare your experiments: clone the code, download the data, prepare scripts to install all requirements (the workspace will be restarted when you enable the GPU).","s":"Prepare your GPU workspace","u":"/docs/deploy-on-gpu","h":"#prepare-your-gpu-workspace","p":190},{"i":195,"t":"We are mainly using images provided by Nvidia, with all required drivers and optimizations for GPU pre-installed. You can access the workspace with JupyterLab and VisualStudio Code in your browser, and install dependencies with apt-get, conda or pip in the workspace. We currently mainly use Tensorflow, PyTorch and CUDA, but any image available in the Nvidia catalog should be easy to deploy. Checkout this documentation for more details on how we build the optimized docker images for the DSRI GPUs. And feel free to extend the images to install any software you need.","s":"About the docker images","u":"/docs/deploy-on-gpu","h":"#about-the-docker-images","p":190},{"i":197,"t":"You can easily deploy your GPU workspace from the DSRI catalog: Go to the DSRI Catalog web UI: Click on Add to Project, then Browse Catalog Search the catalog for \"GPU\", and make sure the Template checkbox is enabled Choose the template: JupyterLab on GPU Follow the instructions to create the template in the DSRI web UI, all information about the images you can use is provided there. The most notable is the base image you want to use for your workspace (cuda, tensorflow or pytorch) Access the workspace from the route created (the small arrow at the top right of your application bubble in the Topology page).","s":"Deploy the workspace","u":"/docs/deploy-on-gpu","h":"#deploy-the-workspace","p":190},{"i":199,"t":"You can now add your code and data in the persistent folder to be fully prepared when you get access to the GPUs. You can install dependencies with apt-get, conda or pip. We recommend you to use scripts stored in the persistent folder to easily install all your requirements, so you can reinstall them when we enable the GPU, as it restarts the workspace. For more information on how to use conda/mamba to install new dependencies or a complete environment (useful if you need to use a different version of python than the one installed by default) checkout this page. 
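For example, such an install script kept in the persistent folder could look like this (a hypothetical install.sh, assuming a Debian-based image and a requirements.txt you provide yourself):

```bash
#!/bin/bash
# Hypothetical install.sh stored in the persistent folder, so your
# requirements can be reinstalled after the workspace is restarted
apt-get update && apt-get install -y git vim

# Reinstall your pip dependencies
pip install -r /workspace/persistent/requirements.txt

# Or recreate a complete conda environment instead:
# mamba env create -f /workspace/persistent/environment.yml
```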
⚠️ We recommend you to also try and debug your code on a small sample using the CPU before getting the GPU, this way you will be able to directly start long-running tasks when you get the GPU, instead of losing time debugging your code (it's probably not going to work on the first try, you know it). You can find more details on the images we use and how to extend them in this repository. Storage Use the /workspace/persistent folder, which is the JupyterLab workspace, to store your code and data persistently. Note that loading data from the persistent storage will be slower than what you might expect; this is due to the nature of the distributed storage. So try to optimize this part and avoid reloading your data multiple times, and let us know if it is too much of a problem, we have some solutions to improve this","s":"Prepare the workspace","u":"/docs/deploy-on-gpu","h":"#prepare-the-workspace","p":190},{"i":201,"t":"You will receive an email when the GPU has been enabled in your project. You can then update your deployment to use the GPUs using either the oc command-line tool, or by editing the deployment configuration from the web UI With the Command Line Interface, run the following command from the terminal of your laptop after having installed the oc command-line tool. We use jupyterlab-gpu as the deployment name in this example, change it to yours if it is different. oc patch dc/jupyterlab-gpu --type=json -p='[{\"op\": \"replace\", \"path\": \"/spec/template/spec/containers/0/resources\", \"value\": {\"requests\": {\"nvidia.com/gpu\": 1}, \"limits\": {\"nvidia.com/gpu\": 1}}}]' Or through the web UI In the Topology view click on the circle representing your GPU application, then click on the Actions button in the top right of the screen, and click on Edit Deployment Config at the bottom of the list In the Deployment Config text editor, hit ctrl + f to search for \"resources\". You should see a line - resources: {} under containers:. You need to change this line to the following to enable GPU in your application (and make sure the indentation matches the rest of the file): - resources: requests: nvidia.com/gpu: 1 limits: nvidia.com/gpu: 1 Then wait for the pod to restart, or start it if it was stopped. You can use the following command in the terminal of your container on the DSRI to see the current GPU usage: nvidia-smi Windows When using the above command with the oc client on Windows you might receive an error like: error: unable to parse \"'[{op:\": yaml: found unexpected end of stream This is because the single quotation mark on Windows is handled differently. Try replacing the single quotation marks in the command with double quotation marks and the command should work.","s":"Enable the GPU","u":"/docs/deploy-on-gpu","h":"#enable-the-gpu","p":190},{"i":203,"t":"The GPU allocated to your workspace will be automatically disabled after your booking ends at 9:00. 
You can also manually disable the GPU from your app; the pod will be restarted automatically on a CPU node: oc patch dc/jupyterlab-gpu --type=json -p='[{\"op\": \"replace\", \"path\": \"/spec/template/spec/containers/0/resources\", \"value\": {}}]'","s":"Disable the GPU","u":"/docs/deploy-on-gpu","h":"#disable-the-gpu","p":190},{"i":205,"t":"If you have been granted a 2nd GPU to speed up your experiment you can easily upgrade the number of GPUs used by your workspace: From the Topology view click on your application: Stop the application, by decreasing the number of pods to 0 (in the Details tab) Click on Options > Edit Deployment > in the YAML of the deployment search for limits and change the number of GPUs assigned to your deployment to 2: resources: limits: nvidia.com/gpu: '2' requests: nvidia.com/gpu: '2' You can also do it using the command line, make sure to stop the pod first, and replace jupyterlab-gpu by your app name in this command: oc patch dc/jupyterlab-gpu --type=json -p='[{\"op\": \"replace\", \"path\": \"/spec/template/spec/containers/0/resources\", \"value\": {\"requests\": {\"nvidia.com/gpu\": 2}, \"limits\": {\"nvidia.com/gpu\": 2}}}]' Restart the pod for your application (the same way you stopped it)","s":"Increase the number of GPUs","u":"/docs/deploy-on-gpu","h":"#increase-the-number-of-gpus","p":190},{"i":207,"t":"You can also install the GPU drivers in any image and use this image directly. See the latest official Nvidia docs to install the nvidia-container-runtime, which should contain all packages and drivers required to access the GPU from your application. Here is an example of commands to add to a Debian-based Dockerfile to install the GPU drivers (note that this is not complete, you will need to check the latest instructions and do some research & development to get it to work): RUN curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | \\ apt-key add - \\ && distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \\ && curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | RUN apt-get update \\ && apt-get install -y nvidia-container-runtime Then, build your image in your DSRI project using oc from the folder where you put the Dockerfile (replace custom-app-gpu by your app name): oc new-build --name custom-app-gpu --binaryoc start-build custom-app-gpu --from-dir=. --follow --waitoc new-app custom-app-gpu You will then need to edit the deployment to set serviceAccountName: anyuid and add a persistent storage oc edit custom-app-gpu Finally, when your reservation starts, checkout the section above about how to enable the GPU in your workspace See also: official Nvidia docs for CUDA","s":"Install GPU drivers in any image","u":"/docs/deploy-on-gpu","h":"#install-gpu-drivers-in-any-image","p":190},{"i":209,"t":"Deploy applications Data Science catalog RStudio","s":"RStudio","u":"/docs/deploy-rstudio","h":"","p":208},{"i":211,"t":"Start an RStudio container based on Rocker RStudio tidyverse images (debian), with sudo privileges to install anything you need (e.g. pip or apt packages) You can start a container using the RStudio template in the Catalog web UI (make sure the Templates checkbox is checked) Provide a few parameters, and Instantiate the template. The username will be rstudio and the password will be what you configure yourself; the DSRI will automatically create a persistent volume to store data you will put in the /home/rstudio folder. 
You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. Official image documentation See the official Docker image documentation for more details about the container deployed.","s":"Start RStudio","u":"/docs/deploy-rstudio","h":"#start-rstudio","p":208},{"i":213,"t":"Start an RStudio application, with a complementary Shiny server, using a regular rstudio user, without sudo privileges. Create the template in your project: In the DSRI web UI, go to + Add, then click on YAML, add the content of the template-rstudio-shiny-restricted.yml file, and validate. You can also do it using the terminal: oc apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/templates/restricted/template-rstudio-shiny-restricted.yml Once the template has been created in your project, use the RStudio with Shiny server template in the OpenShift web UI catalog. It will automatically create a persistent storage for the data. No sudo privileges You will not have sudo privileges in the application.","s":"Restricted RStudio with Shiny server","u":"/docs/deploy-rstudio","h":"#restricted-rstudio-with-shiny-server","p":208},{"i":215,"t":"The fastest way to get started is to use git from the terminal, for example to clone a git repository use git clone You can also check how to enable Git integration in RStudio at https://support.rstudio.com/hc/en-us/articles/200532077 You can run this command to ask git to save your password for 15min: git config credential.helper cache Or store the password/token in a plain text file: git config --global credential.helper 'store --file ~/.git-credentials' Before pushing back to GitHub or GitLab, you will need to configure your username and email in the terminal: git config --global user.name \"Jean Dupont\"git config --global user.email jeandupont@gmail.com Git tip We recommend using SSH instead of HTTPS connections when possible, checkout here how to generate SSH keys and use them with your GitHub account.","s":"Use Git in RStudio","u":"/docs/deploy-rstudio","h":"#use-git-in-rstudio","p":208},{"i":217,"t":"You can visit this folder, which gives all the resources and instructions explaining how to run a standalone R job on the DSRI: https://github.com/MaastrichtU-IDS/dsri-demo/tree/main/r-job If you want to run jobs directly from RStudio, checkout this package to run chunks of R code as jobs directly through RStudio: https://github.com/lindeloev/job","s":"Run R jobs","u":"/docs/deploy-rstudio","h":"#run-r-jobs","p":208},{"i":219,"t":"Deploy applications Data Science catalog JupyterHub","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":221,"t":"Before you begin, download the config.yaml Download the preconfigured config.yaml from our GitHub repository. The default config that is provided by JupyterHub will not work.","s":"Downloading and adjusting the config.yaml","u":"/docs/deploy-jupyterhub","h":"#downloading-and-adjusting-the-configyaml","p":218},{"i":224,"t":"Persistent volumes​ Persistent volumes are automatically created for each user and instance started in JupyterHub to ensure persistence of the data even when JupyterHub is stopped. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. It is possible to change the default size of a persistent volume claim for a user in the config.yaml. In our config.yaml the default value is 2Gi. 
However, if you think that your users will need more storage space you can change this default size in the config.yaml. singleuser: # ... storage: capacity: 2Gi","s":"Setting users' default persistent volume size","u":"/docs/deploy-jupyterhub","h":"#setting-users-default-persistent-volume-size","p":218},{"i":226,"t":"At the moment we support three different authentication methods. One for testing purposes (dummy authentication), one for people who are working alone in a JupyterHub instance or with one or two collaborators (allowed_users / admin_users authentication), and one for allowing groups of people to collaborate in the same JupyterHub instance (GitHub OAuth). By default the dummy authentication is set in the config.yaml. Note that this is only for testing purposes!!! However, with very few changes to the config.yaml you can set up the other authentication methods. For reference, see the zero2jupyterhub documentation about authentication methods Dummy authentication​ This authentication method is set by default and is only there so that you can easily test your JupyterHub instance without needing to set up proper authentication. The catch with this method is that whatever username/password combination you fill in, you will get access! In other words, this is completely unsafe to use in any use case other than testing! In the config.yaml you see, besides the other commented-out authentication methods, the following block of text: hub: # ... config: JupyterHub: admin_access: true authenticator_class: dummy Some parts are intentionally left out here, shown as dots # ... for better representation. If you are first setting up your JupyterHub instance you can leave this as is. Upon going to your instance via the URL you will be prompted with a login screen:","s":"Configuring an authentication method","u":"/docs/deploy-jupyterhub","h":"#configuring-an-authentication-method","p":218},{"i":228,"t":"Fill in any username and password combination you would like and the user account will be made. Note that this user account really is made and has its own user pod in the deployment. It has a persistent volume as well and all other properties like any other user account that will be made. However, you can use whatever password you fill in to access this account. In other words, do not use this user actively and definitely do not store any (sensitive) data in this user account! allow_users / admin_users authentication​ If you will be working on your own in your JupyterHub instance it will be easiest to use the allow_users / admin_users authentication method. This method will let you specify a user and admin account with a shared password. It is important that you keep this password a secret and safe! If people get their hands on it they can access your JupyterHub instance and log in as an admin, which can lead to serious consequences. If you want to make use of this config, uncomment the following block of text and comment out the previous block of text seen in the Dummy authentication section above: hub: # ... config: Authenticator: admin_users: - admin allowed_users: - user1 DummyAuthenticator: password: a-shared-secret-password JupyterHub: authenticator_class: dummy Note that this password is in plaintext in your config.yaml. Do not use a password you use for other accounts; this is never a good idea and is surely not a good idea in this case! Unfortunately it is not possible to set passwords in JupyterHub using secrets in the DSRI at the moment. 
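Since the shared password ends up in plaintext in the config.yaml, one option is to generate a random string for it rather than reusing anything (a sketch, assuming openssl is available on your machine):

```bash
# Generate a random 32-character hex string to use as the shared password
openssl rand -hex 16
```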
If you need to share your JupyterHub instance with others we recommend you to use the GitHub OAuth authentication method described below. GitHub OAuth authentication​ This authentication method is the most secure option we provide at the moment. The major caveat is that you and the people you want to collaborate with need a GitHub account. Moreover, you will need to create an organization and team within that organization, or have access to an organization and team. You grant the people authorization to log in to the JupyterHub instance with their GitHub account by adding them to a team in an organization in GitHub. hub: # ... config: GitHubOAuthenticator: client_id: your-client-id client_secret: your-client-secret oauth_callback_url: https://-.apps.dsri2.unimaas.nl/hub/oauth_callback JupyterHub: authenticator_class: github For creating an OAuth app in GitHub please refer to GitHub's documentation. The GitHub OAuth app will provide your client ID and client secret. Fill in the values you provided yourself in the previous steps accordingly. To set up an organization and team, please refer to GitHub's documentation as well.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":231,"t":"Before you begin, download the config.yaml Download the preconfigured config.yaml from our GitHub repository. The default config that is provided by JupyterHub will not work.","s":"Deploying JupyterHub using the DSRI website 🪐","u":"/docs/deploy-jupyterhub","h":"#deploying-jupyterhub-using-the-dsri-website-","p":218},{"i":233,"t":"After you have created a project you can start with installing the JupyterHub Helm Chart. If you do not have access to the DSRI or have not created a project yet, and you need to find out how, please refer to our documentation. Helm Chart already available The Helm Chart should be already made available for everyone to use on the DSRI platform. There will be no need to install the repository yourself.","s":"Installing the JupyterHub Helm Chart repository","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart-repository","p":218},{"i":235,"t":"In Developer mode in your project, go to Helm in the sidepanel (1). Next, click on Create and choose Repository (2).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":237,"t":"Then fill in the Name, Display Name, give it a Description and fill in the URL: https://hub.jupyter.org/helm-chart/.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":239,"t":"Next, click Create.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":242,"t":"info At the moment the latest (and only) Helm Chart version supported by the DSRI is version 3.3.8. Newer versions will not work, and older versions are not tested and/or configured!","s":"Installing the JupyterHub Helm Chart","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart","p":218},{"i":244,"t":"In Developer mode in your project, go to Helm in the sidepanel (1). 
Next, click on Create and choose Helm Release (2).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":246,"t":"Search for jupyterhub (or the name you gave the repository if you added the repository yourself), and choose the JupyterHub Helm Chart (1).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":248,"t":"Click Create.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":250,"t":"Click the Chart version drop down menu (1).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":252,"t":"And choose the right Chart version: 3.3.8 (1). Note that this is an important step, as we only support version 3.3.8 at the moment. Newer versions do not work yet, and older versions have not been configured or tested!","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":254,"t":"Now, change the config with the content of the config.yaml you have downloaded from our GitHub repository. Copy the content of the config.yaml and paste it in the highlighted box to replace the old config with the new one. Click Create to install the JupyterHub Helm Chart.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":257,"t":"Create a secured route, with TLS edge termination. In Developer mode in your project, go to Project in the sidepanel (1). Next, click on Route (2).","s":"Creating a secured route","u":"/docs/deploy-jupyterhub","h":"#creating-a-secured-route","p":218},{"i":259,"t":"Next, click Create.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":261,"t":"Fill in the Name (1), choose the Service: proxy-public (2), choose the Target Port: 80 -> http (TCP) (3), tick the box Secure Route (4), and finally choose TLS Termination: Edge (5). Next, click Create, to create the route.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":263,"t":"You can easily upgrade your config.yaml in the DSRI web UI if you would like to change certain settings, such as users' default persistent volume claims, authentication methods, and many more things. Note that in some cases users who created an account with an old authentication method will still have access via that method; make sure you set up your preferred authentication method before allowing users to authenticate and use the JupyterHub instance.","s":"Upgrading the config.yaml","u":"/docs/deploy-jupyterhub","h":"#upgrading-the-configyaml","p":218},{"i":265,"t":"In Developer mode in your project, go to Helm in the sidepanel (1). Next, click on your Helm Chart Release (2).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":267,"t":"Now, click the Actions drop down menu, and choose Upgrade (1).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":269,"t":"In the box (highlighted in the picture below) you can make changes to the config.yaml. After you have made your changes, click Upgrade and your upgraded JupyterHub Helm Chart Release will automatically be deployed.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":271,"t":"Configure JupyterHub Feel free to submit a ticket to ask for help configuring your JupyterHub.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":274,"t":"Before you begin, download the config.yaml Download the preconfigured config.yaml from our GitHub repository.
The default config that is provided by JupyterHub will not work.","s":"Deploying JupyterHub using the Command Line Interface (CLI) 🪐","u":"/docs/deploy-jupyterhub","h":"#deploying-jupyterhub-using-the-command-line-interface-cli-","p":218},{"i":276,"t":"After you have created a project you can start installing the JupyterHub Helm Chart. If you do not have access to the DSRI or have not created a project yet, please refer to our documentation to find out how. Helm Chart already available The Helm Chart should already be available for everyone to use on the DSRI platform. There is no need to install the repository yourself.","s":"Installing the JupyterHub Helm Chart repository","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart-repository-1","p":218},{"i":278,"t":"Add the JupyterHub Helm Chart repository: helm repo add jupyterhub https://hub.jupyter.org/helm-chart/ helm repo update","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":280,"t":"info At the moment the latest (and only) Helm Chart version supported by the DSRI is version 3.3.8. Newer versions will not work, and older versions have not been tested or configured!","s":"Installing the JupyterHub Helm Chart","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart-1","p":218},{"i":282,"t":"Make sure you use the right config.yaml downloaded from our GitHub repository. Install the Helm Chart using the following command: helm upgrade --cleanup-on-fail \\ --install jupyterhub jupyterhub/jupyterhub \\ --version=3.3.8 \\ --namespace=<namespace> \\ --values config.yaml <namespace> is the name of the namespace your project is in.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":284,"t":"Create a secured route, with TLS edge termination: oc create route edge <route-name> --namespace <namespace> --service=proxy-public --port=http <namespace> is the name of the namespace your project is in. <route-name> is the name of the route.","s":"Creating a secured route","u":"/docs/deploy-jupyterhub","h":"#creating-a-secured-route-1","p":218},{"i":286,"t":"Run the following command with your new config.yaml: helm upgrade --cleanup-on-fail \\ --install jupyterhub jupyterhub/jupyterhub \\ --version=3.3.8 \\ --namespace=<namespace> \\ --values config.yaml <namespace> is the name of the namespace your project is in. Note that the namespace should be the same namespace as the one where your original deployment was initiated!","s":"Upgrading the config.yaml","u":"/docs/deploy-jupyterhub","h":"#upgrading-the-configyaml-1","p":218},{"i":288,"t":"Configure JupyterHub Feel free to submit a ticket to ask for help configuring your JupyterHub.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":218},{"i":290,"t":"Deploy applications Data Science catalog VisualStudio Code","s":"VisualStudio Code","u":"/docs/deploy-vscode","h":"","p":289},{"i":292,"t":"Start a VisualStudio Code server with the coder user, which has sudo privileges. You can deploy it using the VisualStudio Code server solution in the Catalog web UI (make sure the Templates checkbox is checked). Provide a few parameters, and instantiate the template. The DSRI will automatically create a persistent volume to store the data you will put in the /home/coder/project folder.
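If you prefer the terminal, a quick way to check that the claim was created is the oc client; a minimal sketch, assuming you are logged in and switched to your project:

```bash
# List the persistent volume claims in the current project
oc get pvc
```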
You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims.","s":"Start VisualStudio Code server","u":"/docs/deploy-vscode","h":"#start-visualstudio-code-server","p":289},{"i":294,"t":"The easiest way to log in and clone a repository from GitHub is to use the built-in authentication system of VisualStudio Code: to do so, click on clone repository... in the Welcome page, and follow the instructions at the top of the VisualStudio window. If this solution does not work for you, you can use git from the terminal to clone the git repository with git clone. VisualStudio might ask you to log in in the dialog box at the top of the page; enter your username and password when requested. For GitHub you might need to generate a token at https://github.com/settings/tokens to use as password. Once the repository is cloned, you can use git from the VSCode web UI to manage your git repositories (add, commit, push changes), or in the terminal. Before committing to GitHub or GitLab, you might need to configure your username and email in the VSCode terminal: git config --global user.name \"Jean Dupont\" git config --global user.email jeandupont@gmail.com Save your git password You can run this command to ask git to save your password for 15 minutes: git config credential.helper cache Or store the password in a plain text file: git config --global credential.helper 'store --file ~/.git-credentials' Git tip We recommend using SSH instead of HTTPS connections when possible; check out here how to generate SSH keys and use them with your GitHub account.","s":"Use Git in VSCode","u":"/docs/deploy-vscode","h":"#use-git-in-vscode","p":289},{"i":296,"t":"See the Deploy on GPU page to deploy a VisualStudio Code server on GPU.","s":"VSCode for GPU","u":"/docs/deploy-vscode","h":"#vscode-for-gpu","p":289},{"i":298,"t":"Miscellaneous Enabling VPN access in WSL2","s":"Enabling VPN access in WSL2","u":"/docs/enabling-vpn-wsl","h":"","p":297},{"i":300,"t":"Create a file in /etc/wsl.conf: [network] generateResolvConf = false This makes sure that WSL2 does not generate its own resolv.conf anymore. Edit the file /etc/resolv.conf and add the appropriate nameservers: nameserver 137.120.1.1 nameserver 137.120.1.5 nameserver 8.8.8.8 # OR OF YOUR CHOOSING search unimaas.nl These are all the steps you should take in WSL2. Now you should do the following step after you have connected to the VPN. You can run this command in Powershell: Get-NetAdapter | Where-Object {$_.InterfaceDescription -Match \"Cisco AnyConnect\"} | Set-NetIPInterface -InterfaceMetric 6000 You should now be able to verify that WSL2 has connectivity: ping google.com -c 4","s":"Follow these steps in the WSL2 environment:","u":"/docs/enabling-vpn-wsl","h":"#follow-these-steps-in-the-wsl2-environment","p":297},{"i":302,"t":"Deploy applications Data Science catalog Spark cluster","s":"Spark cluster","u":"/docs/deploy-spark","h":"","p":301},{"i":304,"t":"Once the DSRI admins have enabled the Spark Operator in your project, you should find a Spark Cluster entry in the Catalog (in the Operator Backed category)","s":"Deploy a Spark cluster","u":"/docs/deploy-spark","h":"#deploy-a-spark-cluster","p":301},{"i":306,"t":"Click on the Spark Cluster entry to deploy a Spark cluster. You will be presented with a form where you can provide the number of Spark workers in your cluster.
Additionally you can provide a label which can be helpful later to manage or delete the cluster; use the name of your application and the label app, e.g.: app=my-spark-cluster Change The number of Spark workers can easily be updated later in the Spark deployment YAML file.","s":"Deploy the cluster from the catalog","u":"/docs/deploy-spark","h":"#deploy-the-cluster-from-the-catalog","p":301},{"i":308,"t":"Once the cluster has been started you can create a route to access the Spark web UI: Go to Search > Click on Resources and search for Route > Click on Route You should now see the routes deployed in your project. Click on the button Create Route Give a short meaningful name to your route, e.g. my-spark-ui Keep Hostname and Path as they are Select the Service corresponding to your Spark cluster suffixed with -ui, e.g. my-spark-cluster-ui Select the Target Port of the route, it should be 8080 You can now access the Spark web UI at the generated URL to see which jobs are running and the nodes in your cluster.","s":"Create a route to the Spark dashboard","u":"/docs/deploy-spark","h":"#create-a-route-to-the-spark-dashboard","p":301},{"i":310,"t":"You can now start a Spark-enabled JupyterLab, or any other Spark-enabled application, to use the Spark cluster deployed.","s":"Run on Spark","u":"/docs/deploy-spark","h":"#run-on-spark","p":301},{"i":312,"t":"The easiest way is to use a Spark-enabled JupyterLab image, such as jupyter/pyspark-notebook But you can also use any image as long as you download the jar file, install all requirements, such as pyspark, and set the right environment variable, such as SPARK_HOME Connect to a Spark cluster deployed in the same project, replace spark-cluster by your Spark cluster name: from pyspark import SparkConf, SparkContext from pyspark.sql import SparkSession # Stop existing Spark Context spark = SparkSession.builder.master(\"spark://spark-cluster:7077\").getOrCreate() spark.sparkContext.stop() # Connect to the Spark cluster conf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077') sc = SparkContext(conf=conf) # Run basic Spark test x = ['spark', 'rdd', 'example', 'sample', 'example'] y = sc.parallelize(x) y.collect()","s":"Using PySpark","u":"/docs/deploy-spark","h":"#using-pyspark","p":301},{"i":314,"t":"SANSA is a big data engine for scalable processing of large-scale RDF data. SANSA uses Spark, or Flink, which offer fault-tolerant, highly available and scalable approaches to efficiently process massive sized datasets. SANSA provides the facilities for Semantic data representation, Querying, Inference, and Analytics. Use the Zeppelin notebook for Spark template in the catalog to start a Spark-enabled Zeppelin notebook. You can find more information on the Zeppelin image at https://github.com/rimolive/zeppelin-openshift Connect and test Spark in a Zeppelin notebook, replace spark-cluster by your Spark cluster name: %pyspark from pyspark import SparkConf, SparkContext from pyspark.sql import SparkSession # Stop existing Spark Context spark = SparkSession.builder.master(\"spark://spark-cluster:7077\").getOrCreate() spark.sparkContext.stop() # Connect to the Spark cluster conf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077') sc = SparkContext(conf=conf) # Run basic Spark test x = [1, 2, 3, 4, 5] y = sc.parallelize(x) y.collect() You should see the job running in the Spark web UI; kill the job with the kill button in the Spark dashboard.
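Outside of a notebook, the same master URL works with spark-submit. A minimal sketch, assuming spark-submit is installed in your pod, your cluster service is named spark-cluster, and my_job.py is a hypothetical script of yours:

```bash
# Submit a Python job to the Spark cluster from any pod in the same project
spark-submit --master spark://spark-cluster:7077 my_job.py
```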
You can now start to run your workload on the Spark cluster Reset a Zeppelin notebook Click on the gear wheel in the top right of the note: Interpreter binding, and reset the interpreter Use the official SANSA notebooks examples See more examples: https://github.com/rimolive/zeppelin-openshift","s":"RDF analytics with SANSA and Zeppelin notebooks","u":"/docs/deploy-spark","h":"#rdf-analytics-with-sansa-and-zeppelin-notebooks","p":301},{"i":316,"t":"Instructions available at https://github.com/rimolive/ceph-spark-integration Requirements: pip install boto Check the example notebook for Ceph storage","s":"Connect Spark to the persistent storage","u":"/docs/deploy-spark","h":"#connect-spark-to-the-persistent-storage","p":301},{"i":318,"t":"Get all objects part of the Spark cluster, change app=spark-cluster to match your Spark cluster name: oc get all,secret,configmaps --selector app=spark-cluster Then delete the Operator deployment from the OpenShift web UI overview.","s":"Delete a running Spark cluster","u":"/docs/deploy-spark","h":"#delete-a-running-spark-cluster","p":301},{"i":320,"t":"Guides Glossary","s":"Glossary","u":"/docs/glossary","h":"","p":319},{"i":323,"t":"Kubernetes is a portable, extensible, open-source platform for managing containerized workloads and services, that facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem. Kubernetes services, support, and tools are widely available. Kubernetes, also known as K8s, is an open-source system for automating deployment, scaling, and management of containerized applications. More Information: https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/","s":"Kubernetes","u":"/docs/glossary","h":"#kubernetes","p":319},{"i":325,"t":"Red Hat OpenShift is a hybrid cloud, enterprise Kubernetes application platform, trusted by 2,000+ organizations. It includes Container host and runtime Enterprise Kubernetes Validated integrations Integrated container registry Developer workflows Easy access to services","s":"OpenShift","u":"/docs/glossary","h":"#openshift","p":319},{"i":327,"t":"OKD is a distribution of Kubernetes optimized for continuous application development and multi-tenant deployment. OKD adds developer and operations-centric tools on top of Kubernetes to enable rapid application development, easy deployment and scaling, and long-term lifecycle maintenance for small and large teams. OKD is a sibling Kubernetes distribution to Red Hat OpenShift OKD 4 Documentation","s":"OKD","u":"/docs/glossary","h":"#okd","p":319},{"i":329,"t":"Deploy applications Anatomy of a DSRI application","s":"Anatomy of a DSRI application","u":"/docs/anatomy-of-an-application","h":"","p":328},{"i":331,"t":"First, you need to create your Template object; this is the main object we will create here, as all other objects defined will be deployed by this template. In this part we mainly just provide the description and information that will be shown to users when deploying the application from the DSRI web UI catalog. --- kind: Template apiVersion: template.openshift.io/v1 labels: template: jupyterlab-root metadata: name: jupyterlab-root annotations: openshift.io/display-name: JupyterLab description: |- Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need.
📂 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks - jupyter/tensorflow-notebook - jupyter/r-notebook - jupyter/all-spark-notebook - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels) Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages. iconClass: icon-python tags: python,jupyter,notebook openshift.io/provider-display-name: Institute of Data Science, UM openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help","s":"Application walkthrough","u":"/docs/anatomy-of-an-application","h":"#application-walkthrough","p":328},{"i":333,"t":"Then define the parameters the user will be able to set in the DSRI catalog web UI when instantiating the application. APPLICATION_NAME is the most important, as it will be used everywhere to create the objects and identify the application. parameters:- name: APPLICATION_NAME displayName: Name for the application description: Must be without spaces (use -), and unique in the project. value: jupyterlab required: true- name: PASSWORD displayName: JupyterLab UI Password description: The password/token to access the JupyterLab web UI required: true- name: APPLICATION_IMAGE displayName: Jupyter notebook Docker image value: ghcr.io/maastrichtu-ids/jupyterlab:latest required: true description: You can use any image based on https://github.com/jupyter/docker-stacks - name: STORAGE_SIZE displayName: Storage size description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`. value: 5Gi required: true We can then refer to those parameter values (filled in by the users of the template) in the rest of the template using this syntax: ${APPLICATION_NAME} We will now describe all the objects deployed when we instantiate this template (to start an application).","s":"Parameters","u":"/docs/anatomy-of-an-application","h":"#parameters","p":328},{"i":335,"t":"First we define the ImageStream object to import the Docker image(s) of your application(s) on the DSRI cluster. Setting importPolicy: scheduled to true will make the DSRI automatically check for new versions of this image, which can be useful if you want to always have the latest published version of an application. Visit the OpenShift ImageStreams documentation for more details. Be careful: enabling this feature without a real need will cause the DSRI to query DockerHub more, which might require you to log in to DockerHub to increase your image pull quota. objects:- kind: \"ImageStream\" apiVersion: image.openshift.io/v1 metadata: name: ${APPLICATION_NAME} labels: app: ${APPLICATION_NAME} spec: tags: - name: latest from: kind: DockerImage name: ${APPLICATION_IMAGE} importPolicy: scheduled: true lookupPolicy: local: true","s":"Image","u":"/docs/anatomy-of-an-application","h":"#image","p":328},{"i":337,"t":"Then we define the PersistentVolumeClaim, which is a persistent storage on which we will mount the /home/jovyan folder to avoid losing data if our application is restarted.
Any file outside of a persistent volume can be lost at any moment if the pod restarts; if you are properly working in the persistent volume folder, this usually only concerns temporary files. This can also be useful if your application is crashing: stopping and restarting your pod (application) might fix it. - kind: \"PersistentVolumeClaim\" apiVersion: \"v1\" metadata: name: ${APPLICATION_NAME} labels: app: ${APPLICATION_NAME} spec: accessModes: - \"ReadWriteMany\" resources: requests: storage: ${STORAGE_SIZE}","s":"Create storage","u":"/docs/anatomy-of-an-application","h":"#create-storage","p":328},{"i":339,"t":"Then the Secret to store the password - kind: \"Secret\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} stringData: application-password: \"${PASSWORD}\"","s":"Secret","u":"/docs/anatomy-of-an-application","h":"#secret","p":328},{"i":341,"t":"Then the DeploymentConfig (aka. Deployment) defines how to deploy the JupyterLab image. If you want to deploy another application alongside JupyterLab you can do it by adding as many deployments as you want (and use the same, or different, persistent volume claims for storage)! Check out the OpenShift Deployments documentation for more details. In this first block we define the strategy to update and recreate our application if you change the YAML configuration, or when a new latest Docker image is published, allowing your service to always use the latest up-to-date version of the software without any intervention from you. We chose the Recreate release option to make sure the container is properly recreated and to avoid unnecessary resource consumption, but you can also use Rolling to have a downtime-free transition between deployments. - kind: \"DeploymentConfig\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: \"${APPLICATION_NAME}\" spec: replicas: 1 strategy: type: \"Recreate\" triggers: - type: \"ConfigChange\" - type: \"ImageChange\" imageChangeParams: automatic: true containerNames: - jupyter-notebook from: kind: ImageStreamTag name: ${APPLICATION_NAME}:latest selector: app: \"${APPLICATION_NAME}\" deploymentconfig: \"${APPLICATION_NAME}\"","s":"Deployment","u":"/docs/anatomy-of-an-application","h":"#deployment","p":328},{"i":343,"t":"Then we define the spec of the pod that will be deployed by this DeploymentConfig. Setting serviceAccountName: anyuid is required for most Docker containers, as it allows running a container using any user ID (e.g. root). Otherwise OpenShift expects the container to run with a random user ID, which requires the Docker image to be built specifically to work with random user IDs. We then create the containers: array, which is where we define the containers deployed in the pod. It is recommended to deploy 1 container per pod, as it enables a better separation and management of the applications, unless you know what you are doing. You can also provide the command to run at the start of the container to overwrite the default one, and define the exposed ports (here 8888).
template: metadata: labels: app: \"${APPLICATION_NAME}\" deploymentconfig: \"${APPLICATION_NAME}\" spec: serviceAccountName: \"anyuid\" containers: - name: \"jupyter-notebook\" image: \"${APPLICATION_NAME}:latest\" command: - \"start-notebook.sh\" - \"--no-browser\" - \"--ip=0.0.0.0\" ports: - containerPort: 8888 protocol: TCP","s":"Pod spec","u":"/docs/anatomy-of-an-application","h":"#pod-spec","p":328},{"i":345,"t":"Then define the environment variables used in your container; usually the password and most parameters are set here, such as enabling sudo in the container. env: - name: JUPYTER_TOKEN valueFrom: secretKeyRef: key: \"application-password\" name: \"${APPLICATION_NAME}\" - name: JUPYTER_ENABLE_LAB value: \"yes\" - name: GRANT_SUDO value: \"yes\"","s":"Environment variables in the container","u":"/docs/anatomy-of-an-application","h":"#environment-variables-in-the-container","p":328},{"i":347,"t":"Then we need to mount the previously created PersistentVolume on /home/jovyan, the workspace of JupyterLab. Be careful: volumeMounts is in the containers: object, and volumes is defined in the spec: object volumeMounts: - name: data mountPath: \"/home/jovyan\" volumes: - name: data persistentVolumeClaim: claimName: \"${APPLICATION_NAME}\"","s":"Mount storage","u":"/docs/anatomy-of-an-application","h":"#mount-storage","p":328},{"i":349,"t":"Then we define the securityContext to allow JupyterLab to run as root; this is not required for most applications, it is just a specificity of the official Jupyter images, which run with root privileges. securityContext: runAsUser: 0 supplementalGroups: - 100 automountServiceAccountToken: false","s":"Security context","u":"/docs/anatomy-of-an-application","h":"#security-context","p":328},{"i":351,"t":"Then we create the Service to expose the port 8888 of our JupyterLab container on the project network. This means that the JupyterLab web UI will be reachable by all other applications deployed in your project using its application name as hostname (e.g. jupyterlab) - kind: \"Service\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} spec: ports: - name: 8888-tcp protocol: TCP port: 8888 targetPort: 8888 selector: app: ${APPLICATION_NAME} deploymentconfig: \"${APPLICATION_NAME}\" type: ClusterIP","s":"Service","u":"/docs/anatomy-of-an-application","h":"#service","p":328},{"i":353,"t":"Finally, we define the Route which will automatically generate a URL for the service of your application based on this template: APPLICATION_NAME-PROJECT_ID-DSRI_URL - kind: \"Route\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} spec: host: '' to: kind: Service name: \"${APPLICATION_NAME}\" weight: 100 port: targetPort: 8888-tcp tls: termination: edge insecureEdgeTerminationPolicy: Redirect","s":"Route","u":"/docs/anatomy-of-an-application","h":"#route","p":328},{"i":355,"t":"Here is the complete file describing the JupyterLab deployment template. You can add it to your project catalog by going to +Add in the DSRI web UI, then clicking on the option to add a YAML file content, and copy-pasting the template YAML. --- kind: Template apiVersion: template.openshift.io/v1 labels: template: jupyterlab-root metadata: name: jupyterlab-root annotations: openshift.io/display-name: JupyterLab description: |- Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need.
📂 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks - jupyter/tensorflow-notebook - jupyter/r-notebook - jupyter/all-spark-notebook - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels) Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages. iconClass: icon-python tags: python,jupyter,notebook openshift.io/provider-display-name: Institute of Data Science, UM openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help parameters:- name: APPLICATION_NAME displayName: Name for the application description: Must be without spaces (use -), and unique in the project. value: jupyterlab required: true- name: PASSWORD displayName: JupyterLab UI Password description: The password/token to access the JupyterLab web UI required: true- name: APPLICATION_IMAGE displayName: Jupyter notebook Docker image value: ghcr.io/maastrichtu-ids/jupyterlab:latest required: true description: You can use any image based on https://github.com/jupyter/docker-stacks- name: STORAGE_SIZE displayName: Storage size description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`. value: 5Gi required: true objects:- kind: \"ImageStream\" apiVersion: image.openshift.io/v1 metadata: name: ${APPLICATION_NAME} labels: app: ${APPLICATION_NAME} spec: tags: - name: latest from: kind: DockerImage name: ${APPLICATION_IMAGE} lookupPolicy: local: true- kind: \"PersistentVolumeClaim\" apiVersion: \"v1\" metadata: name: ${APPLICATION_NAME} labels: app: ${APPLICATION_NAME} spec: accessModes: - \"ReadWriteMany\" resources: requests: storage: ${STORAGE_SIZE}- kind: \"Secret\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} stringData: application-password: \"${PASSWORD}\"- kind: \"DeploymentConfig\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: \"${APPLICATION_NAME}\" spec: replicas: 1 strategy: type: Recreate triggers: - type: ConfigChange - type: ImageChange imageChangeParams: automatic: true containerNames: - jupyter-notebook from: kind: ImageStreamTag name: ${APPLICATION_NAME}:latest selector: app: \"${APPLICATION_NAME}\" deploymentconfig: \"${APPLICATION_NAME}\" template: metadata: labels: app: \"${APPLICATION_NAME}\" deploymentconfig: \"${APPLICATION_NAME}\" spec: serviceAccountName: \"anyuid\" containers: - name: jupyter-notebook image: \"${APPLICATION_NAME}:latest\" command: - \"start-notebook.sh\" - \"--no-browser\" - \"--ip=0.0.0.0\" ports: - containerPort: 8888 protocol: TCP env: - name: \"JUPYTER_TOKEN\" valueFrom: secretKeyRef: key: application-password name: \"${APPLICATION_NAME}\" - name: JUPYTER_ENABLE_LAB value: \"yes\" - name: GRANT_SUDO value: \"yes\" volumeMounts: - name: data mountPath: \"/home/jovyan\" volumes: - name: data persistentVolumeClaim: claimName: \"${APPLICATION_NAME}\" securityContext: runAsUser: 0 supplementalGroups: - 100 automountServiceAccountToken: false- kind: \"Service\" apiVersion: v1 metadata: name: 
\"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} spec: ports: - name: 8888-tcp protocol: TCP port: 8888 targetPort: 8888 selector: app: ${APPLICATION_NAME} deploymentconfig: \"${APPLICATION_NAME}\" type: ClusterIP- kind: \"Route\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} spec: host: '' to: kind: Service name: \"${APPLICATION_NAME}\" weight: 100 port: targetPort: 8888-tcp tls: termination: edge insecureEdgeTerminationPolicy: Redirect","s":"The complete application","u":"/docs/anatomy-of-an-application","h":"#the-complete-application","p":328},{"i":357,"t":"This practice is more advanced, and is not required for most deployments, but you can easily create a ConfigMap object to define any file to be provided at runtime to the application. For example here we are going to define a python script that will be run when starting JupyterLab (jupyter_notebook_config.py). It will clone the git repository URL, provided by the user when creating the template, at the start of JupyterLab in the workspace. If this repo contains files with list of packages in the root folder (requirements.txt and packages.txt), they will be installed at start - kind: ConfigMap apiVersion: v1 metadata: name: \"${APPLICATION_NAME}-cfg\" labels: app: \"${APPLICATION_NAME}\" data: # Clone git repo, then install requirements.txt and packages.txt jupyter_notebook_config.py: | import os git_url = os.environ.get('GIT_URL') home_dir = os.environ.get('HOME') os.chdir(home_dir) if git_url: repo_id = git_url.rsplit('/', 1)[-1] os.system('git clone --quiet --recursive ' + git_url) os.chdir(repo_id) if os.path.exists('packages.txt'): os.system('sudo apt-get update') os.system('cat packages.txt | xargs sudo apt-get install -y') if os.path.exists('requirements.txt'): os.system('pip install -r requirements.txt') os.chdir(home_dir) We will then need to mount this config file like a persistent volume in the path we want it to be (here /etc/jupyter/openshift), change the volumes and volumeMounts of your DeploymentConfig: volumeMounts: - name: data mountPath: \"/home/jovyan\" - name: configs mountPath: \"/etc/jupyter/openshift\" automountServiceAccountToken: false volumes: - name: data persistentVolumeClaim: claimName: \"${APPLICATION_NAME}\" - name: configs configMap: name: \"${APPLICATION_NAME}-cfg\" Then change the jupyter-notebook container start command to include this config file: command: - \"start-notebook.sh\" - \"--no-browser\" - \"--ip=0.0.0.0\" - \"--config=/etc/jupyter/openshift/jupyter_notebook_config.py\" Add the optional parameter to get the git URL to clone when the user create the template: parameters:- name: GIT_URL displayName: URL of the git repository to clone (optional) required: false description: Source code will be automatically cloned, then requirements.txt and packages.txt content will be automatically installed if presents Finally, add the git URL parameter provided by the user as environment variable of the container, so that it is picked up by the config script when running at the start of JupyterLab: env: - name: GIT_URL value: \"${GIT_URL}\"","s":"Add a configuration file","u":"/docs/anatomy-of-an-application","h":"#add-a-configuration-file","p":328},{"i":359,"t":"You can add readiness and liveness probes to a container to automatically check if the web application is up and ready. This will allow to wait for the JupyterLab web UI to be accessible before showing the application as ready in the Topology. 
,{"i":359,"t":"You can add readiness and liveness probes to a container to automatically check if the web application is up and ready. This makes OpenShift wait for the JupyterLab web UI to be accessible before showing the application as ready in the Topology. Useful if you are cloning a repository and installing packages, which makes JupyterLab take more time to start. containers: - name: jupyter-notebook readinessProbe: tcpSocket: port: 8888 livenessProbe: initialDelaySeconds: 15 tcpSocket: port: 8888 failureThreshold: 40 periodSeconds: 10 timeoutSeconds: 2 Check out the OpenShift Application health documentation for more details.","s":"Add automated health checks","u":"/docs/anatomy-of-an-application","h":"#add-automated-health-checks","p":328},{"i":361,"t":"You can also define resource requests and limits for each DeploymentConfig, in spec: spec: resources: requests: cpu: \"1\" memory: \"2Gi\" limits: cpu: \"128\" memory: \"300Gi\"","s":"Define resource limits","u":"/docs/anatomy-of-an-application","h":"#define-resource-limits","p":328},{"i":363,"t":"The easiest way to build a template for a new application is to start from this JupyterLab template: Replace jupyterlab-root by your application name Replace 8888 by your application port Change the template and parameters descriptions to match your application Remove the securityContext part, and other objects you do not need If you need to start multiple containers, copy/paste the objects you need to create and edit them","s":"Build your own application template","u":"/docs/anatomy-of-an-application","h":"#build-your-own-application-template","p":328},{"i":365,"t":"Deploy applications Deploy from a Dockerfile","s":"Deploy from a Dockerfile","u":"/docs/guide-dockerfile-to-openshift","h":"","p":364},{"i":367,"t":"This manual shows you an example of how to convert a Dockerfile from your local machine to a running container on the DSRI (OpenShift / OKD). Start by cloning the example repository to your local machine. git clone git@gitlab.maastrichtuniversity.nl:dsri-examples/dockerfile-to-okd.git After cloning you now have a local folder containing a Dockerfile and an index.html file. Inspect both files. Log in with the OpenShift client: authenticate to the OpenShift cluster using oc login. oc login --token= Create a new project if you don't have a project yet you can work with (change myproject to a project name of your choice): oc new-project myproject","s":"Build from local Dockerfile","u":"/docs/guide-dockerfile-to-openshift","h":"#build-from-local-dockerfile","p":364},{"i":369,"t":"oc new-build --name dockerfile-to-okd --binary","s":"Create new build configuration.","u":"/docs/guide-dockerfile-to-openshift","h":"#create-new-build-configuration","p":364},{"i":371,"t":"Start a new build on the DSRI with the files provided: cd dockerfile-to-okd oc start-build dockerfile-to-okd --from-dir=.
--follow --wait","s":"Build the image","u":"/docs/guide-dockerfile-to-openshift","h":"#build-the-image","p":364},{"i":373,"t":"Create a new app using the build we just created: oc new-app dockerfile-to-okd To properly deploy your app on OpenShift you will need to define a few more parameters: Enable root user access (with serviceAccountName) by running this command: oc patch deployment/dockerfile-to-okd --patch '{\"spec\":{\"template\": {\"spec\":{\"serviceAccountName\": \"anyuid\"}}}}' You can also add persistent storage (with volumes and containers: volumeMounts) ${STORAGE_NAME}: Name of your persistent volume claim in the Storage page of your project in the web UI ${STORAGE_FOLDER}: Name of the folder inside the persistent volume claim to store the application data (so you can store multiple applications on the same persistent volume claim) Open the configuration of the started app to fix its configuration: oc edit deployment/dockerfile-to-okd You can mount an existing persistent volume this way (replace the variables, such as ${STORAGE_NAME}, by your values): template: spec: serviceAccountName: anyuid volumes: - name: data persistentVolumeClaim: claimName: \"${STORAGE_NAME}\" containers: - image: rstudio-root:latest volumeMounts: - name: data mountPath: \"/home/rstudio\" subPath: \"${STORAGE_FOLDER}\" Generate deployment file in YAML You can also generate the app deployment in a YAML file to edit it before starting: oc new-app dockerfile-to-okd -o yaml > myapp.yml # Edit myapp.yml oc create -f myapp.yml","s":"Create your app","u":"/docs/guide-dockerfile-to-openshift","h":"#create-your-app","p":364},{"i":375,"t":"Expose the application so you can reach it from your browser, and check the route that was created: oc expose svc/dockerfile-to-okd oc get route You can now visit the route shown in the HOST/PORT output of the oc get route command and see if you have successfully converted the Dockerfile. You can edit the created route to enable HTTPS with this command: oc patch route/dockerfile-to-okd --patch '{\"spec\":{\"tls\": {\"termination\": \"edge\", \"insecureEdgeTerminationPolicy\": \"Redirect\"}}}'","s":"Expose app","u":"/docs/guide-dockerfile-to-openshift","h":"#expose-app","p":364},{"i":377,"t":"oc delete build dockerfile-to-okd See the oc delete documentation.","s":"Delete the created build","u":"/docs/guide-dockerfile-to-openshift","h":"#delete-the-created-build","p":364},{"i":379,"t":"You can also deploy a local docker image from your machine. First build the docker image: docker build -t my-docker-image:latest . Check that you have the image locally on your system: docker image ls You should have a docker image for your application: REPOSITORY TAG my-docker-image latest You can then deploy it by providing the docker image name and the name of the application to be deployed: oc new-app my-docker-image --name app-name-on-openshift","s":"Deploy from a local docker image","u":"/docs/guide-dockerfile-to-openshift","h":"#deploy-from-a-local-docker-image","p":364},{"i":381,"t":"Go to +Add > From Git: https://console-openshift-console.apps.dsri2.unimaas.nl/import Follow the instructions given by the web UI: provide the URL to your git repository and the port on which the web interface will be deployed; you can also create a secret for git login if the repository is private. Once the container has started you will need to make a small change to enable it to run with any user ID (due to OpenShift security policies).
You can do it with the command line (just replace your-app-name with your application name) oc patch deployment/your-app-name --patch '{\"spec\":{\"template\": {\"spec\":{\"serviceAccountName\": \"anyuid\"}}}}' Or through the web UI: click on your deployment, then Actions > Edit Deployment. And edit the YAML of your deployment to add serviceAccountName: anyuid under template.spec: template: spec: serviceAccountName: anyuid containers: - [...]","s":"Deploy from a Git repository","u":"/docs/guide-dockerfile-to-openshift","h":"#deploy-from-a-git-repository","p":364},{"i":383,"t":"On this page","s":"Install local OpenShift","u":"/docs/guide-local-install","h":"","p":382},{"i":385,"t":"You will need to set up the virtualization environment before installing MiniShift. Download MiniShift and unzip it. # For Ubuntu 18.04 and older sudo apt install -y libvirt-bin qemu-kvm # For Ubuntu 18.10 and newer (replace libvirtd by libvirt in the next commands) sudo apt install -y qemu-kvm libvirt-daemon libvirt-daemon-system # Create the group if it does not exist sudo addgroup libvirtd sudo adduser $(whoami) libvirtd sudo usermod -a -G libvirtd $(whoami) newgrp libvirtd curl -L https://github.com/dhiltgen/docker-machine-kvm/releases/download/v0.10.0/docker-machine-driver-kvm-ubuntu16.04 -o /usr/local/bin/docker-machine-driver-kvm sudo chmod +x /usr/local/bin/docker-machine-driver-kvm # Check if libvirtd is running systemctl is-active libvirtd # Start it if inactive sudo systemctl start libvirtd # Copy MiniShift in your path cp minishift-1.34.1-linux-amd64/minishift /usr/local/bin","s":"Install MiniShift","u":"/docs/guide-local-install","h":"#install-minishift","p":382},{"i":387,"t":"minishift start Get your local OpenShift cluster URL after the command completes.","s":"Start MiniShift","u":"/docs/guide-local-install","h":"#start-minishift","p":382},{"i":389,"t":"Go to your local cluster URL. E.g. https://192.168.42.58:8443/console/catalog. Username: admin or developer Password: anything will work # As admin oc login -u system:admin","s":"Login","u":"/docs/guide-local-install","h":"#login","p":382},{"i":391,"t":"minishift stop","s":"Stop","u":"/docs/guide-local-install","h":"#stop","p":382},{"i":393,"t":"minishift delete -f","s":"Reset","u":"/docs/guide-local-install","h":"#reset","p":382},{"i":396,"t":"For more details: read the official install Kubernetes on Ubuntu tutorial or see the official Ubuntu Kubernetes install documentation. sudo snap install microk8s --classic sudo usermod -a -G microk8s $USER # Restart your machine mkdir -p ~/.kube microk8s.kubectl config view --raw > $HOME/.kube/config # Make sure this works for the dashboard on Ubuntu microk8s.enable dashboard dns To do only if kubectl is not already installed on your machine: sudo snap alias microk8s.kubectl kubectl","s":"kubectl on Ubuntu","u":"/docs/guide-local-install","h":"#kubectl-on-ubuntu","p":382},{"i":398,"t":"Included in the Docker installation. Use the installer provided by DockerHub. Activate it in Docker Preferences > Kubernetes. For Windows you will need to download the kubectl.exe and place it in your PATH.
https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/windows/amd64/kubectl.exe We recommend creating a kubectl directory in C:/ and adding this C:/kubectl to the Path environment variable in System properties > Advanced > Environment Variables > Path","s":"kubectl on MacOS & Windows","u":"/docs/guide-local-install","h":"#kubectl-on-macos--windows","p":382},{"i":400,"t":"# Install the Kubernetes UI kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml kubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-dashboard-adminuser-sa.yml kubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-adminuser-rolebinding.yml # Get the token to access the dashboard kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') # Windows users will need to execute the 2 commands manually: kubectl -n kube-system get secret # And get the token containing 'admin-user' kubectl -n kube-system describe secret # For Windows: give the anonymous user global access kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=admin --user=system:anonymous # Note: this could be improved. I think only the Dashboard UI didn't have the required permissions. # Finally, start the web UI, and choose the Token connection kubectl proxy Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ And provide the previously obtained token. Warning: you will need to save the token to log in again next time (use your browser's password manager if possible).","s":"Install the Dashboard UI","u":"/docs/guide-local-install","h":"#install-the-dashboard-ui","p":382},{"i":402,"t":"kubectl should be running at start. Just restart the web UI: kubectl proxy Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/","s":"Run kubectl","u":"/docs/guide-local-install","h":"#run-kubectl","p":382},{"i":404,"t":"Debug DNS on Ubuntu microk8s.enable dns Restart your machine. You might need to change your firewall configuration. On Ubuntu: sudo ufw allow in on cni0 sudo ufw allow out on cni0 sudo ufw default allow routed Try to connect to the internet from Kubernetes with the test-busybox pod. kubectl exec -ti busybox -- /bin/sh ping google.com","s":"Enable internet","u":"/docs/guide-local-install","h":"#enable-internet","p":382},{"i":406,"t":"# Create the volume kubectl apply -n argo -f d2s-core/argo/storage/storage-mac.yaml Not working at the moment.","s":"Create persistent volume","u":"/docs/guide-local-install","h":"#create-persistent-volume","p":382},{"i":408,"t":"Clean uninstall before 2.2. kubectl get cm workflow-controller-configmap -o yaml -n kube-system --export | kubectl apply -n argo -f - kubectl delete -n kube-system cm workflow-controller-configmap kubectl delete -n kube-system deploy workflow-controller argo-ui kubectl delete -n kube-system sa argo argo-ui kubectl delete -n kube-system svc argo-ui","s":"Uninstall","u":"/docs/guide-local-install","h":"#uninstall","p":382},{"i":411,"t":"Argo workflows will be installed in the argo namespace. See the official Argo documentation for more details.
kubectl create ns argo kubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.4.2/manifests/install.yaml # Configure the service account to run workflows kubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=default:default # Test run argo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml See custom configuration for namespace install. kubectl apply -n argo -f https://raw.githubusercontent.com/vemonet/argo/master/manifests/namespace-install.yaml","s":"Install on your local Kubernetes","u":"/docs/guide-local-install","h":"#install-on-your-local-kubernetes","p":382},{"i":413,"t":"See the Argo workflows documentation.","s":"Install the client","u":"/docs/guide-local-install","h":"#install-the-client","p":382},{"i":415,"t":"kubectl -n argo port-forward deployment/argo-ui 8002:8001 Access on http://localhost:8002.","s":"Expose the UI","u":"/docs/guide-local-install","h":"#expose-the-ui","p":382},{"i":417,"t":"Guides Publish a Docker image","s":"Publish a Docker image","u":"/docs/guide-publish-image","h":"","p":416},{"i":420,"t":"Use your existing GitHub account if you have one: Create a Personal Access Token for GitHub packages at https://github.com/settings/tokens/new Provide a meaningful description for the token, and enable the following scopes when creating the token: write:packages: publish container images to GitHub Container Registry delete:packages: delete specified versions of private or public container images from GitHub Container Registry You might want to store this token in a safe place, as you will not be able to retrieve it later on github.com (you can still delete it, and easily create a new token if you lose it) 👨‍💻 Log in to the GitHub Container Registry in your terminal (change USERNAME and ACCESS_TOKEN to yours): echo \"ACCESS_TOKEN\" | docker login ghcr.io -u USERNAME --password-stdin On Windows use this command: docker login ghcr.io -u USERNAME -p \"ACCESS_TOKEN\" See the official GitHub documentation.","s":"Login to GitHub Container Registry","u":"/docs/guide-publish-image","h":"#login-to-github-container-registry","p":416},{"i":422,"t":"Create an account at https://quay.io Log in from your terminal (you will be asked for your username and password): docker login quay.io","s":"Login to quay.io","u":"/docs/guide-publish-image","h":"#login-to-quayio","p":416},{"i":424,"t":"Get a DockerHub account at https://hub.docker.com (you most probably already have one if you installed Docker Desktop) 👩‍💻 Run in your terminal: docker login Provide your DockerHub username and password.","s":"Login to DockerHub","u":"/docs/guide-publish-image","h":"#login-to-dockerhub","p":416},{"i":426,"t":"Once you have built a Docker image and logged in to a Container Registry, you might want to publish the image to pull and re-use it easily later.","s":"Publish your image 📢","u":"/docs/guide-publish-image","h":"#publish-your-image-","p":416},{"i":428,"t":"Free for public images The GitHub Container Registry is still in beta but will be free for public images when fully released. It enables you to store your Docker images at the same place you keep your code!
📦 Publish to your user Container Registry on GitHub: docker build -t ghcr.io/github-username/my-image:latest . docker push ghcr.io/github-username/my-image:latest For example, to the MaastrichtU-IDS organization Container Registry on GitHub: docker build -t ghcr.io/maastrichtu-ids/jupyterlab:latest . docker push ghcr.io/maastrichtu-ids/jupyterlab:latest Created automatically If the image does not exist, GitHub Container Registry will create it automatically and set it as Private by default. You can easily change it to Public in the image settings on github.com.","s":"Publish to GitHub Container Registry","u":"/docs/guide-publish-image","h":"#publish-to-github-container-registry","p":416},{"i":430,"t":"Free for public images Quay.io is free for public images and does not restrict image pulls. Create the image on quay.io Build and push to quay.io docker build -t quay.io/quay-username/my-image:latest . docker push quay.io/quay-username/my-image:latest","s":"Publish to Quay.io","u":"/docs/guide-publish-image","h":"#publish-to-quayio","p":416},{"i":432,"t":"DockerHub pull rates limitations ⚠️ DockerHub imposes strict pull limitations for clusters like the DSRI (using DockerHub might result in failing to pull your images on the DSRI). We highly recommend using the GitHub Container Registry or the RedHat quay.io Container Registry to publish public Docker images. Logged in If you are logged in with your DockerHub user on the DSRI, it should allow you to pull DockerHub images in your project (see above). Create the repository on DockerHub (attached to your user or an organization) Build and push the image: docker build -t dockerhub-username/jupyterlab:latest . docker push dockerhub-username/jupyterlab:latest You can also change the name (aka. tag) of an existing image: docker build -t my-jupyterlab . docker tag my-jupyterlab ghcr.io/github-username/jupyterlab:latest","s":"Publish to DockerHub","u":"/docs/guide-publish-image","h":"#publish-to-dockerhub","p":416},{"i":434,"t":"You can automate the building and publication of Docker images using GitHub Actions workflows 🔄 Use a working workflow as an example 👀 Check the .github/workflows/publish-docker.yml file to see an example of a workflow to publish an image to the GitHub Container Registry. 👩‍💻 You only need to change the IMAGE_NAME, and use it in your GitHub repository to publish a Docker image for your application automatically! It will build from a Dockerfile at the root of the repository. Workflow triggers The workflow can be easily configured to: publish a new image to the latest tag at each push to the main branch publish an image to a new tag if a release is pushed on GitHub (using the git tag) e.g.
v0.0.1 published as image 0.0.1","s":"Use automated workflows","u":"/docs/guide-publish-image","h":"#use-automated-workflows","p":416},{"i":436,"t":"Get started Monitor your applications","s":"Monitor your applications","u":"/docs/guide-monitoring","h":"","p":435},{"i":438,"t":"You can have an overview of the different resources consumed by the applications running in your project by going to the Monitoring tab (in the developer view) You can also check the CPU and memory usage directly from the terminal inside a specific container Go to your application terminal, and run: top Check the number of Cpu(s) used at the top: %Cpu(s): 3,3 us, Check the memory usage with the used column: MiB Mem : 515543.2 total, 403486.8 free, 98612.0 used, 13444.5 buff/cache","s":"Monitor your application resources use","u":"/docs/guide-monitoring","h":"#monitor-your-application-resources-use","p":435},{"i":440,"t":"If your application is facing issues when deployed: If the pod is not building, or not deploying properly, take a look at the Events tab of the deployment. It shows a log of all events faced by the deployment (assignment to a node, image pull, build, etc.). Additionally, all Events in your project can be accessed in Monitoring. Various ways to check the events You can also check the Monitoring page in the left side menu to see all events in a project. Or use the terminal: oc get events When a pod is running you can check its logs in the Logs tab (after going to the pod page). It will show the logs output of the container, equivalent to doing docker logs. Get help If you cannot figure out the issue by yourself: Gather relevant information to help the DSRI team solve your issue: the URL to the faulty application, and the error shown in the Events tab or in the Logs tab Seek help on the #helpdesk DSRI Slack channel Check if an issue has already been created for this problem, or create a new one: https://github.com/MaastrichtU-IDS/dsri-documentation/issues","s":"Debug an application deployment","u":"/docs/guide-monitoring","h":"#debug-an-application-deployment","p":435},{"i":442,"t":"Guides Install UM VPN","s":"Install UM VPN","u":"/docs/guide-vpn","h":"","p":441},{"i":444,"t":"You will need to have an account at Maastricht University with an email ending with @maastrichtuniversity.nl or @student.maastrichtuniversity.nl. Request access to the DSRI for your account Please fill in this form 📬 to provide us with some information on what you plan to do with the DSRI.","s":"Request an account","u":"/docs/guide-vpn","h":"#request-an-account","p":441},{"i":446,"t":"You need to be connected to the UM network to access the DSRI. Connect to UMnet or eduroam WiFi at Maastricht University Use the Maastricht University VPN at vpn.maastrichtuniversity.nl Log in to it using your UM username and password. Students By default the UM VPN is only available to employees. As a student you can access UM resources from any location via Student Desktop Anywhere. However, if VPN access is absolutely necessary you can request access via your course coordinator. The prefix of your UM email address with the first letter capitalized, e.g. Firstname.Lastname, or your employee number at Maastricht University (a.k.a. P number), e.g. P7000000 Then you will see the page below to download the AnyConnect Secure Mobility Client Install the VPN (AnyConnect Secure Mobility Client) on Windows Double click on the .exe file to install the VPN. You can follow the steps shown in the pictures below.
Log in to the VPN (AnyConnect Secure Mobility Client) Once you have finished installing, you can run the Cisco AnyConnect Secure Mobility Client. You will then see the wizard below; click Connect. Provide your UM username and password. (employee number at Maastricht University (a.k.a. P number), e.g. P7000000) Install the VPN (AnyConnect Secure Mobility Client) on Linux Connect to UMnet or eduroam WiFi at Maastricht University For Linux, use openconnect to connect to the UM VPN. You can easily install it on Ubuntu and Debian distributions with apt: sudo apt install openconnect sudo openconnect -u YOUR.USER --authgroup 01-Employees --useragent=AnyConnect vpn.maastrichtuniversity.nl Provide your UM password when prompted. For students: By default the UM VPN is only available to employees. As a student you can access UM resources from any location via Student Desktop Anywhere. However, if VPN access is absolutely necessary you can request access via your course coordinator.","s":"Connect to the UM network","u":"/docs/guide-vpn","h":"#connect-to-the-um-network","p":441},{"i":448,"t":"Deploy applications Install from Helm charts","s":"Install from Helm charts","u":"/docs/helm","h":"","p":447},{"i":451,"t":"The Go language is required to run Helm. Install go 1.14.4 on Linux; you can find instructions for MacOS, Windows and newer versions at https://golang.org/dl wget https://dl.google.com/go/go1.14.4.linux-amd64.tar.gz # Extract to /usr/local tar -C /usr/local -xzf go1.14.4.linux-amd64.tar.gz # Add Go to the path in .profile echo \"export PATH=$PATH:/usr/local/go/bin\" >> ~/.profile # Or in .zshrc if you use ZSH echo \"export PATH=$PATH:/usr/local/go/bin\" >> ~/.zshrc Restart your laptop for the changes to take effect, or execute source ~/.profile","s":"Install Golang","u":"/docs/helm","h":"#install-golang","p":447},{"i":453,"t":"You can also use the official documentation to install Helm on your machine. Install on Linux curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash See the Helm documentation for Linux. Install on MacOS brew install helm See the Helm documentation for MacOS. Install on Windows Install using Chocolatey. choco install kubernetes-helm See the Helm documentation for Windows.","s":"Install Helm","u":"/docs/helm","h":"#install-helm","p":447},{"i":455,"t":"helm version","s":"Check Helm installation","u":"/docs/helm","h":"#check-helm-installation","p":447},{"i":457,"t":"Explore published Helm charts at https://hub.helm.sh ⛵","s":"Install a Helm chart","u":"/docs/helm","h":"#install-a-helm-chart","p":447},{"i":459,"t":"Example from the OpenShift 4.3 documentation. See also the official Helm documentation. Add the repository of official Helm charts to your local Helm client: helm repo add stable https://kubernetes-charts.storage.googleapis.com/ Update the repository: helm repo update Install an example MySQL chart, and start the application named example-mysql: helm install example-mysql stable/mysql Password The instructions to retrieve the admin password and connect to the database will be displayed in the terminal.
","s":"Start a MySQL database with Helm","u":"/docs/helm","h":"#start-a-mysql-database-with-helm","p":447},{"i":461,"t":"helm uninstall example-mysql","s":"Uninstall the application","u":"/docs/helm","h":"#uninstall-the-application","p":447},{"i":463,"t":"You can also define deployment parameters when installing a Helm chart, such as the service account and node selector. For example, here we make sure the application will run on DSRI CPU nodes and use the anyuid service account: Add the Bitnami repository: helm repo add bitnami https://charts.bitnami.com/bitnami Install and start Postgresql (note that the dots in the node selector key need to be escaped, and --set-string keeps the value a string, as Kubernetes requires for node selectors): helm install postgresql-db bitnami/postgresql --set-string nodeSelector.\"dsri\\.unimaas\\.nl/cpu\"=true --set serviceAccount.name=anyuid","s":"Set deployment parameters","u":"/docs/helm","h":"#set-deployment-parameters","p":447},{"i":465,"t":"Miscellaneous Increase your processes speed","s":"Increase your processes speed","u":"/docs/increase-process-speed","h":"","p":464},{"i":467,"t":"With the DSRI you get access to a workspace with more memory and cores than your laptop (around 200G of memory and 64 cores on the DSRI, against around 16G of memory and 8 cores on your laptop). Those additional resources might help to make your workload run faster, but not automatically! It will run faster: If your code can make use of the really large amount of RAM to load more of the data to process in memory. But if your workload does not require dozens of GB of memory, and your laptop does not run out of memory or crash when you run your workload, then you probably already have enough memory on your laptop, and will not gain a significant boost from the increased memory. If you can run your workload in parallel, or enable the libraries you use to use the available cores. This will highly depend on the libraries you use: do they support running their processes in parallel? Do you need to explicitly enable parallelism on a specific number of cores? Proper parallelism is not achieved easily; it needs to be explicitly implemented within the library processes. For example, Python has a \"Global Interpreter Lock\" (a.k.a. GIL) that limits thread parallelism by design, so when you are doing some work on a spreadsheet with pandas, you are only going to use 1 thread (which is nice, because it makes the conceptualization and understanding of algorithms easier, but it also makes it harder to write truly efficient libraries). You will need to use complementary libraries if you want to use more threads while processing data with pandas. There are multiple ways and libraries to achieve this, but the easiest, if you want to check it yourself with pandas, is to use pandarallel. You could also implement the parallelism yourself with concurrent.futures","s":"The good","u":"/docs/increase-process-speed","h":"#the-good","p":464},{"i":469,"t":"Until now everything seems good: more memory, more cores... So, what's the catch? It can only get better, no? Applications and workspaces running on the DSRI use a persistent volume to avoid losing data when the application is restarted. 
On most workspaces this persistent volume is mounted on the workspace working directory. This persistent volume is not hosted directly on the same node as your application, it's hosted on the cluster in a distributed fashion (remember you can attach this persistent volume to different applications, which might be hosted on different nodes themselves). And distributed storage means: slower read and write times! On your laptop the data is on a hard drive sitting 2 cm from the CPU and memory. On the DSRI your workspace might be on node 4, while the persistent volume is on node 8; in this case the data will need to go through the network. So if you write a script to just load data, do no computing, and write back the data to the persistent volume, it will probably be much faster on your laptop than on the DSRI!","s":"The bad","u":"/docs/increase-process-speed","h":"#the-bad","p":464},{"i":471,"t":"Usually only 1 folder (and its subfolders) is mounted on the persistent volume. The rest is \"ephemeral storage\", which is the data bound to the application you started; this means the data will be stored on the same node as your workspace, which might result in faster read/write speeds! But it also means the data will be lost if the workspace is restarted (which does not happen every day, but can happen without notice). A solution could be to: Keep your code and important data as backup in the persistent volume (usually the workspace working dir) Copy the data your process needs to load to a folder outside of the persistent volume (on the ephemeral storage) Read/write data mostly from this folder on the ephemeral storage, and avoid using the data in the persistent volume folder as much as possible Copy the important result files or temporary files you don't want to lose from the folder on the ephemeral storage to the folder on the persistent storage
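A minimal sketch of this workflow (the paths are illustrative: it assumes the persistent volume is mounted on /home/jovyan, that /tmp/data sits on the ephemeral storage, and process.py is a hypothetical processing script):
# Copy the input data from the persistent volume to the ephemeral storage
mkdir -p /tmp/data
cp -r /home/jovyan/input-data /tmp/data/
# Run the processing against the ephemeral copy (faster read/write)
python process.py --input /tmp/data/input-data --output /tmp/data/results
# Copy the results you want to keep back to the persistent volume
cp -r /tmp/data/results /home/jovyan/results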
Let us know how it works for you on the Slack #general channel, and if you have suggestions to improve the workspaces.","s":"The solution","u":"/docs/increase-process-speed","h":"#the-solution","p":464},{"i":473,"t":"Guides Prepare a workshop","s":"Prepare a workshop","u":"/docs/guide-workshop","h":"","p":472},{"i":475,"t":"If the users are students from Maastricht University, or not from Maastricht University (without an email @maastrichtuniversity.nl, or @maastro.nl), you will need to contact the ICT support of your department to request the creation of accounts so that your users can connect to the UM VPN. At FSE, you will need to send an email to lo-fse@maastrichtuniversity.nl and DSRI-SUPPORT-L@maastrichtuniversity.nl with the following information: Emails of the users Why they need access to the DSRI (provide the ID of the course at Maastricht University if it is for a course) Until which date the users will need those VPN accounts","s":"Request VPN accounts for users","u":"/docs/guide-workshop","h":"#request-vpn-accounts-for-users","p":472},{"i":477,"t":"Fill in this form 📬 to give us more details on your project (you don't need to do it if you have already filled it in the past).","s":"Fill a form","u":"/docs/guide-workshop","h":"#fill-a-form","p":472},{"i":479,"t":"Use the DSRI documentation to explain to your users how to access the DSRI.","s":"Prepare your workshop","u":"/docs/guide-workshop","h":"#prepare-you-workshop","p":472},{"i":481,"t":"Feel free to use the existing templates for JupyterLab, RStudio, or Visual Studio Code in the DSRI catalog. You can easily reuse our images, adapt them to your training needs, and install all required dependencies: https://github.com/MaastrichtU-IDS/jupyterlab https://github.com/MaastrichtU-IDS/rstudio https://github.com/MaastrichtU-IDS/code-server Then you will just need to instruct your users to start an existing template with your newly published image. With the JupyterLab template you can also prepare a git repository to be cloned in the workspace as soon as they start it. You can find some examples of Python scripts using a database to run on the DSRI in this repository: https://github.com/MaastrichtU-IDS/dsri-demo","s":"Publish an image for your training","u":"/docs/guide-workshop","h":"#publish-an-image-for-your-training","p":472},{"i":483,"t":"You can use this video showing how to start an RStudio workspace; the process is similar for JupyterLab and VisualStudio Code: https://www.youtube.com/watch?v=Y0BjotH1LiE Otherwise just do it directly with them.","s":"Show your users how to start a workspace","u":"/docs/guide-workshop","h":"#show-your-users-how-to-start-a-workspace","p":472},{"i":485,"t":"Guides Known Issues","s":"Known Issues","u":"/docs/guide-known-issues","h":"","p":484},{"i":487,"t":"Sometimes you can no longer access the data you put in the persistent folder of your container. This can be due to a node going down: if the persistent volume your pod is connected to is on this node, the pod cannot access it anymore. You can easily fix this issue by restarting the pod of your application; this will make it properly connect to resources on nodes that are up. To restart the pod, go to Topology, click on your application, go to the Details tab, and decrease the pod count to 0, then put it back up to 1.
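The same restart can be done from the terminal by scaling the application down and back up (a sketch; my-app is an illustrative name, assuming a DeploymentConfig):
oc scale dc/my-app --replicas=0
oc scale dc/my-app --replicas=1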
","s":"Cannot access your data in the persistent folder","u":"/docs/guide-known-issues","h":"#cannot-access-your-data-in-the-persistent-folder","p":484},{"i":489,"t":"Pod or Deployment will not start You could run into a message in the Events tab that looks similar to this error: kubelet may be retrying requests that are timing out in CRI-O due to system load. Currently at stage container volume configuration: context deadline exceeded: error reserving ctr name The issue above will occur if you are using a large persistent volume. It can be resolved by adding the following to your Deployment(Config): spec:
  template:
    metadata:
      annotations:
        io.kubernetes.cri-o.TrySkipVolumeSELinuxLabel: 'true'
    spec:
      runtimeClassName: selinux Take note of the indentation and the place in the file! An example of this can be found here.","s":"Large volumes","u":"/docs/guide-known-issues","h":"#large-volumes","p":484},{"i":491,"t":"Spot the issue If the Events tab shows this error: --> Scaling filebrowser-case-1 to 1
error: update acceptor rejected my-app-1: pods for rc 'my-project/my-app-1' took longer than 600 seconds to become available Then check for the application ImageStream in Build > Images, and you might see this for your application image: Internal error occurred: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit. You can solve this by creating a secret to login to DockerHub in your project: oc create secret docker-registry dockerhub-login --docker-server=docker.io --docker-username=dockerhub_username --docker-password=dockerhub_password --docker-email=example@mail.com Then link the login secret to the default service account: oc secrets link default dockerhub-login --for=pull tip Logging in to DockerHub should raise the limitations To definitively solve this issue you can publish the DockerHub image to the GitHub Container Registry. Follow these instructions on your laptop: Login to the GitHub Container Registry with docker login. Pull the docker image: docker pull myorg/myimage:latest Change its tag: docker tag myorg/myimage:latest ghcr.io/maastrichtu-ids/myimage:latest Push it back to the GitHub Container Registry: docker push ghcr.io/maastrichtu-ids/myimage:latest Image created automatically If the image does not exist, GitHub will create it automatically when you push it for the first time! You can then head to your organization Packages tab to see the package. Make it public By default new images are set as Private: go to your Package Settings, and click Change Visibility to set it as Public; this avoids the need to log in to pull the image. You can update the image if you want access to the latest version, and you can set up a GitHub Actions workflow to do so. Finally you will need to update your DSRI deployment, or template, to use the newly created image on ghcr.io, and redeploy the application with the new template.","s":"DockerHub pull limitations","u":"/docs/guide-known-issues","h":"#dockerhub-pull-limitations","p":484},{"i":493,"t":"Spot the issue If the Events tab shows this error: --> cd /usr/local/src/work2/aerius-sample-sequencing/CD4K4ANXX
Trinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221
error: The function starts but at some point just exits without warnings or errors; the terminal in the container keeps running fine but the analysis never finishes. At some point a red label ''disconnected'' appears, the terminal stops, and the analysis never continues. Those two issues are due to the process running attached to the terminal. You should be able to easily run it using the \"Bash way\": add nohup at the beginning and & at the end. It will run in the background and all output that would have gone to the terminal will go to a file nohup.out in the working directory. nohup Trinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221 & To check if it is still running: ps aux | grep Trinity Be careful: make sure the terminal uses bash and not shell (\"sh\"). To use bash just type bash in the terminal: bash","s":"How to run a function within a container in the background","u":"/docs/guide-known-issues","h":"#how-to-run-function-within-a-container-in-the-background","p":484},{"i":495,"t":"danger ⚠️ remote: HTTP Basic: Access denied fatal: Authentication failed for It happens every time we are forced to change the Windows password. 
Apply this command from PowerShell (run as administrator): git config --system --unset credential.helper Then remove the gitconfig file from the C:\\Program Files\\Git\\mingw64/etc/ location (note: this path will be different on Mac, e.g. \"/Users/username\"). After that, use a git command like git pull or git push; it will ask for your username and password, and after providing a valid username and password the git commands work again. Windows: Go to the Windows Credential Manager. This is done in an EN-US Windows by pressing the Windows Key and typing 'credential'. In other localized Windows variants you need to use the localized term. Alternatively you can use the shortcut control /name Microsoft.CredentialManager in the Run dialog (WIN+R). Edit the git entry under Windows Credentials, replacing the old password with the new one. Mac: Press Cmd+Space and type \"Keychain Access\". You should find a key with a name like \"gitlab.*.com Access Key for user\". You can order by date modified to find it more easily. Right click and delete it.","s":"Git authentication issue","u":"/docs/guide-known-issues","h":"#git-authentication-issue","p":484},{"i":497,"t":"Spot the issue If you get a 403 forbidden issue while trying to upload folders/files or create a new folder/file: 403 forbidden The above issue will occur if you are not using persistent storage. A persistent storage can be created by the DSRI team to store your data persistently. Contact the DSRI team to request a persistent storage. You can find the persistent storage name as shown below","s":"Filebrowser 403 forbidden","u":"/docs/guide-known-issues","h":"#filebrowser-403-forbidden","p":484},{"i":499,"t":"On this page","s":"JupyterHub with Spark","u":"/docs/jupyterhub-spark","h":"","p":498},{"i":501,"t":"You will need to have the usual oc tool installed, and to install kfctl, a tool to deploy Kubeflow applications, on your machine; download the latest version for your OS 📥️ You can then install it by downloading the binary and putting it in your path, for example on Linux: wget https://github.com/kubeflow/kfctl/releases/download/v1.2.0/kfctl_v1.2.0-0-gbc038f9_linux.tar.gz
tar -xzf kfctl_v1.2.0-0-gbc038f9_linux.tar.gz
sudo mv kfctl /usr/local/bin/ Clone the repository with the DSRI custom images and deployments for the OpenDataHub platform, and go to the kfdef folder: git clone https://github.com/MaastrichtU-IDS/odh-manifests
cd odh-manifests/kfdef","s":"🧊 Install kfctl","u":"/docs/jupyterhub-spark","h":"#-install-kfctl","p":498},{"i":503,"t":"Go to the kfdef folder All scripts need to be run from the kfdef folder 📂 You can deploy JupyterHub with 2 different authentication systems; use the file corresponding to your choice: For the default DSRI authentication use kfctl_openshift_dsri.yaml For GitHub authentication use kfctl_openshift_github.yaml You need to create a new GitHub OAuth app: https://github.com/settings/developers And provide the GitHub client ID and secret through environment variables before running the start script: export GITHUB_CLIENT_ID=YOUR_CLIENT_ID
export GITHUB_CLIENT_SECRET=YOUR_CLIENT_SECRET First you will need to change the namespace: in the file you want to deploy, to provide the project where you want to start JupyterHub (currently opendatahub-ids), then you can deploy JupyterHub and Spark with kfctl: ./start_odh.sh kfctl_openshift_dsri.yaml 🗄️ Persistent volumes are automatically created for each instance started in JupyterHub to ensure persistence of the data even when JupyterHub is stopped. 
You can find the persistent volumes in the DSRI web UI: go to the Administrator view > Storage > Persistent Volume Claims. ⚡️ A Spark cluster with 3 workers is automatically created with the service name spark-cluster; you can use the URL of the master node to access it from your workspace: spark://spark-cluster:7077","s":"🪐 Deploy JupyterHub and Spark","u":"/docs/jupyterhub-spark","h":"#-deploy-jupyterhub-and-spark","p":498},{"i":505,"t":"Matching Spark versions Make sure all the Spark versions are matching; the current default version is 3.0.1. You can test the Spark cluster connection with PySpark: from pyspark.sql import SparkSession, SQLContext
import os
import socket
# Create a Spark session
spark_cluster_url = \"spark://spark-cluster:7077\"
spark = SparkSession.builder.master(spark_cluster_url).getOrCreate()
sc = spark.sparkContext
# Test your Spark connection
spark.range(5, numPartitions=5).rdd.map(lambda x: socket.gethostname()).distinct().collect()
# Or try:
# x = ['spark', 'rdd', 'example', 'sample', 'example']
x = [1, 2, 3, 4, 5]
y = sc.parallelize(x)
y.collect()
# Or try:
data = [1, 2, 3, 4, 5]
distData = sc.parallelize(data)
distData.reduce(lambda a, b: a + b)","s":"✨ Use the Spark cluster","u":"/docs/jupyterhub-spark","h":"#-use-the-spark-cluster","p":498},{"i":507,"t":"Make sure all the Spark versions are matching; the current default version is 3.0.1: Go to the Spark UI to verify the version of the Spark cluster Run spark-shell --version to verify the version of the Spark binary installed in the workspace Run pip list | grep pyspark to verify the version of the PySpark library Check the JupyterLab workspace Dockerfile to change the version of Spark installed in the workspace, and see how you can download and install a new version of the Spark binary. 
If you need to change the Python, Java or PySpark version in the workspace you can create an environment.yml file, for example for PySpark 2.4.5: name: spark
channels:
  - defaults
  - conda-forge
  - anaconda
dependencies:
  - python=3.7
  - openjdk=8
  - ipykernel
  - nb_conda_kernels
  - pip
  - pip:
    - pyspark==2.4.5 Create the environment with conda: mamba env create -f environment.yml","s":"Match the version","u":"/docs/jupyterhub-spark","h":"#match-the-version","p":498},{"i":509,"t":"You can also create a route to access the Spark UI and monitor the activity on the Spark cluster: oc expose svc/spark-cluster-ui Get the Spark UI URL: oc get route --selector radanalytics.io/service=ui --no-headers -o=custom-columns=HOST:.spec.host","s":"Spark UI","u":"/docs/jupyterhub-spark","h":"#spark-ui","p":498},{"i":511,"t":"You can create a new Spark cluster, for example here using Spark 3.0.1 with the installed Spark Operator: cat < Storage > Persistent Volume Claims You can also link your git repository to the project for automatic deployment (see using git in JupyterLab). This can also be deployed using Helm from the terminal, the steps are: helm repo add dsri https://maastrichtu-ids.github.io/dsri-helm-charts/
helm repo update
helm install freesurfer dsri/jupyterlab \\
  --set serviceAccount.name=anyuid \\
  --set openshiftRoute.enabled=true \\
  --set image.repository=ghcr.io/maastrichtu-ids/jupyterlab \\
  --set image.tag=freesurfer \\
  --set storage.mountPath=/root \\
  --set password=changeme
oc get route --selector app.kubernetes.io/instance=freesurfer --no-headers -o=custom-columns=HOST:.spec.host Log in to the corresponding Jupyter notebook and start the terminal, then enter freesurfer as a command","s":"JupyterLab with FreeSurfer","u":"/docs/neuroscience","h":"#jupyterlab-with-freesurfer","p":526},{"i":531,"t":"Generate a Dockerfile with: FreeSurfer 6.0.1 FSL 6.0.3 docker run --rm repronim/neurodocker:0.7.0 generate docker \\
  --base debian:stretch --pkg-manager apt \\
  --freesurfer version=6.0.1 --fsl version=6.0.3 > Dockerfile","s":"FreeSurfer and FSL","u":"/docs/neuroscience","h":"#freesurfer-and-fsl","p":526},{"i":533,"t":"Generate a Dockerfile with: FreeSurfer 6.0.1 AFNI, R and Python3 docker run --rm repronim/neurodocker:0.7.0 generate docker \\
  --base debian:stretch --pkg-manager apt \\
  --afni version=latest install_r=true install_r_pkgs=true install_python3=true \\
  --freesurfer version=6.0.1 > Dockerfile","s":"FreeSurfer and AFNI","u":"/docs/neuroscience","h":"#freesurfer-and-afni","p":526},{"i":535,"t":"Before deploying the Dockerfile to the DSRI you can open it, and add commands to install additional packages you are interested in, such as nighres or nipype. Check out the documentation to deploy the Dockerfile on the DSRI. UI with VNC Running a UI with VNC (e.g. FSLeyes) is still a work in progress. 
See this issue for more details.","s":"Deploy the generated Dockerfile","u":"/docs/neuroscience","h":"#deploy-the-generated-dockerfile","p":526},{"i":537,"t":"More details about using GPU with FSL: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/GPU","s":"Use the GPUs","u":"/docs/neuroscience","h":"#use-the-gpus","p":526},{"i":539,"t":"Guides Command Line Interface","s":"Command Line Interface","u":"/docs/openshift-commands","h":"","p":538},{"i":541,"t":"Here is an overview of common oc commands: oc login --token=<token> : Log in to the DSRI OpenShift cluster in your terminal. oc get projects : List all available projects. oc project <project-name> : Switch to a project. oc get pods : Get the running pods (a pod can run one or multiple containers for your application). oc rsh <pod-name> : Open a remote terminal connection to a pod (Shell/Bash). oc cp : Copy files from the host to a container or vice versa, e.g. from the host: oc cp <local-file> <pod-name>:<path-in-pod>, or from a pod to the host: oc cp <pod-name>:<path-in-pod> <local-destination>. oc rsync : Similar to the rsync command on Linux, to synchronize directories between a container and the host, or the other way around. oc exec <pod-name> -- <command> : Execute a command in a pod. oc delete pod <pod-name> : Delete a pod.
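For example, a typical oc session could look like this (a sketch; the token, project and pod names are illustrative):
oc login https://api.dsri2.unimaas.nl:6443 --token=<token>
oc project my-project
oc get pods
oc rsh my-app-1-a2b3c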
","s":"Overview","u":"/docs/openshift-commands","h":"#overview","p":538},{"i":544,"t":"oc projects","s":"List projects","u":"/docs/openshift-commands","h":"#list-projects","p":538},{"i":546,"t":"oc project my-project","s":"Connect to project","u":"/docs/openshift-commands","h":"#connect-to-project","p":538},{"i":548,"t":"To update an ImageStream in your project to pull the latest update from the external repository (e.g. from ghcr.io or DockerHub): oc import-image <imagestream-name>","s":"ImageStreams","u":"/docs/openshift-commands","h":"#imagestreams","p":538},{"i":551,"t":"oc create -f my-pod.yaml E.g. d2s-pod-virtuoso.yaml.","s":"Create pod from YAML","u":"/docs/openshift-commands","h":"#create-pod-from-yaml","p":538},{"i":553,"t":"oc get pod List running pods: oc get pods --field-selector=status.phase=Running","s":"List pods","u":"/docs/openshift-commands","h":"#list-pods","p":538},{"i":555,"t":"oc get pod | grep <app-name> Using a selector with Apache Flink as an example, and showing only the pod id without the header: oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name","s":"Get specific pod","u":"/docs/openshift-commands","h":"#get-specific-pod","p":538},{"i":557,"t":"Connect to a pod with Bash. oc rsh <pod-name>","s":"Remote Shell connection","u":"/docs/openshift-commands","h":"#remote-shell-connection","p":538},{"i":559,"t":"Example creating a folder: oc exec <pod-name> -- mkdir -p /mnt/workspace/resources","s":"Execute command in pod","u":"/docs/openshift-commands","h":"#execute-command-in-pod","p":538},{"i":561,"t":"oc delete pod <pod-name> Force pod deletion If the pod is not properly deleted, you can force its deletion: oc delete pod <pod-name> --force --grace-period=0","s":"Delete pod","u":"/docs/openshift-commands","h":"#delete-pod","p":538},{"i":563,"t":"oc logs -f <pod-name> Debug a pod Get more details on how to debug a pod.","s":"Get pod logs","u":"/docs/openshift-commands","h":"#get-pod-logs","p":538},{"i":565,"t":"Create an app from a template using the CLI, providing the parameters as arguments: oc new-app my-template -p APPLICATION_NAME=my-app -p ADMIN_PASSWORD=mypassword Example for the Semantic Web course notebooks: oc new-app template-jupyterstack-notebook -p APPLICATION_NAME=swcourseName -p NOTEBOOK_PASSWORD=PASSWORD
oc delete all --selector template=template-jupyterstack-notebook","s":"Create app from template","u":"/docs/openshift-commands","h":"#create-app-from-template","p":538},{"i":567,"t":"See the Load data page.","s":"Copy files","u":"/docs/openshift-commands","h":"#copy-files","p":538},{"i":569,"t":"Get started Install the client","s":"Install the client","u":"/docs/openshift-install","h":"","p":568},{"i":572,"t":"Download the oc and kubectl Command Line Interface clients: wget https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz && tar xvf oc.tar.gz
sudo mv oc kubectl /usr/local/bin/","s":"On Linux","u":"/docs/openshift-install","h":"#on-linux","p":568},{"i":574,"t":"Use brew: brew install openshift-cli Or manually download the program and add it to your path: Download https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/macosx/oc.tar.gz Unzip the archive Move the oc binary to a directory on your PATH. To check your PATH, open a terminal and execute the following command: echo $PATH","s":"On Mac","u":"/docs/openshift-install","h":"#on-mac","p":568},{"i":576,"t":"Create a folder for OpenShift in Program Files: C:\\Program Files (x86)\\OpenShift Click here to download the oc tool .zip file, and move it to C:\\Program Files (x86)\\OpenShift. Extract the .zip file. Next, set the system PATH environment variable for the directory containing the oc.exe file, which now resides in your newly created OpenShift folder inside C:\\Program Files (x86)\\OpenShift Open the Control Panel, and click on System Click on Advanced system settings on the left, or open the Advanced tab of System Properties. Click the button labeled Environment Variables... at the bottom. Look for the option Path in either the User variables section (for the current user) or the System variables section (for all users on the system). This makes it easy to access the oc command line interface by simply opening up PowerShell and typing in the oc command, e.g.: oc version Official documentation See the official documentation to install the client if needed.","s":"On Windows","u":"/docs/openshift-install","h":"#on-windows","p":568},{"i":578,"t":"To use the oc Command Line Interface, you will need to authenticate to the DSRI in your terminal: PASSWORD NOT SUPPORTED Authentication to the oc Command Line Interface using your password is not supported. oc login --token=<token> The token is provided by the Web UI: Go to the DSRI web UI. Click on the Copy Login Command button (in the top right of the page). 
Paste the copied command in your terminal, and execute it to log in with oc 🔑 Login command The command should look like this: oc login https://api.dsri2.unimaas.nl:6443 --token=$GENERATED_TOKEN","s":"Login in the terminal with oc","u":"/docs/openshift-install","h":"#login-in-the-terminal-with-oc","p":568},{"i":580,"t":"Get started Delete an application","s":"Delete an application","u":"/docs/openshift-delete-services","h":"","p":579},{"i":582,"t":"The best way to make sure all objects related to your application have been deleted is to use the command line, providing your application name: oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application Force deletion You can force the deletion if the objects are not deleting properly: oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application","s":"From the terminal","u":"/docs/openshift-delete-services","h":"#from-the-terminal","p":579},{"i":584,"t":"We recommend using the oc CLI to easily delete an application. But in case you cannot install oc on your computer, you can delete the different objects created by the application (easy to find on the Topology page): Delete the Route Delete the Service Delete the Deployment Config","s":"From the web UI","u":"/docs/openshift-delete-services","h":"#from-the-web-ui","p":579},{"i":586,"t":"Guides Login to Docker registries","s":"Login to Docker registries","u":"/docs/login-docker-registry","h":"","p":585},{"i":588,"t":"Access You need to be connected to the UM network to access this container registry. This container registry is available at UM Container registry. Here you can log in using your UM credentials by clicking on \"Login via OIDC provider\" Public Projects You don't need to follow the steps below if you are using one of the Public projects. These are available without credentials.","s":"UM Container registry","u":"/docs/login-docker-registry","h":"#um-container-registry","p":585},{"i":590,"t":"Go to UM Container registry, click on your username in the top right corner followed by clicking on User Profile. Click on the Copy icon. Log in with your credentials: docker login cr.icts.unimaas.nl Use your username, and the secret copied in step 1 as the password.","s":"Logging in with Docker CLI","u":"/docs/login-docker-registry","h":"#logging-in-with-docker-cli","p":585},{"i":592,"t":"Go to UM Container registry, look for a project of type Proxy Cache. For each of the major registries we created a Proxy Cache. Remember the project name, for example dockerhub. On the DSRI you can deploy an image like in this example: Docker CLI The same concept can be applied using the docker CLI: docker pull cr.icts.unimaas.nl/dockerhub/ubuntu:22.04","s":"Using a Proxy Cache","u":"/docs/login-docker-registry","h":"#using-a-proxy-cache","p":585},{"i":594,"t":"Go to UM Container registry, click on + NEW PROJECT. Fill in the details of the project name and Access Level (the preferred method is to leave the checkbox unchecked). Click OK","s":"Creating your own project","u":"/docs/login-docker-registry","h":"#creating-your-own-project","p":585},{"i":596,"t":"Go to UM Container registry, click on your username in the top right corner followed by clicking on User Profile. Click on the Copy icon. 
Create a secret to login to the UM Harbor Container Registry in your project: oc create secret docker-registry um-harbor-secret --docker-server=cr.icts.unimaas.nl --docker-username=<username> --docker-password=<password> Link the login secret to the default service account: oc secrets link default um-harbor-secret --for=pull","s":"Using your own user","u":"/docs/login-docker-registry","h":"#using-your-own-user","p":585},{"i":598,"t":"Go to UM Container registry, click on your project if you already created one. Click on the tab Robot Accounts Click on New Robot Account Create the Robot account to your liking Copy the secret or export it Create a secret to login to the UM Harbor Container Registry in your project: oc create secret docker-registry um-harbor-secret --docker-server=cr.icts.unimaas.nl --docker-username=<robot-username> --docker-password=<robot-secret> Link the login secret to the default service account: oc secrets link default um-harbor-secret --for=pull","s":"Using a robot account","u":"/docs/login-docker-registry","h":"#using-a-robot-account","p":585},{"i":600,"t":"Go to GitHub Settings, and create a Personal Access Token (PAT) which will be used as the password to connect to the GitHub Container Registry Create a secret to login to the GitHub Container Registry in your project: oc create secret docker-registry github-ghcr-secret --docker-server=ghcr.io --docker-username=<username> --docker-password=<token> --docker-email=<email> Link the login secret to the default service account: oc secrets link default github-ghcr-secret --for=pull","s":"GitHub Container Registry","u":"/docs/login-docker-registry","h":"#github-container-registry","p":585},{"i":602,"t":"Increase DockerHub limitations Logging in with DockerHub also increases the DockerHub limits to pull images in your project Create a secret to login to DockerHub in your project: oc create secret docker-registry dockerhub-secret --docker-server=docker.io --docker-username=<username> --docker-password=<password> --docker-email=<email> Link the login secret to the default service account: oc secrets link default dockerhub-secret --for=pull","s":"DockerHub","u":"/docs/login-docker-registry","h":"#dockerhub","p":585},{"i":604,"t":"Get started Upload data","s":"Upload data","u":"/docs/openshift-load-data","h":"","p":603},{"i":606,"t":"If you are using JupyterLab or VSCode you should be able to load data into the container by simply dragging and dropping the files to upload into the JupyterLab/VSCode web UI. For RStudio, use the Upload file button in the RStudio web UI to upload files from your computer to the RStudio workspace. File too big If those solutions don't work due to the file size, try one of the solutions below.","s":"In RStudio, JupyterLab and VSCode","u":"/docs/openshift-load-data","h":"#in-rstudio-jupyterlab-and-vscode","p":603},{"i":608,"t":"The quickest way to upload large files or folders from a laptop or server to the DSRI is to use the oc command line interface. Install the client To install the oc client on your laptop/server, visit the Install the client page oc cp directly copies, and overwrites existing files, from a laptop or server to an application pod on the DSRI. First get the <pod-name> using your application name: oc get pod --selector app=<app-name>","s":"Copy large files with the terminal","u":"/docs/openshift-load-data","h":"#copy-large-files-with-the-terminal","p":603},{"i":610,"t":"Folders are uploaded recursively by default: oc cp <folder-to-copy> <pod-name>:<absolute-path-in-pod> Use absolute path in the pod You need to provide the absolute (full) path where you want to copy it in the pod. Use your application workspace path, e.g. 
/home/jovyan for JupyterLab or /home/rstudio for RStudio) For example: oc cp my-folder jupyterlab-000:/home/jovyan You can also use this one-liner to automatically get the pod ID based on your app label: oc get pod --selector app=<app-name> | xargs -I{} oc cp <folder-to-copy> {}:<absolute-path-in-pod>","s":"Copy from local to pod","u":"/docs/openshift-load-data","h":"#copy-from-local-to-pod","p":603},{"i":612,"t":"Just do the inverse: oc cp <pod-name>:<path-in-pod> <local-destination>","s":"Copy from pod to local","u":"/docs/openshift-load-data","h":"#copy-from-pod-to-local","p":603},{"i":614,"t":"You can download data from your SURFdrive to your pod by creating a public link to the file: Go to the file in SURFdrive you'd like to share Click share and then create public link Fill in a name for the public link (like DSRI). The name does not matter much, but it can help you keep track of the goal of the public link. Click copy to clipboard Visit the link in a browser and copy the direct URL displayed on that page. Use the direct URL you just copied to download the file using either wget or curl (e.g. \"wget https://surfdrive.surf.nl/files/index.php/s/5mFwyAKj4UexlJb/download\") Revoke the link in the SURFdrive portal","s":"Download data from SURFdrive","u":"/docs/openshift-load-data","h":"#download-data-from-surfdrive","p":603},{"i":616,"t":"If you have a lot of large files and/or they are updated regularly, you can use rsync, as it synchronizes the files if they already exist, preventing duplication and making synchronization faster. You can also see the progress with rsync, which you cannot with cp. And if the upload is stopped for any reason, rsync should pick it up from where it stopped (instead of restarting from scratch like oc cp does). caution Rsync does not work with symlinks (created with ln -s)","s":"Synchronizes files with oc rsync","u":"/docs/openshift-load-data","h":"#synchronizes-files-with-oc-rsync","p":603},{"i":618,"t":"oc rsync --progress <folder-to-sync> <pod-name>:<path-in-pod> You can also use this one-liner to automatically get the pod ID based on your app label: oc get pod --selector app=<app-name> | xargs -I{} oc rsync --progress <folder-to-sync> {}:<path-in-pod>","s":"Sync local to pod","u":"/docs/openshift-load-data","h":"#sync-local-to-pod","p":603},{"i":620,"t":"Again, do the inverse: oc rsync --progress <pod-name>:<path-in-pod> <local-destination>","s":"Sync pod to local","u":"/docs/openshift-load-data","h":"#sync-pod-to-local","p":603},{"i":622,"t":"You can use more options to improve the upload of large files: --compress : compress file data during the transfer --delete : delete files not present in the source --watch : watch the directory for changes and resync automatically
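Combining these options, a full synchronization command could look like this (a sketch; the folder, pod name and path are illustrative):
oc rsync --progress --compress --delete --watch ./my-data <pod-name>:/home/jovyan/my-data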
","s":"More options","u":"/docs/openshift-load-data","h":"#more-options","p":603},{"i":625,"t":"Get started Prepare your project","s":"Prepare your project","u":"/docs/prepare-project-for-dsri","h":"","p":624},{"i":627,"t":"Using git is mandatory to deploy your code on the DSRI. Store your code in a git repository to keep track of changes, and make it easier to share and re-use your code outside of your computer. Platform recommendations We recommend these platforms depending on your use case: GitHub for public repositories GitLab hosted at Maastricht University for private repositories Any other git platform, such as BitBucket or gitlab.com, is fine too.","s":"Code in a git repository","u":"/docs/prepare-project-for-dsri","h":"#code-in-a-git-repository","p":624},{"i":629,"t":"If your project is using a large amount of data that cannot be pushed to a git repository, you will need to use a persistent storage to store your data on the DSRI. See the Storage on the DSRI documentation for more details about creating a persistent storage. Here are the options to upload your data to the DSRI storage:","s":"Get your data ready","u":"/docs/prepare-project-for-dsri","h":"#get-your-data-ready","p":624},{"i":631,"t":"If the data is stored on a local machine, such as your computer: Drag and drop files from your computer to the VisualStudio Code or JupyterLab web UI, if applicable. Otherwise, use the oc cp command to copy data to your application pod. See the Load data documentation page for more information. Upload to persistent storage Make sure you upload the data to a folder mounted on a persistent storage in the pod to avoid losing your data if the pod restarts.","s":"Data is on your local machine","u":"/docs/prepare-project-for-dsri","h":"#data-is-on-your-local-machine","p":624},{"i":633,"t":"Same as for your laptop, you will need to install and use the oc cp command to copy data to your application pod. See the Load data documentation page for more information.","s":"Data is on a server","u":"/docs/prepare-project-for-dsri","h":"#data-is-on-a-server","p":624},{"i":635,"t":"In certain cases, UM servers are not accessible by default from the DSRI. This is even the case for servers that are normally publicly accessible. To be able to access these UM servers from the DSRI, we need to put in a request to open the connection. Please let us know either the server name and port you would like to access, or the URL (e.g. um-vm0057.unimaas.nl on port 443 or https://gitlab.maastrichtuniversity.nl). You can reach out to us either by mail or by Slack. The procedure is described in the diagram below:","s":"Request access to internal UM servers","u":"/docs/prepare-project-for-dsri","h":"#request-access-to-internal-um-servers","p":624},{"i":637,"t":"Deploy applications Install from Operators","s":"Install from Operators","u":"/docs/operators","h":"","p":636},{"i":639,"t":"Contact us Contact us on the DSRI Slack #helpdesk channel if you want to install a new Operator on the DSRI.","s":"Install existing Operators","u":"/docs/operators","h":"#install-existing-operators","p":636},{"i":641,"t":"Install the operator-sdk tool. See the official documentation. Operators can be built using 3 different approaches: Helm: a framework to define the deployment logic based on regular kubernetes YAML, but with fewer capabilities for complete auto-update and insights. Ansible: define the deployment logic with Ansible, providing maximum capabilities. 
Golang: define the deployment logic in Golang, providing maximum capabilities, but requiring more code.","s":"Build Operators","u":"/docs/operators","h":"#build-operators","p":636},{"i":643,"t":"Documentation: Official docs to build Operators Official docs to build an Operator from Helm charts: https://sdk.operatorframework.io/docs/building-operators/helm/tutorial Official docs to build an Operator with Ansible: https://sdk.operatorframework.io/docs/building-operators/ansible/quickstart RedHat Certified Operator guide Make an operator use anyuid: https://redhat-connect.gitbook.io/certified-operator-guide/what-if-ive-already-published-a-community-operator/applying-security-context-constraints Submit community Operators: https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/submitting-a-community-operator-to-operatorhub.io Examples: Deployment example: https://github.com/microcks/microcks-ansible-operator/blob/master/roles/microcks/tasks/main.yml Older OpenShift guide: https://docs.openshift.com/container-platform/4.1/applications/operator_sdk/osdk-ansible.html Simple older example with route: https://github.com/djzager/ansible-role-hello-world-k8s","s":"External resources","u":"/docs/operators","h":"#external-resources","p":636},{"i":645,"t":"Guides Data storage","s":"Data storage","u":"/docs/openshift-storage","h":"","p":644},{"i":647,"t":"Switch to the Administrator view Go to the Project panel Select your project Expand the Storage panel then go to the Persistent Volume Claim panel Click the button called Create Persistent Volume Claim; you will be redirected to the Create Persistent Volume Claim wizard Provide a unique Persistent Volume Claim name starting with pvc-, for example: pvc-filebrowser Select the Access Mode (RWX) and the storage size Access modes and their CLI abbreviations: ReadWriteOnce (RWO): the volume can be mounted as read-write by a single node. ReadOnlyMany (ROX): the volume can be mounted as read-only by many nodes. ReadWriteMany (RWX): the volume can be mounted as read-write by many nodes. Click Create info The DSRI uses OpenShift Container Storage (OCS), which is based on Ceph. ReadWriteOnce (RWO) volumes cannot be mounted on multiple nodes; use the ReadWriteMany (RWX) access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on the shut down or crashed node. Static persistent volumes provide sustainable persistent storage over time for applications that need to run regular Docker images (which usually use the root user). info Some applications, such as the Jupyter template, automatically create a persistent storage","s":"Create the Persistent Storage","u":"/docs/openshift-storage","h":"#create-the-persistent-storage","p":644},{"i":649,"t":"On the Topology page select your application, click Actions on your application, and select the Add Storage option from the dropdown list. 
Select the Use Existing Claim option from the Add Storage wizard and select the claim Add the Mount Path Save info You can use the above method if you want to connect more applications to the same storage","s":"Connect the Existing Persistent Storage","u":"/docs/openshift-storage","h":"#connect-the-existing-persistent-storage","p":644},{"i":651,"t":"Switch to the Administrator view Go to the Project panel Select your project Expand the Storage panel then go to the Persistent Volume Claim panel Click on the three dots (⋮) next to the Persistent Volume Claim you want to expand. Click on Expand PVC in the menu. Enter the size you want to expand your PVC with. Hit Expand. It can take up to 2 minutes before your PVC is expanded.
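The same expansion can also be done from the terminal by patching the PVC (a sketch; pvc-mydata and the 20Gi size are illustrative):
oc patch pvc pvc-mydata -p '{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"20Gi\"}}}}'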
","s":"Expand existing Persistent Storage","u":"/docs/openshift-storage","h":"#expand-existing-persistent-storage","p":644},{"i":653,"t":"Dynamic persistent volumes can be created automatically by an application template. Dynamic storage can also be created manually: go to Storage in the left sidebar in a project, and click Create Storage at the top right of the Storage page. Storage class: ceph-fs Access Mode: Single User (RWO): only the user who created this volume can read/write to this volume. Shared Access (RWX): all users with access to the project can read/write this volume. Read Only (ROX): all users with access to the project can read this volume.","s":"Use the dynamic storage","u":"/docs/openshift-storage","h":"#use-the-dynamic-storage","p":644},{"i":655,"t":"Disabled We currently disabled this solution by default, as it was confusing for users and would lead to data loss. When creating a pod, OpenShift will by default use ephemeral storage. It creates a volume bound to the pod, so the volume will be deleted with the pod. It is recommended to use dynamic provisioning for a more sustainable storage solution, but ephemeral storage can be sufficient for testing.","s":"Use the ephemeral storage","u":"/docs/openshift-storage","h":"#use-the-ephemeral-storage","p":644},{"i":657,"t":"Guides Delete objects (advanced)","s":"Delete objects (advanced)","u":"/docs/openshift-delete-objects","h":"","p":656},{"i":659,"t":"The best way to make sure all objects related to your application have been deleted is to use the command line, providing your application name. Different selectors can be used to easily delete all objects generated by an application deployment. 2 selectors can easily be found in the template configuration: app: the name you gave when creating your application template: the name of the template you used to create the application. Use it only if you want to delete all applications created by a specific template. oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application Delete storage if necessary from the OpenShift web UI. Force deletion You can force the deletion if the objects are not deleting properly: oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application","s":"Delete an application","u":"/docs/openshift-delete-objects","h":"#delete-an-application","p":656},{"i":661,"t":"Get the ID of the specific pod you want to delete: oc get pod Use the pod ID retrieved to delete the pod: oc delete pod <pod-name> Force deletion If the pod is not properly deleted, you can force its deletion: oc delete pod <pod-name> --force --grace-period=0","s":"Delete pod","u":"/docs/openshift-delete-objects","h":"#delete-pod","p":656},{"i":663,"t":"Be careful All objects and persistent storages in this project will be deleted and cannot be retrieved. To properly delete a project you need to first delete all objects in this project: oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --all -n <project-name> Then delete the project: oc delete project <project-name>","s":"Delete a project","u":"/docs/openshift-delete-objects","h":"#delete-a-project","p":656},{"i":665,"t":"Be careful All data stored in this persistent storage will be lost and cannot be retrieved. oc delete pvc storage-name","s":"Delete persistent storage","u":"/docs/openshift-delete-objects","h":"#delete-persistent-storage","p":656},{"i":668,"t":"If a provisioned service is stuck on Marked for deletion you might need to set the finalizers to null in the YAML. This can be done using the OpenShift web UI: Go to the Provisioned Service in the OpenShift UI overview Click on Edit YAML Remove the finalizers: finalizers: - kubernetes-incubator/service-catalog You can also do it using the oc CLI: oc get serviceinstance
# Delete the problematic line from the serviceinstances to delete them
oc get serviceinstance -o yaml | grep Terminating | sed \"/kubernetes-incubator/d\" | oc apply -f - No global catalog The OpenShift Catalog does not handle deploying templates globally (on all projects) properly. If a template is deployed globally, OpenShift will try to create unnecessary objects such as provisioned services (aka. ServiceInstance), or ClusterClasses. Those services are not used, and some of them cannot be deleted easily. Catalog per project At the moment it is more reliable to create the template directly in your project if you need to use it multiple times.","s":"Stuck provisioned service","u":"/docs/openshift-delete-objects","h":"#stuck-provisioned-service","p":656},{"i":670,"t":"Projects can get stuck as marked for deletion, usually due to objects still present in the project that are not terminated, or finalizers left in some objects' YAML file. The following commands will allow you to clean up all the projects stuck in the terminating state that you have access to. 
Force deletion of terminating projects: for i in $(oc get projects | grep Terminating | awk '{print $1}'); do echo $i; oc delete project --force --grace-period=0 $i ; done Delete all objects in terminating projects: for i in $(oc get projects | grep Terminating | awk '{print $1}'); do echo $i; oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --force --grace-period=0 --all -n $i ; done Remove Kubernetes finalizers from terminating projects: for i in $(oc get projects | grep Terminating | awk '{print $1}'); do echo $i; oc get project $i -o yaml | sed \"/kubernetes/d\" | sed \"/finalizers:/d\" | oc apply -f - ; done Fix deletion If ServiceInstances refuse to get deleted, try to remove the kubernetes finalizers: for i in $(oc get projects | grep Terminating | awk '{print $1}'); do echo $i; oc get serviceinstance -n $i -o yaml | sed \"/kubernetes-incubator/d\" | oc apply -f - ; done Check deletion Check if there are still objects in a project: oc get all,configmap,pvc,serviceaccount,secret,rolebinding,serviceinstance","s":"Delete stuck project","u":"/docs/openshift-delete-objects","h":"#delete-stuck-project","p":656},{"i":672,"t":"Guides Create a new Project","s":"Create a new Project","u":"/docs/project-management","h":"","p":671},{"i":674,"t":"Avoid creating multiple projects Please try to avoid creating multiple projects unnecessarily. Be responsible and delete applications you are not using anymore in your project to free resources, instead of creating a new project with a different number at the end. It is also easier to connect your different application containers and storages when you create them in the same project. You can create a project using the Developer perspective, as follows: Click the Project drop-down menu to see a list of all available projects. Select Create Project. In the Create Project dialog box, enter a unique name in the Name field. Use a short and meaningful name for your project, as the project identifier is unique across all projects, such as workspace-yourname or ml-covid-pathways Add the Display Name (e.g. DSRI Workshop) and Description (e.g. DSRI Community Workshop Projects) details for the project. Click Create. Use the left navigation panel to navigate to the Project view and see the dashboard for your project. Optional: Use the Project drop-down menu at the top of the screen and select all projects to list all of the projects in your cluster. Use the Details tab to see the project details. If you have adequate permissions for a project, you can use the Project Access tab to provide or revoke administrator, edit, and view privileges for the project.","s":"Create a project using the web UI","u":"/docs/project-management","h":"#create-a-project-using-the-web-ui","p":671},{"i":676,"t":"You need to be logged in to the DSRI and copy the login command. Run oc new-project <project-name> --description=\"<description>\" --display-name=\"<display-name>\" Example oc new-project dsri-workshop --description=\"DSRI Workshop\" \\ --display-name=\"DSRI Community Workshop Projects\" Reuse your project Only create new projects when it is necessary. You can easily clean up your current project instead of creating a new one every time you want to try something.","s":"Create a project using the CLI","u":"/docs/project-management","h":"#create-a-project-using-the-cli","p":671},{"i":678,"t":"You can use the Project view in the Developer perspective to grant or revoke access permissions to your project. 
To add users to your project and provide Admin, Edit, or View access to them: In the Developer perspective, navigate to the Project view. In the Project page, select the Project Access tab. Click Add Access to add a new row of permissions to the default ones. Enter the user name, click the Select a role drop-down list, and select an appropriate role. Click Save to add the new permissions. You can also use: The Select a role drop-down list, to modify the access permissions of an existing user. The Remove Access icon, to completely remove the access permissions of an existing user to the project. info Advanced role-based access control is managed in the Roles and Role Bindings views in the Administrator perspective
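The same role bindings can be managed with the oc CLI (a sketch; the user and project names are illustrative):
# Grant the edit role to a user on a project
oc adm policy add-role-to-user edit jane.doe -n my-project
# Revoke it again
oc adm policy remove-role-from-user edit jane.doe -n my-project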
","s":"Access permissions for developers to your project","u":"/docs/project-management","h":"#access-permissions-for-developers-to-your-project","p":671},{"i":680,"t":"Navigate to Home → Projects. Locate the project that you want to delete from the list of projects. On the far right side of the project listing, select Delete Project from the Options menu. When the Delete Project pane opens, enter the name of the project that you want to delete in the field. Click Delete.","s":"Delete a project using the web UI","u":"/docs/project-management","h":"#delete-a-project-using-the-web-ui","p":671},{"i":682,"t":"Delete Project When you delete a project, the server updates the project status from Active to Terminating. Then, the server clears all content from a project that is in the Terminating state before finally removing the project. While a project is in Terminating status, you cannot add new content to the project. Projects can be deleted from the CLI or the web console. You need to be logged in to the DSRI and copy the login command. Run oc delete project <project-name> Example oc delete project dsri-workshop","s":"Delete a project using the CLI","u":"/docs/project-management","h":"#delete-a-project-using-the-cli","p":671},{"i":684,"t":"Miscellaneous PyTorch Profiling","s":"PyTorch Profiling","u":"/docs/profile-pytorch-code","h":"","p":683},{"i":686,"t":"According to wikipedia: \"Profiling is a form of dynamic program analysis that measures, for example, the space (memory) or time complexity of a program, the usage of particular instructions, or the frequency and duration of function calls. Most commonly, profiling information serves to aid program optimization, and more specifically, performance engineering.\"","s":"What is profiling?","u":"/docs/profile-pytorch-code","h":"#what-is-profiling","p":683},{"i":688,"t":"You may know that training large models like GPT-3 takes several million dollars source and a few hundred MWh source. If the engineers that trained these models did not spend time on optimization, it might have been several million dollars and hundreds of MWh more. Sure, the model you'd like to train is probably not quite as big. But maybe you want to train it 10000 times, because you want to do hyperparameter optimization. And even if you only train it once, it may take quite a bit of compute resources, i.e. money and energy.","s":"Why should I care about profiling?","u":"/docs/profile-pytorch-code","h":"#why-should-i-care-about-profiling","p":683},{"i":690,"t":"Well, you should always care if your code runs efficiently, but there are different levels of caring. From personal experience: if I know I'm going to run some code only once, for a few days, on a single GPU, I'll probably not create a full profile. What I would do is inspect my GPU and CPU utilization during my runs, just to see if it is somewhat efficient, and if I didn't make any obvious mistakes (e.g. accidentally not using the GPU, even if I have one available). If I know that I'll run my code on multiple GPUs, for multiple days, (potentially) on multiple nodes, and/or I need to run it multiple times, I know that my resource footprint is going to be large, and it's worth spending some time and effort to optimize the code. That's when I'll create a profile. The good part is: the more often you do it, the quicker and more adept you become at it.
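For such a quick sanity check you could open a second terminal in the pod and watch the hardware while the training runs (a sketch; nvidia-smi is only available in GPU-enabled pods):
# CPU and memory usage per process
top
# GPU utilization and memory, refreshed every 2 seconds
nvidia-smi -l 2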
{"i":706,"t":"1) Use GPU for Augmentation","s":"Data Augmentation","u":"/docs/speeding-tensorflow-dl","h":"#data-augmentation","p":695},{"i":708,"t":"1) Use Automatic Mixed Precision 2) Use Larger Batch Size 3) Use Multiples of Eight 4) Find the Optimal Learning Rate 5) Use tf.function 6) Overtrain, and Then Generalize 6a) Use progressive sampling 6b) Use progressive augmentation 6c) Use progressive resizing 7) Install an Optimized Stack for the Hardware 8) Optimize the Number of Parallel CPU Threads 9) Use Better Hardware 10) Distribute Training 11) Examine Industry Benchmarks","s":"Training","u":"/docs/speeding-tensorflow-dl","h":"#training","p":695},{"i":710,"t":"1) Use an Efficient Model 2) Quantize the Model 3) Prune the Model 4) Use Fused Operations 5) Enable GPU Persistence","s":"Inference","u":"/docs/speeding-tensorflow-dl","h":"#inference","p":695},{"i":712,"t":"We can assist you with analyzing the bottleneck(s) in your deep learning pipeline and recommend improvements to speed up your pipeline.","s":"How can the DSRI team help you?","u":"/docs/speeding-tensorflow-dl","h":"#how-dsri-team-can-help-you","p":695},{"i":714,"t":"This documentation is adapted from \"Practical Deep Learning for Cloud, Mobile, and Edge\" by Koul et al. (published by O’Reilly)","s":"External Resources and references","u":"/docs/speeding-tensorflow-dl","h":"#external-resources-and-references","p":695},{"i":716,"t":"On this page","s":"Working with sensitive data","u":"/docs/sensible-data","h":"","p":715},{"i":718,"t":"Since the DSRI can only be accessed when on the physical UM network or using the UM VPN, deployed services will not be available on the public Internet 🔒 All activities must have a legal basis. You must closely examine and abide by the terms and conditions of any data, software, or web service that you use as part of your work 📜","s":"Reminder: DSRI restrictions","u":"/docs/sensible-data","h":"#reminder-dsri-restrictions","p":715},{"i":720,"t":"The DSRI administration disclaims all responsibility for the misuse of sensitive data processed on the DSRI. We can guarantee that only you and 4 administrators are able to access the data (you may need to check with the data owner whether that is acceptable). Feel free to ask us for more details.","s":"Disclaimer","u":"/docs/sensible-data","h":"#disclaimer","p":715},{"i":722,"t":"Miscellaneous SURF Offerings","s":"SURF Offerings","u":"/docs/surf-offerings","h":"","p":721},{"i":725,"t":"SURF is the ICT cooperative for Dutch education and research institutions. As a collaborative organization, SURF’s members—its owners—work together to deliver top-tier digital services, address complex innovation challenges, and exchange valuable knowledge. Computing and storage infrastructure are essential for cutting-edge research. SURF supports researchers with a diverse range of computing and storage services. But before diving into these services, let’s briefly explore what a cluster computer is.","s":"What is SURF?","u":"/docs/surf-offerings","h":"#what-is-surf","p":721},{"i":727,"t":"A cluster computer is essentially a group of interconnected computers, called nodes, working together as a unified system. Each node has its own CPU, memory, and disk space, along with access to a shared file system. Imagine these nodes connected by network cables, like those in your home or office. Cluster computers are designed for high-performance workloads, allowing users to run hundreds of computational tasks simultaneously.","s":"What is a cluster computer?","u":"/docs/surf-offerings","h":"#what-is-a-cluster-computer","p":721},{"i":729,"t":"Some of the computing and storage solutions provided by SURF are: 1) Spider Cluster - High-performance Data Processing (DP) platform: Spider is a versatile DP platform aimed at processing large structured data sets. Spider is an in-house compute cluster built on top of SURF’s in-house elastic Cloud. This allows for scalable processing of many terabytes or even petabytes of data, utilizing many hundreds of cores simultaneously, in exceedingly short timespans.
Superb network throughput ensures connectivity to external data storage systems. Spider is used for large-scale multi-year data-intensive projects, for users to actively process their data, such as large static data sets or continuously growing data sets. Examples include genomics data, astronomic telescope data, physics detector data and satellite earth observations. 2) Snellius Cluster - the Dutch National supercomputer: Snellius is the Dutch National supercomputer hosted at SURF. The system facilitates scientific research carried out in many Universities, independent research institutes, governmental organizations, and private companies in the Netherlands. Snellius is a cluster of heterogeneous nodes built by Lenovo, containing predominantly AMD technology, with capabilities for high performance computing (parallel, symmetric multiprocessing). The system also has several system-specific storage resources that are geared towards supporting the various types of computing. 3) SURF Research Cloud (SRC): SURF Research Cloud is a service to facilitate scientists’ collaborative work. The central idea in SRC is the collaborative workspace. A workspace translates directly to a \"Virtual Machine\". These hosted workspaces, aka virtual machines, can be used for conducting research and development individually or together with your team/project members. 4) Research Data Storage Services: 4.1) Data Archive: The SURF Data Archive allows users to safely archive up to petabytes of valuable research data to ensure the long-term accessibility and reproducibility of their work. The Data Archive is also connected to SURF’s compute infrastructure, via a fast network connection, allowing for the seamless depositing and retrieval of data. 4.2) Data Repository: The Data Repository service is a web-based data publication and archiving platform that allows researchers to store, annotate and publish research data to ensure long-term preservation and availability of their datasets. All published datasets get their own DOI and Handle, while every file gets its own independent Handle to allow persistent reference on all levels. 4.3) dCache: dCache is a scalable storage system. It contains more than 50 petabytes of scientific data, accessible through several authentication methods and protocols. It consists of magnetic tape storage and hard disk storage, and both are addressed by a common file system. 4.4) Object Store: Object storage is ideal for storing unstructured data that can grow without bound. Object storage does not have a directory-type structure like a normal file system has; instead it organises its data in so-called containers that contain objects. There is no tree-like structure with files and directories. There are only containers with objects in them. The SURF Object Store service is based on Ceph RGW and provides access using the S3 protocol, which is the de facto standard for addressing object storage.","s":"Different types of Services provided by SURF:","u":"/docs/surf-offerings","h":"#different-types-of-services-provided-by-surf","p":721},
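Because the Object Store speaks the S3 protocol, any standard S3 client can talk to it. A minimal sketch using the boto3 Python client; the endpoint URL and credentials below are placeholders, not real SURF values:

```python
import boto3

# Placeholder endpoint and credentials for an S3-compatible object store
s3 = boto3.client(
    "s3",
    endpoint_url="https://objectstore.example.org",
    aws_access_key_id="YOUR_ACCESS_KEY",
    aws_secret_access_key="YOUR_SECRET_KEY",
)

# List containers ("buckets" in S3 terms), then upload a file into one of them
print([b["Name"] for b in s3.list_buckets()["Buckets"]])
s3.upload_file("results.csv", "my-container", "results.csv")
```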
{"i":731,"t":"The DSRI team is here to help you navigate SURF’s services, including: 1) Grant Applications: We assist researchers in applying for SURF grants. For instance: * Small applications: Up to 1 million System Billing Units (SBU) on Snellius and/or 100 TB of dCache storage (https://www.surf.nl/en/small-compute-applications-nwo) * Large applications: Customized resource allocations based on project needs. 2) Resource Estimation: Unsure about your computing and storage requirements? We help estimate your needs in terms of SURF’s billing units. 3) Use Case Analysis: We assess whether your research project is a good fit for SURF’s services.","s":"How to Get Started with SURF Services?","u":"/docs/surf-offerings","h":"#how-to-get-started-with-surf-services","p":721},{"i":733,"t":"SURF: https://www.surf.nl/en Deep Learning Tutorials by UvA: https://uvadlc-notebooks.readthedocs.io/en/latest/index.html","s":"External Resources and references","u":"/docs/surf-offerings","h":"#external-resources-and-references","p":721},{"i":735,"t":"Get started Start your workspace","s":"Start your workspace","u":"/docs/start-workspace","h":"","p":734},{"i":737,"t":"Anything running on the DSRI needs to run in a Docker container. Docker containers are namespaces that share the kernel of a Linux system; you can see them as clean, minimalist Linux computers with only what you need to run your programs installed. This allows you to completely control the environment where your code runs, and avoid conflicts. When running experiments we can start from existing images that have already been published for popular data science applications with a web interface. You can use, for example, JupyterLab when running Python, RStudio when running R, or VisualStudio Code if you prefer. Once you access a running container, you can install anything you need as if it were a Linux/Ubuntu computer (most of them run with admin privileges), and run anything via the notebook/RStudio/VSCode interface, or the terminal.","s":"Introduction to containers","u":"/docs/start-workspace","h":"#introduction-to-containers","p":734},{"i":739,"t":"The first step to get your code running on the DSRI is to pick the base interface you want to use to access your workspace on the DSRI. We prepared generic Docker images for data science workspaces with your favorite web UI pre-installed to easily deploy your workspace. So you just need to choose your favorite workspace, start the container, access it, add your code, and install your dependencies. Login to the DSRI dashboard Select your project, or create one with a meaningful short name representing your project, e.g. workspace-yourname Go to the +Add page, and select to add From Developer Catalog => All services Search for templates corresponding to the application you want to deploy among the ones described below (make sure the filter for templates is properly checked). JupyterLab: Access and run your code using the popular Jupyter notebooks, with kernels for Python, Java, R, and Julia. It also provides a good web interface to access the terminal, upload and browse files. VisualStudio Code: Your daily IDE, but in your browser, running on the DSRI. RStudio: R users' favorite. The terminal: For people who are used to the terminal and just want to run scripts; it provides smaller and more stable images, which makes installation and deployment easier. You can use the Ubuntu template to start a basic Ubuntu image and access it from the terminal. Any web interface: You can easily run and access most programs with a web interface on the DSRI. You can use the template Custom workspace if your application is exposed on port 8888. Otherwise visit the page Anatomy of a DSRI application for more details. Desktop interface: there is the possibility to start a container as a Linux operating system with a graphical desktop interface.
It can be useful to deploy software like Matlab, but the setup can be a bit more complex. You will get an Ubuntu computer with a basic Desktop interface, running on the DSRI, that you can access directly in your web browser. The desktop interface is accessed through a web application by using noVNC, which exposes the VNC connection without needing a VNC client. More applications You can also find more documentation on the different applications that can be deployed from the DSRI under Deploy applications in the menu on the left.","s":"Choose your interface","u":"/docs/start-workspace","h":"#choose-your-interface","p":734},{"i":740,"t":"Once you have chosen your favorite way to run your experiments, you can click on the application you want to use for your workspace. Check out the description to learn more details about the application that will be deployed. Then click on Instantiate Template, and fill in the parameters, such as the password to access the web UI. Note that the application name needs to be unique in the project. Finally click on the Create button. You should see your application in your project dashboard; it can take a few seconds to a few minutes to pull the Docker image and start the application. Once the application has started you will be able to access it by clicking on its circle, then clicking the Route that has been automatically generated for the web interface, in the Resources tab. Check the workshop For a more detailed tutorial, you can follow the workshop to start Data Science applications on the DSRI","s":"Start your workspace","u":"/docs/start-workspace","h":"#start-your-workspace","p":734},{"i":742,"t":"We recommend using git to clone your project code into your workspace, as it helps with sharing and managing the evolution of your project. It will be preinstalled in most images; otherwise you can install it easily with apt-get install git With web interfaces like JupyterLab, VisualStudio Code and RStudio you can easily upload small and medium-sized files directly through the UI with a drag and drop. Otherwise you can use the terminal, install the oc client, and use the oc cp or oc rsync commands to upload large files to your workspace on the DSRI. See the Upload data page for more details.","s":"Upload your code and data","u":"/docs/start-workspace","h":"#upload-your-code-and-data","p":734},{"i":744,"t":"Once the workspace is started, you can install the different dependencies you need to run your experiments. It is recommended to save all the commands you used to install the different requirements in a script (e.g. install.sh). This will ensure you can reinstall the environment easily and faithfully if the container is restarted. You can also use them to create a Docker image with everything prepared for your application. Most containers for science are based on Debian/Ubuntu, so you can install new packages with apt-get: apt-get update apt-get install -y build-essential wget curl","s":"Install your dependencies","u":"/docs/start-workspace","h":"#install-your-dependencies","p":734},{"i":746,"t":"You can use your web interface to run your code as you like: notebooks, RStudio, or via VSCode. Note that for jobs which run for a long time the web UI is not always the best solution, e.g. Jupyter notebooks can be quite unstable when running a 30-minute code block. A quick solution for that is to run your code in scripts, using the bash terminal.
You can use the nohup prefix and the & suffix to run your script in the background, so that you can even disconnect, and come back later to check the results and logs. For example, with a Python script, you would do: nohup python my_script.py & The script will run in the background, and all terminal output will be stored in the file nohup.out. You can also check whether the process is currently running by typing ps aux or top. You can kill the process by getting the process ID (PID) using the previous commands, and then: kill -9 PID","s":"Run your code","u":"/docs/start-workspace","h":"#run-your-code","p":734},
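If the long-running job is a Python script, it also helps to write progress to a dedicated log file instead of relying only on nohup.out. A minimal sketch (long_task.py is a hypothetical name, not from the original docs):

```python
# long_task.py -- hypothetical script you would launch with: nohup python long_task.py &
import logging
import time

logging.basicConfig(
    filename="progress.log",   # survives disconnects, easy to inspect later
    level=logging.INFO,
    format="%(asctime)s %(message)s",
)

for step in range(1_000):
    time.sleep(1)              # stand-in for the real work
    if step % 100 == 0:
        logging.info("step %d done", step)
```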
ENTRYPOINT[\"jupyter\", \"lab\"]) Here is a simple example Dockerfile for a python application: # The base image to start from, choose the one with everything you need installedFROM python:3.8# Change the user and working directory to make sure we are using rootUSER rootWORKDIR /root# Install additional packagesRUN apt-get update && \\ apt-get install build-essentials# This line will copy all files and folder that are in the same folder as the Dockerfile (usually the code you want to run in the container)ADD . . # This line will install all the python packages described in the requirements.txt of your source codeRUN pip install -r requirements.txt && \\ pip install notebook jupyterlab# Command to run when the container is started, here it starts JupyterLab as a serviceENTRYPOINT [ \"jupyter\", \"lab\" ] Here are some examples of Dockerfile for various type of web applications: Custom JupyterLab based on the official jupyter/docker-stacks Custom RStudio VisualStudio Code server Python web app See the guide to Publish a Docker image for more details on this topic.","s":"Optional: define a docker image","u":"/docs/start-workspace","h":"#optional-define-a-docker-image","p":734},{"i":754,"t":"Guides Workflows Deploy Airflow","s":"Deploy Airflow","u":"/docs/workflows-airflow","h":"","p":753},{"i":756,"t":"You will need to have Helm installed on your computer to deploy a Helm chart, see the Helm docs for more details. Install the Helm chart to be able to deploy Airflow on the DSRI: helm repo add apache-airflow https://airflow.apache.orghelm repo update","s":"Install the chart","u":"/docs/workflows-airflow","h":"#install-the-chart","p":753},{"i":757,"t":"You can quickly deploy Airflow on the DSRI, with DAGs automatically synchronized with your Git repository. We use a values.yml file with all default parameters pre-defined for the DSRI, so you just need to edit the password and git repository configuration in this command, and run it: helm install airflow apache-airflow/airflow \\ -f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/airflow/values.yml \\ --set webserver.defaultUser.password=yourpassword \\ --set dags.gitSync.repo=https://github.com/bio2kg/bio2kg-etl.git \\ --set dags.gitSync.branch=main \\ --set dags.gitSync.subPath=workflows/dags info If you need to do more configuration you can download the a values.yml file, edit it directly to your settings and use this file locally with -f values.yml A few seconds after Airflow started to install, you will need to fix the postgresql deployment in a different terminal window (unfortunately setting the serviceAccount.name of the sub chart postgresql don't work, even if it should be possible according to the official helm docs). 
Run this command to fix postgresql: oc patch statefulset/airflow-postgresql --patch '{\"spec\":{\"template\":{\"spec\": {\"serviceAccountName\": \"anyuid\"}}}}' Once Airflow has finished deploying, you can access its web interface temporarily by forwarding the webserver to your machine at http://localhost:8080 oc port-forward svc/airflow-webserver 8080:8080 Or permanently expose the interface on a URL accessible when logged in to the UM VPN, with HTTPS enabled: oc expose svc/airflow-webserver oc patch route/airflow-webserver --patch '{\"spec\":{\"tls\": {\"termination\": \"edge\", \"insecureEdgeTerminationPolicy\": \"Redirect\"}}}' Finally, get the route to the Airflow web interface, or access it via the DSRI web UI: oc get routes","s":"Deploy Airflow","u":"/docs/workflows-airflow","h":"#deploy-airflow","p":753},{"i":759,"t":"You can find example DAGs for the bash operator, python operator and Kubernetes pod operator here. Here is an example of a DAG using the Kubernetes pod operator to run tasks as pods; you will need to change the namespace parameter to the DSRI project where Airflow is deployed: from airflow import DAG from datetime import datetime, timedelta from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator from airflow.operators.dummy_operator import DummyOperator default_args = { 'owner': 'airflow', 'depends_on_past': False, 'start_date': datetime.utcnow(), 'email': ['airflow@example.com'], 'email_on_failure': False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(minutes=5)} dag = DAG( 'kubernetes_pod_operator', default_args=default_args, schedule_interval=None # schedule_interval=timedelta(minutes=10)) start = DummyOperator(task_id='run_this_first', dag=dag) passing = KubernetesPodOperator( namespace='CHANGEME', image=\"python:3.6\", cmds=[\"python\",\"-c\"], arguments=[\"print('hello world')\"], labels={\"app\": \"airflow\"}, name=\"passing-test\", task_id=\"passing-task\", get_logs=True, dag=dag) passing.set_upstream(start)","s":"Example workflows","u":"/docs/workflows-airflow","h":"#example-workflows","p":753},{"i":761,"t":"helm uninstall airflow","s":"Delete the chart","u":"/docs/workflows-airflow","h":"#delete-the-chart","p":753},{"i":763,"t":"Here are a few links for more details on the official Airflow Helm chart: Helm chart docs Helm chart source code Helm chart parameters Other ways to deploy Airflow on OpenShift: Community Helm chart GitHub repo Airflow template for OpenShift","s":"See also","u":"/docs/workflows-airflow","h":"#see-also","p":753},{"i":765,"t":"Guides Workflows Run Argo workflows","s":"Run Argo workflows","u":"/docs/workflows-argo","h":"","p":764},{"i":767,"t":"Argo 🦑 is a container-native workflow engine for Kubernetes supporting both DAG and step-based workflows. Download and install the Argo client on your computer to start workflows on the DSRI.","s":"Install the argo client","u":"/docs/workflows-argo","h":"#install-the-argo-client","p":764},{"i":769,"t":"sudo curl -L -o /usr/local/bin/argo https://github.com/argoproj/argo/releases/download/v2.4.2/argo-linux-amd64 sudo chmod +x /usr/local/bin/argo","s":"On Ubuntu","u":"/docs/workflows-argo","h":"#on-ubuntu","p":764},{"i":771,"t":"brew install argoproj/tap/argo","s":"On MacOS","u":"/docs/workflows-argo","h":"#on-macos","p":764},{"i":773,"t":"Get the Argo executable version 2.4.2 from Argo Releases on GitHub.
See the official Argo documentation.","s":"On Windows","u":"/docs/workflows-argo","h":"#on-windows","p":764},{"i":775,"t":"Run the Hello world workflow to test if Argo has been properly installed. And take a look at the examples provided in the Argo documentation to discover how to use the different features available. argo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml Logged in You will need to have the oc client installed and be logged in with oc login, see the install documentation page.","s":"Test Argo","u":"/docs/workflows-argo","h":"#test-argo","p":764},{"i":778,"t":"Deploy the Argo Helm chart. Install and use helm Add the Helm charts repository: helm repo add argo https://argoproj.github.io/argo-helm Install the chart: helm install my-argo argo/argo --version 0.15.2","s":"Argo workflows with Helm","u":"/docs/workflows-argo","h":"#argo-workflows-with-helm","p":764},{"i":780,"t":"Ask on the DSRI Slack #helpdesk channel to have the ArgoCD Operator installed in your project.","s":"ArgoCD Operator","u":"/docs/workflows-argo","h":"#argocd-operator","p":764},{"i":782,"t":"On Ubuntu sudo rm /usr/local/bin/argo You can now reinstall a newer version of Argo.","s":"Uninstall argo","u":"/docs/workflows-argo","h":"#uninstall-argo","p":764},{"i":784,"t":"We will use examples from the MaastrichtU-IDS/d2s-core project.","s":"Run workflows to convert structured data to RDF","u":"/docs/workflows-argo","h":"#run-workflows-to-convert-structured-data-to-rdf","p":764},{"i":786,"t":"git clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git cd d2s-project-template Authenticate to the OpenShift cluster using oc login.","s":"Clone the repository","u":"/docs/workflows-argo","h":"#clone-the-repository","p":764},{"i":788,"t":"Steps-based workflow for XML files, see the example workflow YAML file on GitHub. argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml.yaml \\ -f support/config/config-transform-xml-drugbank.yml Provide config files Config files can be provided using the -f argument, but are not necessary. DAG workflow for XML files, see the YAML file on GitHub. argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml-dag.yaml \\ -f support/config/config-transform-xml-drugbank.yml","s":"Workflow to convert XML files to RDF","u":"/docs/workflows-argo","h":"#workflow-to-convert-xml-files-to-rdf","p":764},{"i":790,"t":"Steps-based workflow for CSV files argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv.yaml \\ -f support/config/config-transform-csv-stitch.yml DAG workflow for CSV files argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv-dag.yaml \\ -f support/config/config-transform-csv-stitch.yml Solve issue Try this to solve issues related to the step services IP: {{steps.nginx-server.pod-ip}}","s":"Workflow to convert CSV files to RDF","u":"/docs/workflows-argo","h":"#workflow-to-convert-csv-files-to-rdf","p":764},{"i":793,"t":"argo list","s":"List running Argo workflows","u":"/docs/workflows-argo","h":"#list-running-argo-workflows","p":764},{"i":795,"t":"argo terminate my-workflow Workflow This might not stop the workflow; in this case use: argo delete my-workflow","s":"Stop a workflow","u":"/docs/workflows-argo","h":"#stop-a-workflow","p":764},{"i":797,"t":"argo delete my-workflow","s":"Delete a workflow","u":"/docs/workflows-argo","h":"#delete-a-workflow","p":764},{"i":799,"t":"Get into a container, to understand why it is failing, by creating a YAML with the command tail -f /dev/null to keep it hanging.
See the example in the d2s-argo-workflow repository: apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: generateName: test-devnull-argo- spec: entrypoint: execute-workflow # Use existing volume volumes: - name: workdir persistentVolumeClaim: claimName: pvc-mapr-projects-test-vincent templates: - name: execute-workflow steps: - - name: run-rdfunit template: rdfunit - name: rdfunit container: image: umids/rdfunit:latest command: [tail] args: [\"-f\", \"/dev/null\"] volumeMounts: - name: workdir mountPath: /data subPath: dqa-workspace Then start the workflow: argo submit --serviceaccount argo tests/test-devnull-argo.yaml And connect with the Shell (change the pod ID to your pod ID): oc rsh test-devnull-argo-pod","s":"Debug a workflow","u":"/docs/workflows-argo","h":"#debug-a-workflow","p":764},{"i":801,"t":"Guides Workflows Run CWL workflows","s":"Run CWL workflows","u":"/docs/workflows-cwl","h":"","p":800},{"i":803,"t":"Clone the Git repository into /calrissian on a persistent volume on the cluster from a terminal. cd /data/calrissian git clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git cd d2s-project-template You will need to create the folder for the workflow output data, in our example it is output-data: mkdir /data/calrissian/output-data You might need to give permissions (CWL execution will fail due to permissions issues otherwise). chmod -R 777 /data/calrissian","s":"Clone the repository","u":"/docs/workflows-cwl","h":"#clone-the-repository","p":800},{"i":805,"t":"Start the CWL execution from your computer using the oc client. Define the CWL command arguments to run in run-workflows-cwl.yaml (be careful to properly define the paths to the CWL files in the pod storage). oc create -f d2s-core/support/run-workflows-cwl.yaml Delete the pod You will need to delete the pod if you want to re-create it.","s":"Start pod","u":"/docs/workflows-cwl","h":"#start-pod","p":800},{"i":807,"t":"oc delete -f d2s-core/support/run-workflows-cwl.yaml","s":"Delete created pod","u":"/docs/workflows-cwl","h":"#delete-created-pod","p":800},{"i":809,"t":"Guides Libraries for Machine Learning","s":"Libraries for Machine Learning","u":"/docs/tools-machine-learning","h":"","p":808},{"i":811,"t":"See this introductory article explaining the different principles of Machine Learning. The Azure Machine Learning Algorithm Cheat Sheet helps you choose the right algorithm for a predictive analytics model. This repository provides tutorials and examples for a vast number of Machine / Deep Learning libraries.","s":"Machine Learning libraries","u":"/docs/tools-machine-learning","h":"#machine-learning-libraries","p":808},{"i":813,"t":"https://scikit-learn.org/stable/","s":"SciKit Learn","u":"/docs/tools-machine-learning","h":"#scikit-learn","p":808},
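As a flavor of the scikit-learn API, a minimal train-and-evaluate sketch on one of its bundled toy datasets (not part of the original page):

```python
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Split a bundled toy dataset, fit a classifier, report held-out accuracy
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = RandomForestClassifier(random_state=0).fit(X_train, y_train)
print(clf.score(X_test, y_test))
```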
{"i":815,"t":"See this article for more details about modern Deep Learning libraries.","s":"Deep Learning libraries","u":"/docs/tools-machine-learning","h":"#deep-learning-libraries","p":808},{"i":817,"t":"Python library developed by Google. https://www.tensorflow.org/","s":"Tensorflow","u":"/docs/tools-machine-learning","h":"#tensorflow","p":808},{"i":819,"t":"Python library developed by Facebook. https://pytorch.org/","s":"PyTorch","u":"/docs/tools-machine-learning","h":"#pytorch","p":808},{"i":821,"t":"Java library developed by Amazon. See the introduction article. https://djl.ai/","s":"Deep Java Library","u":"/docs/tools-machine-learning","h":"#deep-java-library","p":808},{"i":823,"t":"Layer on top of Tensorflow. https://sonnet.readthedocs.io/en/latest/","s":"Sonnet","u":"/docs/tools-machine-learning","h":"#sonnet","p":808},{"i":825,"t":"Python library. Layer on top of Tensorflow, CNTK, Theano. https://keras.io/","s":"Keras","u":"/docs/tools-machine-learning","h":"#keras","p":808},{"i":827,"t":"Layer on top of Tensorflow, PyTorch, and SciKit Learn, developed by Netflix. https://metaflow.org/","s":"Metaflow","u":"/docs/tools-machine-learning","h":"#metaflow","p":808},{"i":829,"t":"Guides Workflows Introduction to workflows","s":"Introduction to workflows","u":"/docs/workflows-introduction","h":"","p":828},{"i":831,"t":"Multiple technologies are available to run workflows on OpenShift/Kubernetes clusters. Each has its strengths and weaknesses in different areas. Use-case dependent The technology to use needs to be chosen depending on your use-case.","s":"Introduction","u":"/docs/workflows-introduction","h":"#introduction","p":828},{"i":833,"t":"Those solutions can easily be deployed on the DSRI. Let","s":"Current solutions on the DSRI","u":"/docs/workflows-introduction","h":"#current-solutions-on-the-dsri","p":828},{"i":835,"t":"GitHub Actions allows you to automatically run containerized workflows defined through a simple YAML file hosted in your GitHub repository. See the page about GitHub Actions runners for more details, and to deploy runners on the DSRI.","s":"GitHub Actions workflows","u":"/docs/workflows-introduction","h":"#github-actions-workflows","p":828},{"i":837,"t":"Airflow is a platform to programmatically author, schedule and monitor workflows, aka DAGs (directed acyclic graphs). See the page about Airflow for more details, and to deploy Airflow on the DSRI.","s":"Apache Airflow","u":"/docs/workflows-introduction","h":"#apache-airflow","p":828},{"i":839,"t":"Argo is a container-native workflow engine for Kubernetes supporting both DAG and step-based workflows. Workflows are easy to define using Kubernetes-like YAML files, especially if your workflow is composed of Docker containers to run with arguments. Contact us Contact us if you want to run Argo workflows on the DSRI","s":"Argo","u":"/docs/workflows-introduction","h":"#argo","p":828},{"i":841,"t":"Let us know if you are interested in deploying, and using, any of those workflows on the DSRI.","s":"More options","u":"/docs/workflows-introduction","h":"#more-options","p":828},{"i":843,"t":"Optimized for Tensorflow workflows on Kubernetes. Pipelines written in Python.","s":"Kubeflow","u":"/docs/workflows-introduction","h":"#kubeflow","p":828},{"i":845,"t":"Define, schedule and run workflows. Can be deployed with OpenDataHub, see also this deployment for OpenShift. See also: the Airflow on Kubernetes blog, and the Kubernetes in Airflow documentation.","s":"Apache Airflow","u":"/docs/workflows-introduction","h":"#apache-airflow-1","p":828},{"i":847,"t":"Run batch pipelines on Kubernetes with Volcano. More a scheduler than a workflow engine. Volcano can be used to run Spark, Kubeflow or KubeGene workflows.","s":"Volcano","u":"/docs/workflows-introduction","h":"#volcano","p":828},{"i":849,"t":"Nextflow has been developed by the genomics research community and is built to run bioinformatics pipelines. Define your workflow in a Bash-script fashion, providing input, output and the command to run, without the need to create and use Docker containers for Conda pipelines.","s":"Nextflow","u":"/docs/workflows-introduction","h":"#nextflow","p":828},{"i":851,"t":"Developed by the genomics research community.
Good support for provenance description (export as RDF). Support on OpenShift is still in development. Rabix Composer proposes a GUI to build the workflows.","s":"CWL","u":"/docs/workflows-introduction","h":"#cwl","p":828},{"i":853,"t":"KubeGene is a turn-key genome sequencing workflow management framework. See the Workflow example, and how to define a tool.","s":"KubeGene","u":"/docs/workflows-introduction","h":"#kubegene","p":828},{"i":855,"t":"Open-source platform for rapidly deploying machine learning models on Kubernetes. Manage, serve and scale models built in any framework on Kubernetes. Contact us Feel free to contact us if you have any questions about running workflows on the DSRI or to request the support of a new technology.","s":"Seldon","u":"/docs/workflows-introduction","h":"#seldon","p":828},{"i":857,"t":"Guides Workflows Run Nextflow workflows","s":"Run Nextflow workflows","u":"/docs/workflows-nextflow","h":"","p":856},{"i":859,"t":"Install the nextflow client on your computer: wget -qO- https://get.nextflow.io | bash Official documentation See the Nextflow documentation.","s":"Install Nextflow","u":"/docs/workflows-nextflow","h":"#install-nextflow","p":856},{"i":861,"t":"Try the hello world workflow from Nextflow using an existing storage volume: nextflow kuberun https://github.com/nextflow-io/hello -v pvc-mapr-projects-showcase:/data Use Conda environments You can easily define Conda environments and workflows with Nextflow.","s":"Run workflow","u":"/docs/workflows-nextflow","h":"#run-workflow","p":856},{"i":863,"t":"Guides Workflows Deploy GitHub Runners","s":"Deploy GitHub Runners","u":"/docs/workflows-github-actions","h":"","p":862},{"i":865,"t":"You will need to have Helm installed on your computer to deploy a GitHub Actions Runner, see the Helm docs for more details. Install the Helm chart to be able to deploy the GitHub Actions Runner on the DSRI: helm repo add openshift-actions-runner https://redhat-actions.github.io/openshift-actions-runner-chart helm repo update Then create a GitHub Personal Access Token as per the instructions in the runner image README. tl;dr: go to your Settings on GitHub: https://github.com/settings/tokens, click the button to create a new token, give it a meaningful name (e.g. DSRI Runner my-project), and check the following permissions: ✅️ repo (maybe also workflow?)
✅️ admin:org if the Runner is for an organization","s":"Install the chart","u":"/docs/workflows-github-actions","h":"#install-the-chart","p":862},{"i":867,"t":"Before deploying the runner, make sure you are in the project where you want to deploy it: oc project my-project","s":"Deploy a Runner","u":"/docs/workflows-github-actions","h":"#deploy-a-runner","p":862},{"i":869,"t":"Deploy a runner available for all repositories of an organization (you can fine-tune the access via GitHub Settings) Provide the token previously created, and the organization name export GITHUB_PAT=\"TOKEN\" export GITHUB_OWNER=My-Org Deploy the runner for the organization: helm install actions-runner openshift-actions-runner/actions-runner \\ --set-string githubPat=$GITHUB_PAT \\ --set-string githubOwner=$GITHUB_OWNER \\ --set runnerLabels=\"{ dsri, $GITHUB_OWNER }\" \\ --set replicas=3 \\ --set serviceAccountName=anyuid \\ --set memoryRequest=\"512Mi\" \\ --set memoryLimit=\"100Gi\" \\ --set cpuRequest=\"100m\" \\ --set cpuLimit=\"64\" You can also change the default runner image: --set runnerImage=ghcr.io/vemonet/github-actions-conda-runner \\ --set runnerTag=latest Check out all available parameters here Check the deployment: helm get manifest actions-runner | kubectl get -f - Go to your organization Settings page on GitHub, then go to the Actions tab, and scroll to the bottom. In the list of active runners you should see the runners you just deployed.","s":"For an organization","u":"/docs/workflows-github-actions","h":"#for-an-organization","p":862},{"i":871,"t":"You can also deploy a runner for a specific repository: export GITHUB_PAT=\"TOKEN\" # For an org runner, this is the org. # For a repo runner, this is the repo owner (org or user). export GITHUB_OWNER=vemonet # For an org runner, omit this argument. # For a repo runner, the repo name. export GITHUB_REPO=shapes-of-you Deploy the runner: helm install actions-runner openshift-actions-runner/actions-runner \\ --set-string githubPat=$GITHUB_PAT \\ --set-string githubOwner=$GITHUB_OWNER \\ --set-string githubRepository=$GITHUB_REPO \\ --set runnerLabels=\"{ dsri, anything-helpful }\"","s":"For a repository","u":"/docs/workflows-github-actions","h":"#for-a-repository","p":862},{"i":873,"t":"You can now set up GitHub Actions workflows, in the .github/workflows folder, to be run on this runner (the repository needs to be under the organization or user you added the workflow to). The job will be sent to run on the DSRI: jobs: your-job: runs-on: [\"self-hosted\", \"dsri\", \"my-org\" ] steps: ...","s":"Define Actions to run on DSRI","u":"/docs/workflows-github-actions","h":"#define-actions-to-run-on-dsri","p":862},{"i":875,"t":"helm uninstall actions-runner","s":"Uninstall the runner","u":"/docs/workflows-github-actions","h":"#uninstall-the-runner","p":862},{"i":877,"t":"Experimental This deployment workflow is still experimental, let us know on Slack if you are interested in using it. Alternatively you can also build and deploy your application using a GitHub Actions workflow.
You will need to connect to the UM VPN in your workflow by defining 2 secrets for VPN_USER and VPN_PASSWORD, this is done by this step: - name: Connect to the VPN run: | sudo apt-get install -y openconnect network-manager-openconnect echo '${{ secrets.VPN_PASSWORD }}' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl sleep 10 RedHat documentation RedHat provides the following instructions and template to deploy an application on OpenShift The OpenShift Starter workflow will: Checkout your repository Perform a Docker build Push the built image to an image registry Log in to your OpenShift cluster Create an OpenShift app from the image and expose it to the internet. Before you begin: Have write access to a container image registry such as quay.io or Dockerhub. Have access to an OpenShift cluster. For instructions to get started with OpenShift see https://www.openshift.com/try The project you wish to add this workflow to should have a Dockerfile. If you don't have a Dockerfile at the repository root, see the buildah-build step. Builds from scratch are also available, but require more configuration. To get the workflow running: Add this workflow to your repository. Edit the top-level 'env' section, which contains a list of environment variables that must be configured. Create the secrets referenced in the 'env' section under your repository Settings. Edit the 'branches' in the 'on' section to trigger the workflow on a push to your branch. Commit and push your changes. For a more sophisticated example, see https://github.com/redhat-actions/spring-petclinic/blob/main/.github/workflows/petclinic-sample.yaml Also see our GitHub organization, https://github.com/redhat-actions/ name: Deploy to OpenShift# ⬇️ Modify the fields marked with ⬇️ to fit your project, and create any secrets that are referenced.# https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secretsenv: # ⬇️ EDIT with your registry and registry path. REGISTRY: ghcr.io/maastrichtu-ids # ⬇️ EDIT with your registry username. REGISTRY_USER: REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }} # ⬇️ EDIT to log into your OpenShift cluster and set up the context. # See https://github.com/redhat-actions/oc-login#readme for how to retrieve these values. OPENSHIFT_SERVER: ${{ secrets.OPENSHIFT_SERVER }} OPENSHIFT_TOKEN: ${{ secrets.OPENSHIFT_TOKEN }} # ⬇️ EDIT with the port your application should be accessible on. APP_PORT: 8080 # ⬇️ EDIT if you wish to set the kube context's namespace after login. Leave blank to use the default namespace. OPENSHIFT_NAMESPACE: \"\" # If you wish to manually provide the APP_NAME and TAG, set them here, otherwise they will be auto-detected. APP_NAME: \"my-app\" TAG: \"\"on: # https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows push: # Edit to the branch(es) you want to build and deploy on each push. 
branches: [ main ]jobs: openshift-ci-cd: name: Build and deploy to OpenShift runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 - name: Determine app name if: env.APP_NAME == '' run: | echo \"APP_NAME=$(basename $PWD)\" | tee -a $GITHUB_ENV - name: Determine tag if: env.TAG == '' run: | echo \"TAG=${GITHUB_SHA::7}\" | tee -a $GITHUB_ENV # https://github.com/redhat-actions/buildah-build#readme - name: Build from Dockerfile uses: redhat-actions/buildah-build@v1 with: image: ${{ env.APP_NAME }} tag: ${{ env.TAG }} # If you don't have a dockerfile, see: # https://github.com/redhat-actions/buildah-build#building-from-scratch # Otherwise, point this to your Dockerfile relative to the repository root. dockerfiles: | ./Dockerfile # https://github.com/redhat-actions/push-to-registry#readme - name: Push to registry id: push-to-registry uses: redhat-actions/push-to-registry@v1 with: image: ${{ env.APP_NAME }} tag: ${{ env.TAG }} registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} # The path the image was pushed to is now stored in ${{ steps.push-to-registry.outputs.registry-path }} - name: Connect to the VPN run: | sudo apt-get install -y openconnect network-manager-openconnect echo '${{ secrets.VPN_PASSWORD }}' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl sleep 10 # oc-login works on all platforms, but oc must be installed first. # The GitHub Ubuntu runner already includes oc. # https://github.com/redhat-actions/oc-login#readme - name: Log in to OpenShift uses: redhat-actions/oc-login@v1 with: openshift_server_url: ${{ env.OPENSHIFT_SERVER }} openshift_token: ${{ env.OPENSHIFT_TOKEN }} insecure_skip_tls_verify: true namespace: ${{ env.OPENSHIFT_NAMESPACE }} # This step should create a deployment, service, and route to run your app and expose it to the internet. # Feel free to replace this with 'oc apply', 'helm install', or however you like to deploy your app. 
- name: Create and expose app run: | export IMAGE=\"${{ steps.push-to-registry.outputs.registry-path }}\" export PORT=${{ env.APP_PORT }} export SELECTOR=\"app=${{ env.APP_NAME }}\" echo \"SELECTOR=$SELECTOR\" >> $GITHUB_ENV set -x # Take down any old deployment oc delete all --selector=\"$SELECTOR\" oc new-app --name $APP_NAME --docker-image=\"$IMAGE\" # Make sure the app port is exposed oc patch svc $APP_NAME -p \"{ \\\"spec\\\": { \\\"ports\\\": [{ \\\"name\\\": \\\"$PORT-tcp\\\", \\\"port\\\": $PORT }] } }\" oc expose service $APP_NAME --port=$PORT oc get all --selector=\"$SELECTOR\" set +x export ROUTE=\"$(oc get route $APP_NAME -o jsonpath='{.spec.host}')\" echo \"$APP_NAME is exposed at $ROUTE\" echo \"ROUTE=$ROUTE\" >> $GITHUB_ENV - name: View application route run: | [[ -n ${{ env.ROUTE }} ]] || (echo \"Determining application route failed in previous step\"; exit 1) echo \"======================== Your application is available at: ========================\" echo ${{ env.ROUTE }} echo \"===================================================================================\" echo echo \"Your app can be taken down with: \\\"oc delete all --selector='${{ env.SELECTOR }}'\\\"\"","s":"Deploy using GitHub Actions workflows","u":"/docs/workflows-github-actions","h":"#deploy-using-github-actions-workflows","p":862},{"i":879,"t":"GitHub runner chart repository: https://github.com/redhat-actions/openshift-actions-runner-chart Image for the runner: https://github.com/redhat-actions/openshift-actions-runner An action to automatically deploy a runner on a cluster (require to run openconnect to VPN first): https://github.com/redhat-actions/openshift-actions-runner","s":"See also","u":"/docs/workflows-github-actions","h":"#see-also","p":862}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/2",[0,3.628,1,5.119,2,5.495,3,5.447,4,5.828,5,2.958,6,2.16,7,3.628,8,2.958,9,2.958,10,2.16,11,3.628,12,1.194,13,2.958,14,2.958,15,3.628,16,3.628,17,1.194,18,1.194,19,2.958,20,5.119,21,5.119,22,2.958,23,2.16,24,3.628,25,3.628,26,2.958,27,2.958,28,5.119,29,3.628,30,2.958,31,2.16,32,3.628,33,3.628,34,2.958,35,1.194,36,2.16,37,4.199,38,3.628,39,5.662,40,3.628,41,4.69,42,4.199,43,1.194,44,2.958,45,2.16,46,2.958,47,2.958,48,2.16,49,4.69,50,3.391,51,2.958,52,2.958,53,2.16,54,4.199,55,2.958,56,3.628,57,2.958,58,2.16,59,5.828,60,5.495,61,2.16,62,3.628,63,1.194,64,3.628,65,3.628,66,2.958,67,5.119,68,2.019,69,2.16,70,2.16,71,2.16,72,1.194,73,2.16,74,2.16,75,2.958,76,3.628,77,2.958,78,1.194,79,2.16,80,2.958,81,1.194,82,1.194,83,2.958,84,2.16,85,1.194,86,3.628,87,1.194,88,2.958,89,2.16,90,1.194,91,2.958,92,2.958,93,2.958,94,5.119,95,2.958,96,2.958,97,2.958,98,2.958,99,2.958,100,2.958,101,2.958,102,2.958,103,1.194,104,2.16,105,3.628,106,1.194,107,2.16,108,1.194,109,3.628,110,2.16,111,2.958,112,1.194,113,1.194,114,1.194,115,1.194,116,2.16,117,2.16,118,1.194,119,2.16,120,2.958,121,1.194,122,1.194,123,2.37,124,2.958,125,2.16,126,1.194,127,1.194,128,1.194,129,1.194,130,1.194,131,2.16,132,1.194,133,3.628,134,4.69,135,1.194,136,2.16,137,1.194,138,1.194,139,1.194,140,2.16,141,2.16,142,1.194,143,1.194,144,1.194,145,1.194,146,1.194,147,2.16,148,1.194,149,2.958]],["t/4",[0,3.628,1,5.119,2,5.495,3,5.447,4,5.828,5,2.958,6,2.16,7,3.628,8,2.958,9,2.958,10,2.16,11,3.628,12,1.194,13,2.958,14,2.958,15,3.628,16,3.628,17,1.194,18,1.194,19,2.958,20,5.119,21,5.119,22,2.958,23,2.16,24,3.628,25,3.628,26,2.958,27,2.958,28,5.119,29,3.628,30,2.958,31,2.16,32,3.628,33,3.628,34,2.958,35,1.194,36,2.16,37,4.199,38,3.628,39,5.662,40,3.628,41,4.
2,5.073,823,5.073,824,5.073,825,3.63,826,5.073,827,5.073,828,5.073,829,5.073,830,4.615,831,5.073,832,5.073,833,5.073,834,5.073,835,5.073,836,5.073,837,4.088]],["t/90",[259,4.559,344,4.231,357,4.16,366,4.655,544,3.573,662,4.306,663,5.441,763,5.446,764,6.925,838,7.612,839,7.612,840,7.612,841,5.446,842,7.612,843,7.612,844,7.612]],["t/93",[123,3.185,157,4.997,165,4.86,171,1.559,179,3.185,218,3.012,228,4.777,269,1.497,270,3.221,286,4.068,407,4.068,436,4.178,503,3.733,509,2.413,512,2.732,544,2.346,545,2.764,684,4.178,731,5.273,845,4.777,846,5.273,847,7.53,848,3.399,849,3.714,850,3.572,851,5.273,852,5.273,853,3.792,854,3.334,855,4.594,856,4.3]],["t/95",[198,2.978,248,2.493,249,4.093,276,2.29,297,5.063,338,5.507,418,3.802,422,2.29,503,2.738,507,4.89,509,2.659,544,2.585,545,3.046,626,5.265,670,4.373,847,6.218,848,2.493,854,3.675,857,5.507,858,4.67,859,5.507,860,3.937,861,4.273,862,4.604,863,6.835]],["t/97",[156,2.465,168,1.878,170,1.036,179,3.358,183,1.403,195,1.22,228,5.037,252,2.424,258,2.245,269,1.071,270,1.726,287,3.877,293,2.155,350,1.238,438,1.726,439,4.034,503,2.62,507,3.173,509,1.726,544,3.243,552,2.347,621,1.911,662,2.508,696,3.075,730,3.173,848,2.385,849,3.916,855,6.772,858,2.347,861,4.088,862,4.405,864,3.416,865,4.317,866,2.987,867,6.259,868,6.604,869,4.435,870,4.435,871,3.173,872,3.173,873,4.435,874,3.31,875,4.435,876,5.269,877,3.77,878,4.435,879,3.075,880,4.034,881,4.034,882,3.173,883,4.034,884,4.435]],["t/99",[195,1.267,306,3.333,503,3.234,507,5.774,544,3.053,545,3.597,606,4.411,855,5.979,856,5.596,860,4.649,885,3.516,886,7.342,887,4.565]],["t/101",[195,1.195,418,3.675,507,5.446,857,6.134,858,4.029,867,7.611,868,6.472,888,8.593,889,7.612,890,5.446,891,6.472,892,7.612,893,7.612,894,7.612,895,7.612]],["t/103",[161,5.932,162,3.389,170,1.629,313,2.281,418,2.715,507,4.992,544,2.639,613,3.183,670,4.465,731,5.932,856,4.838,867,7.204,896,8.941,897,4.992,898,4.838,899,6.977,900,5.622,901,6.347,902,6.977,903,6.977,904,3.482,905,6.977]],["t/105",[162,2.278,195,1.041,231,2.827,269,1.601,299,4.745,335,3.624,555,3.82,620,4.055,867,5.344,906,8.652,907,6.19,908,4.35,909,5.108,910,6.632,911,6.632,912,6.632,913,6.632,914,6.632,915,6.632,916,6.632,917,6.632,918,6.632,919,6.632,920,6.632,921,6.033,922,5.344,923,6.632,924,6.033,925,6.632]],["t/107",[168,1.924,170,1.564,179,4.472,228,6.708,233,1.896,270,2.606,361,2.738,503,3.489,507,4.792,509,2.606,510,2.985,544,2.534,605,3.253,849,5.215,855,4.962,876,5.397,879,4.644,880,6.093,881,6.093,926,4.394,927,5.397,928,3.789,929,4.962,930,6.698,931,6.698,932,5.695]],["t/109",[257,2.219,262,2.579,264,2.421,329,3.441,381,4.347,483,3.547,933,5.634]],["t/111",[162,1.933,183,2.049,248,2.053,250,2.017,269,1.359,270,2.19,276,1.885,280,2.698,329,2.255,331,4.642,378,2.19,383,2.211,418,2.19,472,2.734,485,2.149,518,2.508,519,3.076,558,4.692,591,4.55,618,3.601,621,3.82,646,3.128,766,4.169,904,2.809,933,5.084,934,2.425,935,3.868,936,3.304,937,3.026,938,3.601,939,5.12,940,4.535,941,5.628,942,5.628,943,3.902,944,5.741,945,2.664]],["t/113",[169,2.162,184,2.593,248,2.685,249,4.408,265,4.24,269,2.233,270,2.864,288,3.78,305,3.958,326,5.67,329,2.949,591,4.322,944,5.453,946,7.125,947,5.353,948,5.931,949,4.71]],["t/115",[168,2.114,169,2.162,190,4.091,195,1.156,253,2.736,280,3.529,350,2.581,392,3.896,485,2.81,517,3.575,520,3.04,558,4.896,559,3.78,803,5.67,933,4.829,950,9.25,951,4.829]],["t/117",[195,1.422,348,6.277,574,4.719,933,5.939]],["t/119",[199,2.228,310,5.381,377,4.435,952,7.7,953,7.7,954,7.7,955,7.7,956,7.7,957,7.7,958,7.7,959,7.7,960,7.7,961,6.546,962,7.7,963,5.704,964,7.7]],["t/
121",[162,1.902,163,0.611,171,0.523,183,2.278,242,2.401,260,2.598,262,1.663,280,1.708,281,1.221,302,1.165,310,1.176,313,0.68,343,2.227,346,3.975,359,2.693,362,1.176,367,1.038,382,0.937,422,0.697,432,1.33,435,1.601,443,1.768,454,0.833,576,2.227,591,1.221,613,0.948,622,1.052,633,1.768,652,1.441,656,1.364,694,1.891,700,1.487,715,1.891,740,1.221,762,1.487,860,1.198,871,2.549,901,1.891,909,1.601,933,4.767,938,1.33,945,2.213,961,4.71,963,3.463,965,1.675,966,1.601,967,1.675,968,2.079,969,1.768,970,1.891,971,2.744,972,3.601,973,1.601,974,1.136,975,5.778,976,2.079,977,2.079,978,5.235,979,3.563,980,6.518,981,1.401,982,3.241,983,3.975,984,2.079,985,2.079,986,2.079,987,2.079,988,2.079,989,2.079,990,2.079,991,2.079,992,2.079,993,2.079,994,2.079,995,2.079,996,2.079,997,2.079,998,2.079,999,2.079,1000,2.079,1001,1.33,1002,2.079,1003,2.079,1004,2.079,1005,1.768,1006,2.079,1007,2.079,1008,1.601,1009,1.891,1010,1.33,1011,2.079,1012,1.245,1013,1.601,1014,2.079,1015,2.871,1016,2.079,1017,2.079,1018,2.079,1019,2.079,1020,2.079,1021,3.563,1022,2.079,1023,2.079,1024,3.563,1025,5.54,1026,3.241,1027,2.079,1028,2.079,1029,3.563,1030,3.563,1031,1.768,1032,2.079,1033,3.563,1034,2.079,1035,1.675,1036,5.54,1037,2.079,1038,2.079,1039,1.54,1040,1.54,1041,1.768,1042,3.563,1043,2.079,1044,3.563,1045,2.079,1046,2.079,1047,2.079,1048,2.079,1049,2.079,1050,2.079,1051,1.891,1052,2.079,1053,2.639,1054,2.079,1055,2.079,1056,2.079,1057,2.079,1058,1.891,1059,2.079,1060,2.079,1061,1.891,1062,2.079,1063,2.079,1064,2.079,1065,1.891,1066,2.079,1067,1.441,1068,2.549,1069,1.768,1070,2.079,1071,1.891]],["t/123",[163,1.338,171,1.145,183,1.863,185,3.873,218,2.213,254,3.158,256,3.993,269,1.1,273,3.259,282,2.848,341,4.459,348,3.158,349,3.259,382,2.054,422,1.526,432,2.915,587,2.848,646,2.532,652,6.021,667,3.53,708,3.509,719,2.624,728,4.144,933,5.696,970,4.144,978,3.509,981,4.493,1001,2.915,1015,3.671,1039,4.94,1058,4.144,1072,5.67,1073,4.555,1074,4.555,1075,3.259,1076,4.555,1077,4.555,1078,4.555,1079,4.555,1080,2.988,1081,4.555,1082,4.555,1083,4.555,1084,4.555,1085,4.555,1086,3.509,1087,2.274,1088,4.555,1089,4.144,1090,2.728,1091,2.988,1092,3.374,1093,4.144,1094,3.509,1095,3.374,1096,3.259,1097,2.674,1098,3.509,1099,2.674]],["t/125",[183,1.283,218,2.905,234,4.606,256,4.089,262,1.796,331,3.581,341,3.071,349,4.278,350,1.669,412,3.117,417,1.88,467,3.511,667,3.165,729,4.818,933,5.299,963,4.43,978,6.222,981,4.028,1015,6.509,1039,4.43,1067,4.146,1072,5.084,1090,3.581,1092,4.43,1098,4.606,1099,3.511,1100,5.44,1101,4.028,1102,7.371,1103,5.98,1104,5.98,1105,5.44,1106,3.738,1107,4.146,1108,5.98,1109,5.98]],["t/127",[195,1.176,234,4.127,242,2.751,245,4.317,256,3.791,272,4.913,276,1.795,341,3.846,349,3.833,359,3.086,422,1.795,620,3.276,623,3.715,667,3.964,860,3.086,933,3.514,978,4.127,981,5.046,1001,3.428,1039,3.969,1072,4.555,1090,3.209,1092,3.969,1095,3.969,1096,3.833,1098,4.127,1106,3.349,1107,3.715,1110,3.209,1111,5.77,1112,5.358,1113,7.49,1114,5.358,1115,3.609,1116,4.874,1117,3.276,1118,4.127,1119,5.358,1120,5.358,1121,4.555,1122,4.127,1123,5.358,1124,5.358,1125,5.358,1126,4.874,1127,5.358,1128,5.358]],["t/129",[195,1.308,218,3.04,256,3.168,262,1.88,322,7.21,330,4.106,331,3.749,341,3.214,636,3.001,667,3.313,749,5.694,860,3.606,933,4.106,963,4.637,1039,6.934,1067,4.34,1095,4.637,1096,4.478,1097,3.675,1098,4.822,1099,3.675,1100,5.694,1105,5.694,1118,4.822,1129,8.329,1130,6.26,1131,8.329,1132,6.26,1133,6.26,1134,6.26]],["t/131",[160,0.779,162,1.42,168,0.712,171,0.851,176,0.955,183,1.48,184,1.457,186,0.678,195,0.835,202,0.904,242,0.707,254,2.346,256,1.712
,258,1.255,260,0.766,262,1.433,268,0.636,269,0.599,280,0.66,282,0.861,283,1.763,293,1.363,304,1.11,306,0.569,313,0.45,315,1.11,328,2.34,334,0.985,341,4.06,342,1.205,343,1.55,344,0.766,345,1.093,348,4.558,352,0.779,361,1.014,362,0.779,369,1.118,382,0.621,384,1.253,410,0.955,412,1.293,417,0.433,422,1.599,426,1.11,429,2.876,432,0.881,438,0.536,444,1.02,451,1.11,454,0.552,467,1.456,473,0.636,478,1.485,537,1.671,538,1.837,568,0.861,576,1.55,592,1.11,613,0.628,637,1.061,640,1.671,652,4.304,658,0.766,667,0.729,690,0.985,704,4.839,708,1.061,717,2.606,719,0.793,726,1.911,730,0.985,735,1.837,736,2.421,837,1.11,860,1.949,877,1.171,883,1.253,921,1.253,932,1.171,933,5.315,961,2.109,963,2.506,971,1.061,972,1.911,973,1.061,974,0.753,975,1.171,978,1.911,980,1.171,981,0.928,982,1.253,1013,3.677,1031,2.109,1035,1.11,1067,0.955,1089,1.253,1090,0.825,1095,4.87,1097,1.456,1102,2.726,1106,0.861,1111,1.061,1135,2.109,1136,1.171,1137,1.999,1138,6.575,1139,6.208,1140,1.377,1141,2.48,1142,4.773,1143,1.377,1144,3.383,1145,2.48,1146,1.377,1147,1.377,1148,2.109,1149,1.171,1150,1.377,1151,1.377,1152,2.256,1153,1.11,1154,0.904,1155,1.377,1156,1.377,1157,2.256,1158,1.377,1159,1.55,1160,1.061,1161,2.48,1162,1.377,1163,1.377,1164,2.256,1165,1.253,1166,1.377,1167,2.109,1168,1.377,1169,1.377,1170,0.842,1171,1.377,1172,1.377,1173,1.377,1174,1.377,1175,1.171,1176,1.377,1177,1.377,1178,1.02,1179,1.377,1180,1.11,1181,5.794,1182,3.763,1183,1.377,1184,1.171,1185,1.061,1186,1.377,1187,1.377,1188,1.377,1189,1.774,1190,1.253,1191,1.377,1192,1.171,1193,1.377,1194,1.377,1195,1.377,1196,1.377,1197,1.377,1198,1.377,1199,1.377,1200,1.377,1201,1.377,1202,1.377,1203,2.256,1204,2.876,1205,1.377,1206,1.377,1207,1.377,1208,1.377,1209,1.377,1210,1.377,1211,1.377,1212,1.377,1213,1.171,1214,0.985,1215,1.253,1216,1.11,1217,1.377,1218,1.403,1219,1.253,1220,1.253,1221,1.377,1222,1.377,1223,1.11,1224,1.377,1225,0.729,1226,2.48,1227,2.48,1228,1.377,1229,1.377,1230,2.48,1231,2.48,1232,1.377,1233,1.377,1234,1.377,1235,1.377,1236,1.377,1237,2.876,1238,1.377,1239,1.377,1240,1.377,1241,2.48,1242,1.377,1243,1.377,1244,1.377,1245,1.377,1246,1.377,1247,1.171,1248,1.377,1249,1.253,1250,1.377,1251,2.48,1252,1.171,1253,1.253,1254,1.377,1255,1.377,1256,1.02,1257,1.377,1258,1.377]],["t/133",[257,2.308,264,2.478,313,2.921,434,3.23]],["t/135",[170,1.409,183,1.294,195,1.276,226,2.856,264,2.189,266,4.236,267,5.635,308,3.413,313,3.356,316,3.835,352,3.413,362,3.413,413,3.542,417,1.897,434,2.182,513,2.856,514,5.772,570,3.354,656,3.958,936,3.542,1259,4.47,1260,3.476,1261,5.489,1262,6.034,1263,5.489,1264,5.489,1265,6.034]],["t/137",[123,2.979,163,2.072,169,1.111,170,0.883,171,1.459,183,1.245,184,1.333,186,2.856,188,1.595,195,0.911,231,1.612,233,2.241,236,1.278,237,1.321,239,1.501,248,2.887,249,3.475,257,2.205,264,2.363,269,0.913,276,1.944,302,1.897,313,2.791,327,2.266,331,2.266,337,2.034,345,1.667,350,1.056,361,1.546,411,2.103,412,1.972,417,1.189,418,2.746,434,1.368,438,2.258,475,2.365,485,2.215,503,1.516,583,1.862,617,2.802,621,3.041,623,2.623,630,2.482,644,2.421,719,2.179,858,2.002,887,2.14,936,2.221,937,2.034,938,2.421,947,3.071,983,3.216,1111,2.914,1266,5.228,1267,2.067,1268,3.216,1269,3.783,1270,3.783,1271,2.707,1272,2.548,1273,3.048,1274,3.992,1275,3.441,1276,2.548]],["t/139",[171,1.851,199,2.927,226,3.484,257,1.902,308,4.164,313,3.024,320,4.324,417,2.314,434,3.953,513,3.484,544,2.784,850,5.328]],["t/141",[171,1.477,189,3.211,216,3.731,226,2.781,243,5.377,266,4.725,267,4.073,269,1.418,276,1.968,287,2.932,313,2.61,320,2.746,369,3.598,413,3.449,434,2.886,472,2.854,492,2
.452,514,4.686,573,3.518,613,2.68,848,2.912,850,3.384,891,4.995,926,3.854,936,3.449,1218,3.323,1277,5.875,1278,4.989,1279,3.958,1280,5.344,1281,5.875,1282,3.958,1283,3.449,1284,5.875]],["t/143",[168,1.815,171,1.588,183,1.798,184,2.226,186,3.111,195,0.992,231,2.693,250,2.265,266,4.902,316,3.924,320,4.872,350,1.763,369,2.849,434,3.03,472,3.069,492,2.637,613,3.823,1278,3.95,1285,6.456,1286,6.753,1287,4.681,1288,5.372,1289,6.319]],["t/145",[170,1.079,171,1.161,189,2.524,195,0.725,225,2.824,244,2.968,248,1.685,264,1.894,265,4.581,266,4.556,269,1.626,293,1.522,313,2.858,316,3.318,320,3.149,352,2.612,369,2.082,380,2.887,397,2.711,417,1.452,418,1.797,434,2.875,438,1.797,472,2.243,492,1.927,514,6.029,556,2.612,570,2.567,605,2.243,696,3.202,700,3.304,848,3.189,850,4.581,853,2.824,854,2.483,885,2.012,938,2.955,949,2.955,1159,2.887,1278,2.887,1283,2.711,1286,3.721,1290,4.618,1291,3.721,1292,3.721,1293,4.618]],["t/147",[162,1,163,2.009,183,1.727,199,0.842,218,1.414,231,1.241,233,0.824,235,2.242,239,1.872,242,1.495,253,1.76,256,3.011,264,0.693,269,1.944,280,1.396,281,1.709,293,0.959,307,1.909,329,1.166,342,1.414,343,1.82,361,1.929,418,1.133,427,2.018,428,1.517,479,1.433,509,1.133,510,1.297,586,4.011,599,1.82,621,2.564,633,2.475,807,3.638,908,3.095,951,5.561,1086,2.242,1294,7.768,1295,6.453,1296,2.648,1297,8.05,1298,2.911,1299,2.911,1300,2.911,1301,2.911,1302,2.826,1303,2.648,1304,2.911,1305,2.911,1306,3.178,1307,2.648,1308,4.503,1309,2.95,1310,2.911,1311,2.911,1312,2.911,1313,2.911,1314,2.911,1315,2.911,1316,2.911,1317,2.911,1318,2.911,1319,5.058,1320,2.911,1321,2.911,1322,2.911,1323,2.911,1324,2.242,1325,2.911,1326,2.648,1327,3.427,1328,4.718,1329,2.911,1330,2.475,1331,4.718,1332,4.718,1333,4.718,1334,4.718,1335,2.911,1336,2.911,1337,2.911,1338,2.911,1339,2.911]],["t/149",[257,2.278,262,2.647,264,2.1,355,5.511,381,4.462,483,3.64]],["t/151",[163,2.007,170,1.596,191,2.823,195,1.385,198,2.978,216,3.195,233,2.498,236,2.309,237,2.387,239,2.711,253,2.61,257,1.766,269,1.65,316,4.077,355,6.108,476,2.634,483,2.823,657,3.617,1340,4.739,1341,4.89,1342,4.739]],["t/153",[152,2.628,163,2.061,168,1.404,169,1.435,170,1.918,183,1.049,191,3.391,195,1.409,199,1.414,203,3.604,206,3.765,215,2.548,236,1.651,237,1.707,239,1.939,250,1.752,251,2.717,264,1.165,316,2.04,355,6.185,422,1.638,469,4.156,476,1.884,483,2.019,485,1.866,503,1.958,552,2.587,558,2.587,559,2.51,593,2.474,606,2.671,626,3.765,711,2.927,739,3.206,848,1.783,1340,3.389,1343,8.081,1344,4.888,1345,3.939,1346,4.447,1347,7.019,1348,3.939,1349,4.447,1350,6.385,1351,4.888,1352,5.656]],["t/155",[152,2.609,163,1.425,168,1.394,169,1.425,170,1.91,183,1.041,188,2.046,191,3.378,195,1.488,199,1.404,203,3.586,215,2.529,236,1.639,237,1.695,239,1.925,250,1.739,251,2.697,264,1.156,293,1.599,316,2.025,355,5.925,417,1.525,418,1.888,422,2.339,469,4.126,476,1.87,483,2.004,485,1.853,498,3.364,544,1.836,552,2.568,558,2.568,559,2.492,606,2.652,626,3.738,739,3.183,848,1.77,934,3.009,1340,3.364,1345,3.91,1346,4.414,1349,4.414,1352,5.626,1353,8.647,1354,4.853,1355,4.126]],["t/158",[163,1.928,170,1.534,191,3.55,195,1.35,202,4.307,233,1.859,236,2.218,237,2.294,239,2.605,242,3.372,257,1.697,287,3.277,316,2.74,355,5.374,380,4.105,476,2.531,483,2.712,485,2.507,544,2.484,636,3.148,657,3.475,1149,5.583,1340,4.553,1341,4.698,1356,8.597,1357,5.583,1358,5.291,1359,6.567]],["t/160",[163,1.804,170,1.435,191,3.397,195,1.292,233,1.74,236,2.075,237,2.146,239,2.438,262,1.845,302,2.009,316,2.564,328,3.476,355,3.841,370,3.415,382,2.77,410,4.26,476,2.368,483,2.538,485,2.346,624,5.59,657,3.252,667,3.252,945,
3.894,1013,4.733,1154,5.396,1184,5.224,1340,4.26,1341,4.396,1360,8.227,1361,4.552,1362,4.396,1363,6.145,1364,6.145,1365,6.145]],["t/163",[163,1.873,170,1.49,191,3.483,195,1.002,233,2.388,236,2.155,237,2.228,264,1.52,269,1.54,300,3.184,316,2.662,355,3.988,476,3.251,483,2.634,485,2.435,580,3.901,636,3.058,657,3.376,750,5.14,936,3.745,1278,3.988,1340,4.423,1341,4.564,1366,9.449,1367,8.434,1368,4.423,1369,3.901,1370,6.379,1371,6.379]],["t/165",[162,1.385,163,2.156,168,1.159,170,1.423,183,0.865,191,2.516,195,1.284,231,1.719,233,2.316,253,2.172,264,0.961,269,1.471,270,1.57,276,1.351,313,2.675,316,1.683,320,3.823,342,1.959,355,2.522,380,2.522,392,2.135,411,2.242,412,2.102,434,2.655,476,2.348,485,1.54,492,2.542,509,1.57,510,1.798,512,1.777,513,1.909,514,2.368,520,1.666,545,1.798,553,2.646,605,1.959,657,2.135,665,2.522,858,3.885,879,2.797,928,2.282,936,2.368,1102,3.25,1107,2.797,1283,2.368,1340,2.797,1341,2.886,1368,2.797,1372,3.67,1373,3.43,1374,10.461,1375,3.67,1376,4.034,1377,6.764,1378,3.107,1379,4.034,1380,4.034]],["t/167",[156,3.878,170,1.629,195,1.096,215,3.637,254,4.838,257,1.803,264,1.662,355,4.362,383,2.741,428,3.637,658,3.878,934,3.006,965,5.622,1026,6.347,1092,5.168,1175,5.932,1357,5.932,1381,8.941,1382,5.622,1383,5.932,1384,6.977,1385,6.977,1386,6.977,1387,6.977,1388,6.977]],["t/169",[257,2.248,262,2.613,264,2.073,381,4.404,483,3.593,558,4.605,559,4.468]],["t/171",[168,1.266,169,0.786,170,1.52,171,0.673,175,1.638,183,0.946,184,1.553,188,1.129,191,1.106,195,1.13,199,1.625,201,1.984,203,1.375,204,1.464,236,1.489,237,1.539,241,1.237,251,1.489,252,1.464,255,1.916,258,1.356,262,1.323,265,1.543,269,0.647,276,0.897,284,1.92,288,1.375,293,0.883,302,1.441,305,1.44,313,2.675,320,1.252,329,1.073,335,1.464,350,0.747,361,1.802,364,2.716,366,1.638,367,2.803,368,1.543,369,1.207,375,1.714,376,1.714,377,2.539,378,1.042,381,2.231,382,1.207,383,2.207,417,0.842,422,0.897,434,2.354,435,2.063,470,1.916,476,2.509,483,1.106,492,1.839,509,1.042,510,1.193,511,1.757,512,1.18,513,2.086,517,1.301,518,2.503,519,1.464,558,2.333,559,4.682,579,1.757,583,1.318,591,1.572,593,1.356,604,3.557,622,1.356,636,2.113,807,3.435,848,1.608,861,1.674,865,1.489,874,2.843,885,2.836,904,1.337,929,1.984,1092,3.265,1099,1.572,1225,1.417,1368,1.857,1382,2.158,1389,4.16,1390,1.714,1391,2.063,1392,2.436,1393,2.158,1394,3.056,1395,2.063,1396,4.16,1397,2.678,1398,2.678,1399,2.436,1400,2.678,1401,2.678,1402,2.436,1403,2.678,1404,2.436,1405,2.277,1406,1.916,1407,2.277,1408,1.857,1409,2.436,1410,2.277,1411,2.063]],["t/173",[162,1.737,163,0.932,168,2.066,171,1.585,183,0.681,195,1.314,198,2.747,199,2.54,216,2.364,268,1.465,269,1.221,270,1.968,300,2.524,302,2.061,313,1.038,324,1.398,350,2.007,364,3.277,376,6.162,377,3.632,399,2.082,405,2.698,418,2.453,422,1.694,434,1.147,454,1.271,472,1.541,473,1.465,479,1.562,492,1.324,549,1.795,559,3.237,580,1.941,582,1.63,604,1.734,605,1.541,701,2.444,709,2.444,711,1.9,738,1.502,807,3.092,848,1.845,854,1.706,861,1.984,862,2.138,872,2.27,874,1.606,885,1.382,1012,1.9,1087,1.584,1107,2.2,1225,3.337,1352,2.557,1394,2.2,1395,2.444,1396,2.351,1412,5.36,1413,5.156,1414,4.299,1415,4.299,1416,3.173,1417,5.36,1418,4.299,1419,4.371,1420,2.887,1421,2.887,1422,2.887,1423,3.173,1424,2.698,1425,2.698,1426,2.887,1427,2.887,1428,2.138]],["t/175",[162,2.213,171,1.093,172,2.301,175,2.659,191,1.796,195,1.333,225,2.659,241,3.921,242,2.233,250,1.559,252,2.376,268,2.008,282,2.718,298,2.505,323,2.782,345,1.916,350,1.214,364,1.814,382,2.905,389,3.221,413,2.553,423,3.349,520,2.661,545,2.872,552,2.301,593,3.886,605,2.112,665,4.029,848,2.
351,850,2.505,865,2.417,874,5.219,904,3.216,1154,2.852,1189,3.111,1294,3.349,1327,4.891,1361,3.221,1429,3.015,1430,5.688,1431,3.697,1432,3.697,1433,3.697,1434,3.697,1435,3.697,1436,3.697,1437,5.193,1438,3.697,1439,3.349,1440,5.479,1441,3.111,1442,3.349,1443,3.504,1444,3.015]],["t/177",[170,1.932,188,3.488,233,2.342,264,1.97,620,5.058,848,3.017,874,4.186,885,3.603,948,6.664,1445,7.524,1446,6.664]],["t/179",[257,2.278,262,2.647,264,2.1,374,6.112,381,4.462,483,3.64]],["t/181",[168,1.774,169,2.731,171,1.553,181,2.512,184,2.176,186,2.022,195,1.515,215,2.141,221,4.283,223,2.512,236,2.086,237,2.593,241,1.897,242,2.11,248,1.499,252,2.245,273,2.939,276,1.376,300,3.083,313,2.02,327,2.46,330,2.695,337,2.209,374,7.379,411,2.283,417,1.291,434,1.485,476,2.381,483,1.697,503,1.646,509,1.598,510,1.831,512,1.81,513,1.944,544,1.554,572,2.46,582,3.812,602,5.499,618,3.952,622,2.079,738,1.944,803,3.164,854,3.321,856,2.848,885,1.79,907,2.939,936,2.412,947,4.685,1369,2.512,1447,2.848,1448,3.737]],["t/183",[162,1.243,168,1.611,170,0.845,171,0.91,172,2.968,181,3.429,183,0.776,194,2.316,195,1.214,199,2.56,200,4.728,203,2.88,204,1.978,213,2.213,214,2.968,226,1.713,231,1.543,244,1.595,250,1.297,256,1.832,269,0.874,284,1.577,293,1.848,297,2.681,300,1.806,308,2.047,313,2.245,323,2.316,331,2.168,350,1.01,374,7.062,436,2.438,467,2.125,472,1.758,473,1.671,476,1.395,485,1.382,509,1.408,512,2.471,513,1.713,520,2.316,580,2.213,593,1.832,602,2.681,612,2.374,616,2.916,678,3.077,948,2.916,949,2.316,1001,2.316,1086,4.319,1099,2.125,1115,2.438,1122,2.788,1272,2.438,1302,2.168,1373,3.077,1413,2.047,1443,2.916,1449,3.619,1450,3.619,1451,2.916,1452,3.619,1453,3.619,1454,2.374,1455,2.589,1456,3.292,1457,3.619,1458,3.619,1459,3.619,1460,3.619,1461,4.154,1462,3.619,1463,3.619,1464,4.768,1465,3.619,1466,3.619,1467,3.292,1468,3.619,1469,3.619,1470,3.619]],["t/185",[169,2.162,189,4.023,195,1.156,198,3.207,199,2.677,231,3.138,313,2.407,364,3.072,374,7.013,377,5.328,558,3.896,848,2.685,1471,9.25,1472,7.792]],["t/187",[168,1.057,169,2.475,170,1.62,171,1.428,179,1.889,188,1.551,191,2.345,195,0.892,199,1.065,204,2.011,216,1.72,218,1.787,221,2.551,223,2.25,225,2.25,233,1.042,236,1.918,237,2.422,253,1.088,262,1.105,264,0.876,276,1.903,293,1.213,302,1.857,313,1.857,324,1.621,335,2.011,337,1.978,344,2.045,345,1.621,350,1.936,357,3.104,374,6.967,380,2.3,382,1.659,383,1.445,413,2.16,417,1.156,418,1.432,438,1.432,457,2.834,472,1.787,473,1.699,476,2.189,483,1.519,485,2.168,513,1.741,518,2.531,520,3.221,521,2.632,544,1.392,572,2.203,600,2.478,602,2.725,604,2.011,612,2.413,660,3.634,663,2.119,738,1.741,879,2.551,973,2.834,1012,2.203,1040,2.725,1170,4.241,1390,3.634,1406,2.632,1473,2.551,1474,3.679,1475,3.347,1476,2.632,1477,3.679,1478,3.347,1479,3.679,1480,3.347,1481,3.679]],["t/189",[152,1.714,160,1.803,162,1.743,165,1.872,168,1.458,169,1.491,170,1.477,171,1.59,183,0.684,186,2.499,188,1.345,189,1.742,191,2.096,195,0.501,199,0.923,216,2.956,219,1.803,231,2.696,236,1.077,237,2.209,248,1.852,249,1.909,250,1.143,253,1.502,264,1.876,266,2.646,276,1.701,293,1.051,302,2.742,320,3.68,324,1.405,327,1.909,335,1.742,336,2.711,337,1.714,345,1.405,350,1.765,361,1.303,370,3.515,374,7.228,412,1.662,418,1.241,438,1.241,473,1.472,475,1.993,485,1.938,492,1.33,517,1.549,520,2.612,521,2.281,544,1.206,582,1.637,605,1.549,621,1.374,644,2.04,662,1.803,738,1.509,854,2.729,858,2.686,866,3.419,887,1.803,927,2.569,928,1.803,936,1.872,937,2.729,938,2.04,945,1.509,1272,2.148,1274,3.577,1275,2.9,1276,2.148,1373,5.377,1475,2.9,1482,3.188,1483,3.188,1484,3.188,1485,2.211,1486,2.362,148
7,2.711]],["t/191",[257,2.708,264,2.157,357,4.948]],["t/193",[168,2.114,171,1.851,199,2.13,214,3.896,218,3.575,262,2.21,274,4.075,345,4.075,357,5.055,369,3.319,473,3.399,671,4.091,865,4.091,1012,4.408,1106,5.782,1218,4.164,1476,5.266]],["t/195",[169,1.612,170,1.282,171,1.38,184,1.934,188,2.315,195,1.196,199,2.532,203,2.819,216,2.566,264,1.308,274,3.356,281,3.223,293,1.809,313,3.09,320,2.566,357,4.163,364,2.291,366,3.357,368,3.162,369,2.475,376,3.513,377,3.162,406,3.927,434,1.985,435,4.228,457,5.868,473,2.535,483,2.267,544,2.077,552,2.905,618,3.513,711,3.288,719,4.388,763,3.927,890,3.927,1282,3.698,1283,3.223,1406,3.927,1488,6.93,1489,4.667,1490,4.667]],["t/197",[160,2.914,169,1.513,170,1.974,184,1.815,189,2.815,195,1.144,198,2.244,231,2.196,233,1.458,236,2.461,237,2.545,248,1.879,257,1.331,264,1.227,269,1.759,274,3.725,276,1.726,284,2.244,297,3.816,313,2.382,327,3.085,345,2.27,357,4.62,364,2.15,366,3.15,411,2.863,418,2.004,476,3.258,483,3.796,509,2.004,510,2.296,511,3.379,621,2.22,636,2.47,763,3.686,936,3.024,947,2.726,1068,3.686,1267,2.815,1302,3.085,1490,4.38,1491,5.152,1492,4.38,1493,5.152]],["t/199",[160,1.983,168,1.007,169,1.029,171,0.881,183,0.752,188,1.478,195,1.432,198,1.527,199,2.199,202,2.3,203,1.8,218,1.703,219,3.805,226,1.659,252,2.989,259,2.099,262,2.282,270,1.364,274,2.41,276,1.174,282,2.191,283,2.851,293,1.803,300,1.75,313,1.146,324,1.545,344,1.948,345,1.545,349,2.508,350,0.978,353,2.597,354,2.243,357,4.153,361,1.433,362,1.983,364,1.463,369,3.426,376,2.243,377,2.019,378,2.617,382,2.466,383,2.986,407,2.3,412,1.827,418,1.364,435,2.7,436,2.362,473,1.619,492,2.807,517,1.703,525,2.825,552,1.855,555,2.019,572,3.276,605,1.703,617,2.597,621,1.511,671,1.948,681,2.362,702,2.597,711,3.276,719,2.019,730,2.508,848,1.279,854,1.885,927,2.825,1005,2.98,1012,2.099,1068,2.508,1087,1.75,1106,2.191,1159,2.191,1225,1.855,1260,2.019,1272,2.362,1413,1.983,1454,2.3,1494,2.825,1495,3.506,1496,4.651,1497,2.7,1498,2.825,1499,3.506,1500,3.506,1501,3.506,1502,3.506]],["t/201",[152,3.971,162,0.966,163,0.826,164,2.559,168,0.808,170,0.657,171,0.707,175,1.72,176,1.95,179,2.356,183,0.984,187,4.174,195,1.16,199,0.814,213,3.554,215,1.466,219,1.591,223,1.72,231,2.477,233,0.796,236,1.55,237,1.603,248,2.12,249,1.685,250,3.123,251,1.564,253,1.984,257,1.186,264,1.76,267,1.95,281,1.652,288,1.445,302,1.5,327,1.685,328,1.591,342,2.823,345,2.022,350,0.785,357,4.567,361,1.15,364,1.174,411,1.564,417,1.442,422,0.942,432,2.936,454,2.686,457,2.167,468,2.392,479,1.385,485,1.074,503,1.838,509,1.095,510,1.254,520,1.895,535,2.392,606,1.537,609,2.559,612,3.01,656,1.845,667,1.489,671,1.564,674,3.01,783,1.591,858,2.429,859,2.267,866,1.895,868,2.392,887,1.591,904,1.404,936,1.652,937,1.513,938,1.8,971,2.167,1087,2.29,1107,1.95,1180,2.267,1189,2.013,1267,1.537,1327,2.643,1451,2.267,1503,2.813,1504,2.559,1505,1.95,1506,2.392,1507,2.392,1508,2.392,1509,2.869,1510,2.392,1511,6.099,1512,2.559,1513,2.559,1514,2.813,1515,2.559,1516,2.084,1517,1.685,1518,2.392,1519,2.813,1520,2.813,1521,2.813,1522,3.698,1523,5.812,1524,4.683,1525,2.559]],["t/203",[176,4.739,183,1.466,253,2.022,274,3.011,337,3.675,344,3.799,356,6.218,357,5.34,485,2.61,667,3.617,671,3.799,735,5.063,783,3.866,885,3.844,1455,4.89,1505,4.739,1506,5.811,1507,5.811,1508,5.811,1509,4.273,1510,5.811,1526,8.026,1527,6.835,1528,3.411]],["t/205",[152,2.353,163,1.285,179,3.325,183,1.389,188,1.846,195,1.017,198,1.907,218,2.126,248,2.362,250,2.321,253,1.295,257,1.991,264,1.836,274,1.928,280,2.098,288,2.248,290,3.371,327,2.621,335,2.392,342,2.126,357,4.969,359,4.44,364,1.826,485,2.943,503,1
.753,509,1.703,510,1.95,598,2.621,648,3.371,657,2.316,667,2.316,671,2.433,674,5.056,713,3.242,738,4.303,783,2.476,858,2.316,936,2.57,937,2.353,1053,3.242,1260,2.521,1476,3.131,1505,3.034,1506,3.721,1507,3.721,1508,3.721,1509,4.048,1510,3.721,1511,7.749,1517,4.616,1528,2.184,1529,4.377,1530,3.131,1531,3.981]],["t/207",[163,1.457,168,1.426,169,0.91,170,0.724,171,1.248,183,2.051,189,1.694,195,0.779,199,2.053,201,2.296,202,2.033,203,3.187,231,1.321,233,0.878,250,1.111,253,1.836,257,0.801,264,0.738,266,2.587,268,1.431,270,1.931,274,1.366,287,1.547,302,2.708,313,2.029,320,2.901,345,1.366,350,1.385,354,1.983,357,5.094,361,1.267,378,1.206,383,1.218,417,1.56,422,1.038,457,5.466,464,2.149,473,1.431,475,1.938,492,1.293,512,1.366,513,2.349,552,1.641,555,1.786,579,2.033,604,1.694,606,1.694,621,2.674,636,1.486,656,2.033,739,2.033,766,2.296,807,4.747,854,1.667,858,2.627,897,3.551,1097,1.82,1154,2.033,1247,4.22,1276,2.088,1324,2.388,1369,3.035,1489,5.277,1490,2.635,1509,1.938,1528,4.341,1532,4.22,1533,3.823,1534,4.22,1535,4.963,1536,3.1,1537,3.1,1538,3.1,1539,3.1,1540,3.1,1541,3.1,1542,3.1,1543,3.1,1544,2.82]],["t/209",[257,2.278,262,2.647,264,2.1,371,4.9,381,4.462,483,3.64]],["t/211",[168,1.906,170,1.55,171,1.137,183,1.424,184,1.594,188,1.908,195,0.71,199,1.309,201,3.352,203,2.323,204,2.473,236,2.242,237,2.318,241,2.089,262,1.359,264,1.078,269,1.092,276,1.516,288,2.323,293,1.491,302,2.57,305,2.433,313,2.57,371,4.811,375,2.895,377,2.606,378,1.761,382,2.04,383,3.087,417,1.422,434,1.636,476,3.029,483,1.869,492,1.888,509,1.761,510,2.016,511,2.968,512,1.993,513,3.141,517,2.198,518,3.502,519,2.473,544,2.51,579,2.968,583,2.227,593,2.29,604,2.473,622,2.29,636,2.169,885,1.971,904,2.258,1099,2.656,1368,3.137,1390,2.895,1545,4.525,1546,4.525,1547,3.237,1548,3.847]],["t/213",[162,1.691,168,1.414,170,1.15,183,1.056,186,2.424,195,1.295,204,4.507,233,1.998,236,2.383,237,2.464,239,1.953,248,1.796,253,1.457,257,1.823,262,1.479,269,1.991,276,1.65,299,3.523,370,3.922,371,5.298,372,4.186,373,8.192,378,1.916,383,1.934,438,1.916,476,3.471,478,2.949,479,2.424,483,2.034,520,2.034,621,3.04,862,3.317,885,2.145,937,2.648,1218,2.785,1390,5.276,1549,4.186,1550,7.056,1551,4.186,1552,4.186,1553,4.924]],["t/215",[162,2.246,168,1.274,171,1.115,172,2.347,175,2.712,191,1.831,195,1.346,225,2.712,241,2.048,242,2.277,250,1.589,252,2.424,268,2.048,282,2.772,323,2.838,345,1.954,350,1.238,371,2.465,382,2.948,413,2.604,422,1.486,512,1.954,520,2.7,545,2.914,552,2.347,593,2.245,605,2.154,665,2.772,848,1.618,850,2.555,865,3.634,874,5.335,904,2.213,929,3.285,1154,2.909,1189,3.173,1260,2.555,1294,3.416,1327,4.938,1361,3.285,1429,3.075,1430,5.754,1431,3.77,1432,3.77,1433,3.77,1434,3.77,1435,3.77,1436,3.77,1437,5.269,1438,3.77,1439,3.416,1440,5.559,1441,3.173,1554,4.435,1555,4.435,1556,4.435,1557,3.77]],["t/217",[170,1.549,189,3.624,223,4.055,284,2.889,342,3.221,350,2.688,351,6.695,369,2.99,371,4.809,492,2.767,552,3.51,557,4.35,604,3.624,854,4.652,861,4.146,862,4.467,980,5.638,1115,4.467,1408,5.999,1558,5.638,1559,6.632,1560,6.632,1561,6.632]],["t/219",[257,2.278,262,2.647,264,2.1,381,4.462,483,3.64,550,4.527]],["t/221",[184,2.809,214,5.144,324,3.514,361,3.26,545,3.554,550,4.095,605,3.874,672,5.907,848,2.909,1327,4.594,1562,6.22,1563,6.78]],["t/224",[168,1.468,170,1.194,171,1.285,183,1.965,236,1.727,237,1.786,262,1.535,269,1.234,276,1.713,288,2.625,293,1.685,305,2.749,324,3.709,378,3.276,383,3.945,413,3.001,438,3.276,446,4.65,503,2.903,517,2.483,518,4.309,519,3.961,550,3.721,553,3.353,576,3.196,667,2.706,837,4.119,860,2.945,885,2.227,967,4.119,1010,4.63
7,1178,3.787,1517,3.062,1562,5.387,1564,6.161,1565,5.112]],["t/226",[156,4.13,169,0.988,171,0.846,181,2.058,183,1.734,195,0.528,198,1.466,218,1.634,226,3.517,231,1.434,244,1.482,247,3.061,268,1.554,276,1.127,280,3.141,292,4.08,304,2.711,322,2.592,324,2.334,354,2.153,361,1.375,391,2.592,407,2.207,417,1.665,503,1.348,537,5.782,544,1.273,545,1.499,550,4.15,553,5.301,555,1.938,582,1.728,587,3.311,599,2.103,646,1.87,657,1.781,658,1.87,670,2.153,697,3.061,944,2.492,945,3.825,1035,2.711,1040,2.492,1087,1.679,1099,1.975,1137,2.711,1149,4.503,1189,2.407,1218,1.903,1327,1.938,1362,2.407,1443,2.711,1451,2.711,1562,4.192,1566,2.333,1567,3.061,1568,5.954,1569,6.76,1570,5.297,1571,4.503,1572,3.365,1573,3.061,1574,3.061,1575,2.861,1576,3.365,1577,3.061,1578,3.061,1579,3.061,1580,3.365,1581,3.061,1582,3.365,1583,3.365,1584,3.061,1585,3.061,1586,3.365,1587,2.407,1588,3.061,1589,3.365,1590,3.061,1591,3.061,1592,2.861]],["t/228",[123,1.916,153,1.866,163,1.429,169,1.096,170,0.513,171,1.224,172,3.406,180,2.388,181,2.977,183,1.501,184,1.715,190,1.22,195,1.234,215,2.993,231,0.936,233,1.057,241,3.783,244,0.967,252,1.2,262,0.659,264,0.523,269,0.901,272,1.44,280,1.052,284,1.626,290,1.691,292,1.691,308,1.242,310,1.242,318,1.769,335,1.2,361,0.897,382,0.99,383,0.862,390,1.479,391,1.691,399,1.44,413,1.289,437,1.479,438,1.452,509,0.854,510,0.978,518,0.978,537,3.868,544,1.412,545,3.502,549,1.242,550,3.833,553,3.766,555,2.15,587,1.372,591,1.289,593,1.111,656,1.44,659,4.253,660,3.115,673,3.375,766,1.626,853,3.935,857,1.769,909,1.691,945,1.766,947,1.162,1035,1.769,1075,1.57,1116,1.997,1122,2.874,1137,3.007,1189,2.67,1259,1.626,1308,3.854,1327,2.804,1330,1.866,1362,2.67,1487,1.866,1528,1.862,1547,1.57,1562,1.405,1566,2.587,1568,4.776,1569,3.395,1571,4.138,1573,1.997,1574,4.428,1575,4.881,1579,1.997,1581,1.997,1584,1.997,1585,3.395,1588,3.395,1593,5.742,1594,3.922,1595,2.195,1596,3.395,1597,1.866,1598,2.195,1599,3.732,1600,2.195,1601,1.997,1602,2.195,1603,2.195,1604,2.195,1605,2.195,1606,2.195,1607,2.195,1608,2.195,1609,2.764,1610,3.395,1611,2.195,1612,2.195,1613,2.195,1614,2.195,1615,1.997,1616,2.195,1617,2.195,1618,2.195,1619,2.195,1620,2.195,1621,2.195,1622,3.732,1623,1.22,1624,2.195]],["t/231",[184,2.809,214,5.144,324,3.514,361,3.26,545,3.554,550,4.095,605,3.874,672,5.907,848,2.909,1327,4.594,1562,6.22,1563,6.78]],["t/233",[168,1.85,169,1.891,170,1.982,171,2.134,180,4.121,195,1.011,199,2.456,216,3.967,233,2.403,269,2.049,303,4.026,517,3.128,544,2.436,550,3.307,556,4.801,587,4.026,848,2.349,934,4.091,935,4.738,1362,4.608,1547,4.608,1594,5.189,1625,5.859]],["t/235",[233,2.285,248,2.944,269,1.949,276,2.704,287,4.028,454,3.234,640,5.437,738,3.82,848,2.944,934,3.478,1110,4.834,1302,4.834,1626,6.503]],["t/237",[163,2.94,181,6.122,330,5.493,582,4.3,935,4.179,1115,5.641,1117,5.121,1627,7.618]],["t/239",[248,3.348,269,2.216,1110,5.497]],["t/242",[156,4.231,170,1.778,294,5.128,300,5.36,361,3.112,658,4.231,904,3.799,934,3.28,935,3.799,1369,4.655,1566,5.278,1628,6.472,1629,5.639,1630,6.134,1631,5.639]],["t/244",[233,2.258,248,2.909,269,1.925,276,2.672,287,3.98,454,3.195,640,5.372,738,3.775,934,4.188,1110,4.776,1302,4.776,1447,5.529,1626,6.426]],["t/246",[163,2.37,454,3.234,550,5.028,848,3.572,934,3.478,935,4.028,936,4.739,1075,5.774,1302,4.834,1547,5.774,1632,7.342]],["t/248",[248,3.395,269,2.247]],["t/250",[248,3.174,300,4.342,332,6.032,333,5.567,454,3.486,898,6.032,935,4.342]],["t/252",[156,4.091,300,5.296,310,4.164,361,3.009,411,4.091,454,2.949,555,4.24,658,4.091,904,3.674,935,3.674,1302,4.408,1566,5.104,1623,4.091,1628,7.864,1629,
5.453,1630,5.931,1631,5.453]],["t/254",[199,2.062,214,3.771,248,2.599,269,1.72,270,2.773,299,6.483,392,3.771,412,3.714,503,2.855,545,3.176,550,3.659,848,2.599,907,5.098,934,3.071,935,3.556,1185,5.489,1327,5.22,1509,4.455,1562,5.799,1633,6.482,1634,5.742]],["t/257",[233,2.693,248,2.809,269,1.859,276,2.579,287,3.843,437,5.187,454,3.085,520,3.18,640,5.187,738,3.644,947,5.035,1110,4.611,1626,6.204,1635,5.931,1636,5.187]],["t/259",[248,3.348,269,2.216,1110,5.497]],["t/261",[163,1.928,181,4.016,183,1.409,248,2.395,269,2.075,316,2.74,397,3.855,437,4.424,454,2.631,520,2.712,547,3.714,738,3.108,740,3.855,825,4.698,907,4.698,947,4.55,1097,3.855,1110,3.933,1302,5.74,1441,4.698,1472,5.058,1635,5.058,1636,4.424,1637,5.974,1638,6.567,1639,4.864,1640,6.567,1641,4.105]],["t/263",[169,1.695,170,1.348,172,3.056,195,0.907,198,2.515,236,1.95,237,2.017,258,2.922,269,1.394,280,2.768,293,1.903,308,3.266,324,2.544,383,2.268,438,3.069,503,2.313,509,2.247,510,2.573,518,2.573,519,3.155,537,6.504,550,2.965,553,3.787,555,3.326,575,4.909,599,3.609,605,2.804,646,3.209,685,4.277,926,3.787,945,3.733,1008,4.447,1530,4.131,1562,3.694,1568,6.178,1634,4.652,1642,5.774,1643,5.774]],["t/265",[233,2.285,248,2.944,276,2.704,287,4.028,454,3.234,640,5.437,738,3.82,934,4.219,935,4.028,1110,4.834,1447,5.596,1626,6.503]],["t/267",[248,3.094,332,5.879,333,5.426,412,4.42,454,3.397,887,4.797,898,5.879,1302,5.079,1530,6.067]],["t/269",[248,2.777,264,1.813,503,3.784,509,2.962,550,3.909,591,4.469,885,3.316,907,5.446,934,3.28,935,3.799,1447,5.278,1530,6.758,1562,4.871,1594,6.134,1633,6.925,1644,7.612]],["t/271",[225,5.121,550,5.141,573,5.015,904,4.996,1282,5.641,1283,4.916,1645,5.806,1646,7.618]],["t/274",[184,2.809,214,5.144,324,3.514,361,3.26,545,3.554,550,4.095,605,3.874,672,5.907,848,2.909,1327,4.594,1562,6.22,1563,6.78]],["t/276",[168,1.85,169,1.891,170,1.982,171,2.134,180,4.121,195,1.011,199,2.456,216,3.967,233,2.403,269,2.049,303,4.026,517,3.128,544,2.436,550,3.307,556,4.801,587,4.026,848,2.349,934,4.091,935,4.738,1362,4.608,1547,4.608,1594,5.189,1625,5.859]],["t/278",[550,5.028,606,4.411,621,4.219,848,2.944,934,4.219,935,4.028,1627,7.342,1647,6.265,1648,8.071]],["t/280",[156,4.231,170,1.778,294,5.128,300,5.36,361,3.112,658,4.231,904,3.799,934,3.28,935,3.799,1369,4.655,1566,5.278,1628,6.472,1629,5.639,1630,6.134,1631,5.639]],["t/282",[163,1.91,183,2.173,195,1.341,199,2.472,214,3.442,231,2.772,233,1.841,250,2.331,411,3.614,509,2.53,510,2.898,545,2.898,550,3.339,644,4.161,667,3.442,848,2.372,934,3.681,935,3.245,1306,5.754,1530,4.652,1562,5.466,1649,5.916,1650,5.916,1651,5.916,1652,5.916]],["t/284",[163,3.067,233,1.996,253,2.086,269,2.173,397,4.14,437,4.75,520,2.912,947,5.712,1306,7.037,1635,5.431,1636,6.064,1653,7.051,1654,7.051]],["t/286",[163,1.928,183,2.181,199,1.9,226,3.108,231,2.799,233,1.859,250,2.354,264,1.564,270,2.555,350,1.833,426,5.291,550,3.372,555,3.783,644,4.202,657,3.475,667,3.475,934,2.83,1306,6.85,1445,5.974,1530,4.698,1562,5.501,1649,5.974,1650,5.974,1651,5.974,1652,5.974]],["t/288",[225,5.121,550,5.141,573,5.015,904,4.996,1282,5.641,1283,4.916,1645,5.806,1646,7.618]],["t/290",[257,2.248,262,2.613,264,2.073,368,5.012,369,3.923,381,4.404,483,3.593]],["t/292",[168,1.577,170,1.779,183,1.634,184,1.934,195,0.862,204,3,236,2.573,237,2.661,262,1.648,264,1.308,269,1.325,276,1.839,288,2.819,305,2.952,368,4.388,369,3.435,370,4.234,378,2.136,382,2.475,383,3.437,438,2.136,476,2.936,483,2.267,492,2.291,509,2.136,510,2.446,511,3.601,512,2.419,517,2.666,518,3.899,519,3,579,3.601,622,2.779,885,2.392,1099,3.223,1368,3.806,1390,3.513,145
4,3.601,1655,5.49,1656,5.49]],["t/294",[162,1.812,171,1.326,172,1.772,175,2.047,179,1.719,186,1.648,189,1.83,191,1.383,195,1.345,213,2.047,225,3.226,231,1.427,236,1.131,237,1.169,241,3.723,242,2.709,244,2.325,248,1.221,250,1.2,252,1.83,268,1.546,282,2.093,298,1.928,323,2.142,350,0.934,361,1.368,368,3.761,369,1.509,380,2.093,382,2.379,413,1.965,418,2.053,423,4.064,503,1.341,520,2.697,545,3.302,552,1.772,593,2.671,605,1.626,621,1.443,645,1.861,665,3.298,848,2.941,850,1.928,865,4.481,874,5.135,904,1.671,907,2.395,951,2.196,1154,2.196,1189,2.395,1259,2.48,1260,1.928,1267,2.883,1294,2.579,1327,4.269,1361,2.48,1430,4.837,1431,2.846,1432,2.846,1433,2.846,1434,2.846,1435,2.846,1436,2.846,1437,4.251,1438,2.846,1439,2.579,1440,4.486,1441,2.395,1454,2.196,1568,2.142,1657,3.348,1658,2.846,1659,2.48,1660,3.045,1661,4.486]],["t/296",[264,2.403,357,5.512,368,4.885,369,3.823,370,4.713,417,2.666,418,3.3]],["t/298",[169,2.623,197,5.584,345,3.936,1662,6.881,1663,8.126]],["t/300",[162,2.489,183,1.965,191,2.993,193,3.353,197,3.196,231,2.179,242,2.625,250,1.832,269,1.234,350,1.427,352,2.892,412,3.777,509,1.989,510,2.278,621,2.203,858,2.706,936,3.001,945,2.42,974,2.794,1302,3.062,1446,4.119,1498,4.119,1516,3.787,1623,4.028,1641,3.196,1663,7.659,1664,5.112,1665,5.112,1666,3.938,1667,5.112,1668,3.938,1669,5.112,1670,5.112,1671,9.159,1672,5.112,1673,5.112,1674,5.112,1675,5.112,1676,4.346,1677,5.112,1678,5.112,1679,4.65,1680,4.65,1681,5.112,1682,5.112,1683,5.112,1684,5.112,1685,4.65,1686,4.119]],["t/302",[257,2.248,262,2.613,264,2.073,329,3.486,367,4.342,381,4.404,483,3.593]],["t/304",[170,1.841,186,3.88,233,2.231,256,4.884,267,5.464,329,3.157,345,3.472,367,4.816,483,3.255,660,5.043,1273,6.35,1429,5.464,1687,7.881]],["t/306",[162,2.054,163,1.756,167,4.43,182,4.606,184,2.846,195,0.939,198,2.605,248,2.181,257,1.545,264,1.924,298,3.445,329,4.099,359,4.653,367,5.262,502,2.665,503,2.396,573,3.581,583,2.944,606,3.268,937,3.215,1094,6.222,1256,5.984,1273,4.818,1528,2.984,1688,5.44,1689,4.838,1690,5.084]],["t/308",[163,1.435,168,1.404,169,2.061,183,1.506,186,2.406,233,1.384,236,2.371,237,3.32,242,2.51,248,2.996,249,2.927,264,1.165,269,1.695,271,3.621,276,1.638,316,2.04,329,3.596,331,4.204,337,2.628,342,2.374,350,1.364,351,3.206,367,4.744,390,3.293,412,3.658,417,2.206,547,2.765,582,2.51,583,3.455,596,3.939,783,2.765,936,4.121,947,5.394,1115,3.293,1341,3.497,1637,4.447,1691,3.939,1692,3.939,1693,4.447]],["t/310",[168,2.347,195,1.283,257,2.111,264,1.946,329,3.273,345,4.346,364,3.409,367,5.288,412,4.258]],["t/312",[162,1.608,163,1.375,183,2.005,191,2.81,195,1.069,199,1.355,214,2.478,233,1.326,258,4.057,264,1.116,310,3.849,311,2.405,313,2.225,329,3.211,345,2.063,350,1.307,353,3.469,364,1.954,367,5.144,411,2.603,422,2.28,473,2.162,559,2.405,657,2.478,739,3.072,945,2.217,1080,3.072,1259,3.469,1409,4.26,1413,2.649,1497,3.607,1509,2.927,1517,2.805,1694,4.683,1695,5.483,1696,4.683,1697,4.26,1698,4.26,1699,3.981,1700,3.981,1701,4.26,1702,4.26,1703,4.26,1704,4.26,1705,4.26,1706,3.981,1707,4.26,1708,4.26,1709,3.773,1710,4.26,1711,4.26,1712,3.981]],["t/314",[156,1.755,160,1.786,163,0.927,168,1.447,183,1.681,184,1.113,191,2.08,195,0.987,216,1.476,236,1.067,237,1.103,239,1.999,248,1.152,249,1.891,260,1.755,262,1.887,293,1.661,310,2.85,311,1.622,313,1.033,329,2.518,334,2.26,345,1.391,350,1.754,351,3.305,367,4.988,411,1.755,412,1.646,417,1.584,422,1.688,454,1.265,466,2.545,467,2.958,476,1.217,483,1.304,513,1.495,517,1.534,538,2.339,555,1.819,559,4.024,637,2.433,708,2.433,738,1.495,739,2.072,740,1.854,825,2.26,841,2.26,972,2.433,1010,2.021,119
2,4.283,1267,1.726,1383,2.685,1509,1.974,1517,1.891,1591,2.873,1641,1.974,1695,2.545,1697,2.873,1698,2.873,1699,2.685,1700,2.685,1701,2.873,1702,2.873,1703,2.873,1704,2.873,1705,2.873,1706,2.685,1707,2.873,1708,2.873,1709,2.545,1710,2.873,1712,2.685,1713,7.172,1714,2.685,1715,2.26,1716,3.158,1717,3.158,1718,2.685,1719,3.158,1720,3.158,1721,2.873,1722,2.873,1723,2.685,1724,2.873,1725,7.836,1726,5.038,1727,3.158,1728,4.583,1729,2.339,1730,5.038,1731,3.158,1732,3.158,1733,4.583,1734,2.545]],["t/316",[189,4.358,199,2.308,216,3.728,367,3.98,377,4.594,378,3.103,422,2.672,440,5.907,473,3.682,512,3.514,559,4.095,929,5.907,1735,7.975,1736,7.975]],["t/318",[163,2.115,236,2.433,237,2.516,239,2.857,253,2.131,256,3.646,264,1.716,329,4.218,367,4.554,407,4.725,486,4.149,502,3.21,503,2.886,974,3.936,1266,5.335,1516,5.335,1737,9.124,1738,7.203]],["t/320",[574,4.85,1739,9.306]],["t/323",[160,3.413,216,2.82,257,1.559,260,3.354,264,1.437,293,1.989,298,4.681,303,3.772,316,3.391,334,4.317,428,5.125,432,3.861,580,4.97,645,3.354,658,3.354,762,4.317,904,3.011,1278,5.08,1442,4.648,1715,4.317,1740,6.034,1741,7.393,1742,5.489,1743,6.034,1744,7.393,1745,5.489,1746,5.489,1747,6.034,1748,6.034,1749,5.489,1750,5.489,1751,6.034]],["t/325",[169,2.007,239,2.711,257,1.766,287,3.411,302,2.885,303,4.273,306,2.823,316,2.852,409,4.18,428,4.599,546,3.799,620,4.18,853,4.18,890,4.89,922,5.507,929,6.536,1358,5.507,1372,8.026,1532,5.811,1551,5.811,1752,6.218,1753,6.835,1754,6.835,1755,6.835]],["t/327",[202,5.299,239,2.372,256,3.027,257,2.087,260,3.324,264,1.924,287,4.565,345,2.635,353,4.43,396,4.278,424,6.792,428,4.768,432,3.826,544,2.262,621,2.577,650,4.818,659,4.43,690,4.278,719,3.445,890,4.278,922,4.818,1068,4.278,1267,3.268,1641,3.738,1715,4.278,1752,5.44,1756,5.98,1757,5.98,1758,5.98,1759,5.98,1760,5.44,1761,5.98]],["t/329",[170,2.086,257,2.687,264,2.128,1762,8.126]],["t/331",[160,1.803,163,1.857,168,0.916,170,1.185,171,1.276,183,1.547,184,1.123,186,1.57,192,1.662,195,0.797,199,1.469,203,1.637,204,1.742,218,1.549,236,1.715,237,2.209,257,0.824,262,1.524,264,1.507,268,1.472,269,1.527,274,1.405,276,1.068,288,1.637,305,1.714,313,2.068,320,1.49,330,2.091,364,3.67,367,1.591,375,2.04,377,1.837,378,2.461,381,1.614,382,1.437,383,2.484,407,2.091,422,1.068,434,1.153,438,1.975,470,2.281,472,1.549,476,2.437,483,1.317,492,1.33,513,1.509,517,1.549,518,1.421,519,1.742,552,1.687,558,2.686,559,3.248,582,2.607,604,2.774,613,1.454,636,1.529,784,2.211,807,1.95,856,2.211,861,1.993,885,1.389,908,2.091,944,2.362,974,3.456,1091,2.091,1117,3.104,1225,1.687,1382,2.569,1389,3.76,1390,2.04,1393,2.569,1394,2.211,1396,2.362,1405,2.711,1407,2.711,1410,2.711,1411,3.91,1442,2.456,1488,2.9,1763,2.9,1764,2.9,1765,2.9,1766,2.569,1767,2.9,1768,2.9,1769,2.9,1770,2.456,1771,2.9,1772,2.9,1773,2.569,1774,2.9,1775,4.316,1776,4.316,1777,2.9,1778,2.9,1779,2.9]],["t/333",[163,2.526,168,1.105,169,1.13,170,0.899,181,2.353,183,0.826,195,1.254,233,1.09,236,1.986,237,2.491,241,2.714,257,2.063,264,0.917,269,0.929,272,2.524,310,2.177,313,1.922,364,2.976,378,2.775,383,1.512,412,2.006,434,1.391,438,2.287,473,3.687,476,2.749,483,1.589,549,2.177,558,2.037,559,3.019,613,2.682,622,3.61,623,2.668,636,1.845,667,4.226,735,2.851,784,2.668,974,3.213,1010,3.762,1117,4.883,1178,2.851,1180,3.101,1218,2.177,1362,2.753,1368,4.076,1389,2.851,1393,3.101,1394,2.668,1395,2.964,1557,3.272,1587,5.713,1780,4.459,1781,3.848,1782,3.501,1783,6.789,1784,3.272,1785,3.272,1786,3.501,1787,3.848]],["t/335",[163,2.326,170,1.85,171,1.153,179,2.355,188,1.934,195,0.72,218,2.228,239,1.82,244,2.021,257,1.185,265,2.642
,270,1.785,284,1.998,293,2.209,300,3.345,310,2.594,313,1.5,329,1.838,345,2.021,389,3.398,433,3.18,434,1.658,473,2.118,512,2.021,514,3.935,544,1.735,557,3.009,570,2.549,613,2.092,849,2.747,885,1.998,908,3.009,945,2.171,974,3.663,1091,4.397,1218,2.594,1274,2.594,1309,2.867,1369,4.099,1377,6.101,1528,2.289,1587,5.667,1689,2.747,1722,4.172,1780,4.19,1784,3.9,1788,4.587,1789,4.587,1790,6.703,1791,5.401,1792,4.172,1793,4.587,1794,4.172,1795,3.282,1796,4.587,1797,4.172,1798,3.009,1799,4.172,1800,4.172]],["t/337",[162,2.587,163,1.586,179,2.773,195,0.848,257,2.241,262,1.622,342,2.623,361,2.208,378,2.931,383,3.407,393,4.352,485,2.876,492,3.143,518,3.357,613,2.464,639,4.352,642,3.543,655,4.352,671,4.821,686,4.352,784,3.745,928,3.055,1091,3.543,1159,3.376,1271,3.864,1274,3.055,1279,3.638,1309,3.376,1517,3.235,1528,2.695,1566,3.745,1689,3.235,1780,4.709,1785,4.592,1798,3.543,1801,5.58,1802,5.401,1803,4.913,1804,4.913,1805,3.864,1806,4.913]],["t/339",[163,2.261,241,4.766,257,1.989,382,3.471,1091,5.051,1308,5.697,1309,4.813,1528,3.843,1689,4.611,1780,5.947,1798,5.051,1805,5.509,1807,7.004]],["t/341",[163,1.668,188,1.551,195,1.089,218,1.787,235,2.834,239,1.459,257,1.467,264,2.122,270,1.432,280,1.764,284,2.474,293,1.213,300,1.836,302,1.203,313,1.857,316,1.535,335,2.011,342,1.787,364,2.37,378,1.432,383,1.445,389,2.725,406,2.632,434,1.33,454,1.474,474,2.632,486,2.119,503,1.474,509,1.432,510,1.64,518,1.64,519,2.011,544,1.392,552,1.947,558,1.947,559,1.889,606,3.104,613,2.591,646,2.045,657,1.947,685,2.725,885,1.603,904,1.836,928,2.081,937,1.978,1075,2.632,1087,1.836,1091,3.725,1096,2.632,1137,2.964,1159,2.3,1160,2.834,1170,4.241,1218,2.081,1274,2.081,1283,2.16,1292,2.964,1309,2.3,1340,2.551,1348,2.964,1369,3.473,1447,2.551,1528,2.834,1587,2.632,1689,2.203,1780,4.875,1798,2.413,1805,2.632,1808,5.138,1809,3.679,1810,5.166,1811,7.094,1812,3.679,1813,3.679,1814,3.679,1815,3.679,1816,3.679,1817,3.128,1818,3.347,1819,3.347,1820,3.347,1821,3.347,1822,3.347,1823,3.128]],["t/343",[123,3.756,163,1.177,168,1.742,183,1.301,184,1.412,195,0.952,226,1.898,239,1.591,250,2.174,252,2.191,257,1.036,264,1.743,268,1.851,269,0.968,273,2.869,298,2.31,302,3.128,313,1.983,320,1.874,324,1.767,345,1.767,350,1.693,361,1.639,434,2.193,438,2.846,454,1.606,472,1.948,473,2.8,475,3.791,476,1.545,485,2.793,547,3.431,558,2.122,559,2.059,572,2.401,583,1.974,596,3.231,613,3.337,618,2.566,630,2.63,646,2.229,684,2.701,891,3.409,927,3.231,940,3.231,943,2.78,945,1.898,975,3.409,983,5.156,1040,2.97,1274,3.431,1276,4.086,1528,2.001,1639,2.97,1689,2.401,1780,3.791,1798,2.63,1808,4.493,1823,3.409,1824,3.409,1825,4.01,1826,4.01,1827,3.648,1828,3.409,1829,3.648,1830,3.231]],["t/345",[163,2.986,195,1.031,204,3.589,241,3.969,257,1.697,258,3.324,268,3.032,302,2.811,345,2.893,613,2.996,622,3.324,667,4.55,945,3.108,1080,4.307,1154,4.307,1279,4.424,1413,3.714,1419,4.553,1780,4.105,1831,5.974,1832,5.974,1833,5.974,1834,5.974,1835,7.82,1836,5.974]],["t/347",[163,2.591,171,1.718,183,1.466,262,2.649,269,1.65,274,3.011,302,2.235,364,2.852,433,4.739,518,3.932,613,3.118,642,4.483,784,6.117,974,4.822,1274,3.866,1291,5.507,1780,4.273,1801,5.063,1837,6.835,1838,6.796,1839,5.265,1840,5.265]],["t/349",[257,1.881,307,4.776,313,2.381,350,2.564,364,3.038,472,4.462,473,3.362,513,3.446,558,3.853,613,3.322,646,4.047,748,5.608,1053,5.393,1390,4.659,1666,5.608,1841,7.81,1842,6.623,1843,6.623,1844,6.19]],["t/351",[163,2.58,193,3.631,195,0.869,233,2.169,236,1.87,237,1.933,257,1.98,264,1.319,269,1.336,302,1.81,316,3.197,364,3.667,486,3.188,547,4.97,583,2.725,630,3.631,943,6.575,1091,3.6
31,1118,4.263,1170,3.385,1274,3.131,1309,3.46,1341,3.96,1528,3.824,1639,5.675,1689,3.315,1780,5.929,1798,3.631,1805,3.96,1808,4.1,1830,4.46,1845,5.535,1846,4.706,1847,5.035]],["t/353",[163,2.372,183,1.283,231,2.549,242,3.071,257,1.545,316,3.371,409,3.657,476,2.305,520,2.47,547,3.383,582,3.071,613,2.728,636,2.867,748,4.606,802,5.084,885,2.605,943,4.146,947,4.275,1091,5.299,1097,3.511,1274,3.383,1309,3.738,1528,2.984,1635,4.606,1636,4.028,1639,4.43,1689,3.581,1780,6.124,1798,3.923,1805,4.278,1846,5.084,1848,5.44,1849,5.98,1850,4.818,1851,4.606]],["t/355",[162,0.855,163,2.794,168,0.715,169,0.406,170,0.581,171,0.348,179,0.71,183,1.335,186,0.681,192,0.721,195,0.651,199,0.72,203,0.71,204,0.756,233,0.705,236,1.147,237,1.672,241,2.21,248,0.505,250,0.496,257,0.877,262,1.246,264,0.593,265,0.797,268,0.639,269,0.334,272,0.907,274,0.609,276,0.834,288,0.71,299,0.99,302,0.452,305,0.744,313,1.744,316,1.039,320,0.646,330,0.907,335,0.756,342,0.672,354,0.885,364,3.007,367,0.69,375,0.885,377,0.797,378,2.26,381,0.7,382,0.624,383,1.63,392,0.732,409,0.846,422,0.463,434,0.9,438,0.538,454,0.554,470,0.99,472,0.672,473,1.916,475,0.865,476,1.599,483,0.571,486,1.434,492,0.577,513,0.655,517,0.672,518,1.109,519,0.756,520,0.571,547,2.347,552,0.732,558,2.534,559,2.983,582,1.278,604,1.361,618,0.885,621,1.463,622,0.7,623,0.959,636,1.193,639,1.114,667,2.534,735,1.025,748,1.918,784,2.354,802,1.176,807,0.846,861,0.865,885,1.085,908,1.633,937,1.339,940,1.114,943,3.319,947,0.732,974,0.756,1010,1.593,1053,1.025,1091,4.535,1096,0.99,1117,2.927,1154,0.907,1170,2.537,1178,1.025,1185,1.065,1218,0.782,1225,0.732,1274,3.017,1276,0.932,1308,0.828,1309,3.335,1369,0.846,1377,1.065,1382,1.114,1389,2.515,1390,0.885,1393,2.006,1394,1.726,1395,1.065,1396,1.025,1405,1.176,1407,1.176,1410,1.176,1411,1.918,1419,0.959,1442,1.065,1528,3.288,1557,1.176,1587,3.816,1635,1.065,1636,0.932,1639,3.073,1666,1.065,1689,3.478,1763,1.258,1764,1.258,1765,1.258,1766,1.114,1767,1.258,1768,1.258,1769,1.258,1770,1.065,1771,1.258,1772,1.258,1773,1.114,1774,1.258,1775,2.117,1776,2.117,1777,1.258,1778,1.258,1779,1.258,1780,5.705,1783,3.527,1784,2.117,1785,2.117,1786,1.258,1797,1.258,1798,3.81,1799,1.258,1800,1.258,1801,1.844,1805,3.425,1806,1.258,1807,1.258,1808,3.073,1810,1.258,1811,1.258,1817,1.176,1818,1.258,1819,1.258,1820,1.258,1821,1.258,1822,1.258,1823,2.117,1828,1.176,1829,1.258,1830,2.006,1831,1.258,1832,1.258,1833,1.258,1834,1.258,1835,2.265,1836,1.258,1838,1.065,1839,1.065,1840,1.065,1841,1.176,1842,1.258,1843,1.258,1844,1.176,1846,2.117,1847,1.258,1850,1.114,1851,1.065]],["t/357",[162,2.056,163,2.373,167,1.734,168,2.079,171,0.588,183,1.917,184,1.798,198,1.02,199,1.933,203,2.023,204,1.279,250,1.412,257,0.605,262,1.532,264,0.558,268,1.819,269,1.232,274,1.031,276,0.784,280,1.122,284,1.02,293,0.772,299,1.675,302,1.668,310,1.324,328,1.324,335,2.153,350,1.1,364,2.129,369,1.055,383,0.92,410,1.623,422,0.784,438,1.985,454,1.579,472,1.137,473,1.819,476,1.519,492,0.977,503,1.579,518,2.274,558,1.239,559,1.202,582,3.074,604,1.279,606,1.279,613,1.798,618,1.498,620,1.432,621,1.698,622,2.583,642,1.536,667,1.239,739,1.536,783,1.324,784,1.623,848,1.437,865,4.023,874,3.38,885,1.717,940,1.886,974,1.279,1012,2.36,1080,1.536,1091,1.536,1097,1.374,1117,1.432,1225,1.239,1278,1.463,1309,1.463,1319,4.338,1327,3.847,1391,4.61,1408,1.623,1413,1.324,1419,1.623,1528,1.168,1532,1.99,1647,2.521,1666,3.035,1689,1.402,1780,3.742,1783,1.99,1798,1.536,1801,1.734,1805,1.675,1808,1.734,1828,1.99,1838,3.035,1839,3.035,1840,1.803,1844,1.99,1852,2.129,1853,3.94,1854,5.985,1855,3.94,1856,2.129,18
57,6.584,1858,2.341,1859,2.341,1860,2.341,1861,3.94,1862,2.341,1863,2.341,1864,2.341,1865,2.341,1866,1.886,1867,2.341,1868,2.341,1869,2.341,1870,2.341,1871,1.99,1872,2.341,1873,2.341,1874,3.94,1875,2.341,1876,1.99]],["t/359",[163,1.524,168,1.491,169,1.524,188,2.189,195,0.815,199,1.502,236,2.475,237,1.813,239,2.059,257,2.194,280,2.489,283,2.706,293,2.415,302,2.395,327,3.109,352,2.937,364,3.057,463,4.723,512,2.287,544,1.964,547,4.144,552,2.748,558,2.748,559,2.666,604,2.837,605,2.522,621,2.237,646,2.885,700,3.714,738,2.457,848,1.894,865,2.885,885,2.262,943,5.079,1001,3.322,1107,3.599,1152,7.723,1877,5.191,1878,5.191,1879,5.191,1880,7.326,1881,5.191,1882,5.191,1883,5.191,1884,5.191,1885,5.191,1886,5.191,1887,5.191]],["t/361",[179,4.817,342,4.556,343,5.864,344,5.213,449,6.847,454,3.015,613,3.434,674,6.153,860,4.336,1274,5.306,1564,6.399,1808,5.575,1888,7.527]],["t/363",[163,1.838,168,2.393,171,2.353,257,2.578,269,1.511,270,2.436,302,2.047,320,2.926,362,3.541,364,3.476,407,4.106,472,3.04,476,3.608,503,2.508,622,3.168,858,3.313,943,4.34,974,4.552,1117,3.828,1259,4.637,1260,3.606,1509,5.207,1516,4.637,1841,5.322,1889,4.637,1890,6.26]],["t/365",[257,2.339,264,2.497,266,4.719]],["t/367",[162,2.462,163,1.479,168,1.446,170,1.176,183,1.538,195,0.791,215,2.624,233,2.576,239,3.312,244,3.678,253,2.47,265,4.809,266,3.737,269,1.216,270,2.79,302,2.344,329,2.017,350,1.405,361,2.058,394,3.392,412,2.624,422,2.402,424,3.491,492,2.101,503,2.017,662,4.055,762,3.602,848,1.837,865,4.64,874,2.549,1001,3.222,1204,4.281,1455,3.602,1568,3.222,1891,5.035,1892,5.035,1893,5.035,1894,5.035,1895,4.58,1896,4.281,1897,7.169,1898,4.58]],["t/369",[163,2.555,253,2.574,266,4.535,270,3.385,320,4.067,424,6.032,1378,6.702]],["t/371",[162,2.675,168,2.752,170,1.819,184,2.744,231,3.32,266,4.994,270,3.031,320,4.478,424,5.401,879,5.401,1107,5.401,1324,6,1899,7.086]],["t/373",[162,1.763,163,2.135,168,1.475,169,0.95,171,0.813,183,1.37,195,0.508,233,0.916,236,1.093,237,1.13,239,1.283,242,2.637,250,1.159,253,2.15,257,1.327,258,1.637,262,1.917,264,1.521,266,2.676,269,1.541,270,2.484,293,1.066,302,1.679,311,1.661,313,1.058,320,1.512,345,1.425,350,0.903,362,1.83,371,1.798,378,1.998,382,2.315,383,3.115,418,1.259,424,5.04,438,1.259,472,1.571,475,3.21,476,1.247,479,1.592,492,1.35,518,3.761,519,3.489,580,1.978,605,1.571,613,1.476,621,1.394,622,1.637,632,2.75,642,2.122,657,1.712,667,1.712,858,3.379,904,2.562,928,1.83,937,3.433,1080,2.122,1099,1.899,1260,1.863,1271,2.314,1274,1.83,1276,3.459,1505,3.56,1509,2.022,1528,4.212,1548,2.75,1801,2.396,1838,3.955,1839,2.492,1840,2.492,1900,5.134,1901,2.942,1902,2.942,1903,6.384,1904,5.134,1905,2.75,1906,3.235,1907,2.179,1908,5.134,1909,3.235]],["t/375",[162,2.054,250,2.895,253,2.706,257,1.545,269,1.95,345,2.635,408,4.278,412,3.117,417,1.88,424,4.146,434,2.162,512,2.635,520,2.47,557,3.923,618,3.826,630,5.299,858,3.165,944,4.43,947,5.415,1204,5.084,1214,4.278,1441,4.278,1505,5.601,1636,4.028,1850,4.818,1851,4.606,1899,5.44,1910,5.98,1911,5.98,1912,5.44,1913,5.98,1914,5.44]],["t/377",[253,2.984,266,4.42,320,3.964,417,2.666,424,5.879,502,4.495,544,3.208]],["t/379",[163,2.825,183,1.228,184,2.016,218,2.78,239,2.271,253,1.693,257,2.026,264,2.131,265,4.517,270,2.227,313,3.548,320,3.665,434,3.98,512,2.522,645,3.182,662,3.238,848,2.088,908,3.755,1285,4.409,1369,3.501,1528,3.913,1915,4.867,1916,5.724]],["t/381",[123,2.293,152,2.4,163,2.29,168,1.282,171,1.122,183,1.673,184,1.573,186,2.198,189,2.44,223,2.73,231,1.903,236,2.634,237,2.295,238,4.061,239,2.607,244,1.967,248,1.629,250,1.6,251,2.481,253,1.321,257,1.153,264,2.049,
269,1.078,276,1.496,302,2.149,345,1.967,350,1.246,437,3.007,438,1.737,475,4.875,476,1.721,503,2.633,509,1.737,547,2.525,582,2.293,621,2.831,681,3.007,848,2.397,858,3.478,874,3.947,887,2.525,937,2.4,938,2.857,1068,3.194,1165,4.061,1274,2.525,1276,4.427,1308,2.674,1444,3.095,1505,4.556,1528,3.279,1901,4.061,1902,4.061,1917,4.464,1918,4.464,1919,4.464,1920,4.464]],["t/383",[418,3.672]],["t/385",[168,1.799,171,1.052,183,0.898,194,2.678,199,2.172,200,3.83,203,3.216,214,2.215,224,3.558,269,1.01,280,2.006,322,3.224,392,2.215,399,2.745,512,1.844,605,2.033,645,2.326,662,4.713,736,2.994,739,4.109,945,1.981,1110,2.506,1413,2.367,1464,3.558,1467,3.807,1489,6.382,1509,2.616,1534,3.558,1629,3.1,1907,2.819,1921,6.714,1922,4.185,1923,4.185,1924,8.333,1925,4.185,1926,6.264,1927,8.333,1928,4.185,1929,8.922,1930,4.185,1931,6.264,1932,4.185,1933,4.185,1934,6.264,1935,4.185,1936,3.807,1937,3.807,1938,4.185,1939,4.185,1940,4.185,1941,4.185,1942,4.185,1943,6.264,1944,4.185,1945,3.558,1946,4.185,1947,4.185,1948,4.185,1949,4.185,1950,4.185,1951,4.185,1952,3.558]],["t/387",[168,2.467,239,3.407,250,3.078,265,4.948,329,3.441,354,5.496,582,4.411,1921,6.921]],["t/389",[183,1.652,206,5.931,241,3.555,244,3.392,265,4.435,276,2.579,287,3.843,329,3.085,361,3.147,375,4.927,582,3.954,583,3.79,593,3.897,660,4.927,1953,7.7,1954,7.7,1955,7.7]],["t/391",[1517,5.573,1921,7.498]],["t/393",[479,4.518,502,4.09,1921,7.395]],["t/396",[183,1.218,188,2.393,199,2.774,200,4.767,204,4.26,288,2.915,293,1.871,361,2.32,417,1.784,428,4.063,438,2.208,498,3.935,509,2.208,510,2.529,513,3.69,544,2.147,556,3.21,662,3.21,671,3.155,882,4.061,1327,3.269,1729,5.775,1936,5.163,1937,5.163,1956,4.372,1957,7.796,1958,7.796,1959,5.676,1960,5.676,1961,5.676,1962,5.676,1963,5.676,1964,5.676,1965,5.163,1966,4.988,1967,5.676,1968,5.676]],["t/398",[171,1.545,183,2.125,184,2.165,195,0.965,199,2.38,213,3.758,214,3.252,252,3.358,269,1.484,399,4.031,410,4.26,428,3.203,434,2.975,514,3.608,620,3.758,621,2.648,645,3.415,783,5.246,1008,4.733,1080,5.396,1413,4.653,1486,4.552,1597,5.224,1686,4.951,1966,3.932,1969,6.145,1970,6.145,1971,6.145,1972,4.26,1973,6.145]],["t/400",[168,1.04,169,1.063,171,1.41,183,1.659,184,1.275,195,0.568,199,1.047,213,3.429,236,1.223,237,1.959,241,1.671,244,1.595,250,1.297,269,0.874,272,3.678,283,1.886,286,2.374,302,1.183,329,1.45,413,2.125,428,1.886,436,2.438,438,2.671,454,1.45,473,1.671,478,4.111,479,3.379,555,2.085,557,2.374,618,2.316,645,4.649,660,3.588,665,3.506,738,1.713,951,5.487,967,2.916,1090,2.168,1097,2.125,1110,2.168,1115,2.438,1213,3.077,1291,2.916,1296,3.292,1308,4.63,1348,2.916,1428,5.208,1430,2.681,1472,2.788,1552,4.768,1729,4.154,1966,2.316,1974,3.292,1975,3.619,1976,3.619,1977,4.518,1978,5.608,1979,5.608,1980,3.619,1981,3.619,1982,3.619,1983,6.573,1984,2.681,1985,3.292,1986,3.619,1987,3.619,1988,5.608,1989,3.619,1990,3.619,1991,3.292,1992,3.619,1993,3.292,1994,3.619,1995,3.292,1996,3.292,1997,3.292,1998,3.292,1999,3.292]],["t/402",[168,2.347,236,2.759,237,2.853,350,2.28,557,5.359,671,4.541,1472,6.293,1966,6.311,1995,7.432,1996,7.432,1997,7.432]],["t/404",[156,3.479,171,1.574,191,2.585,200,5.093,204,3.421,219,3.541,324,2.758,398,5.044,428,3.263,485,2.39,495,5.044,503,2.508,587,3.913,646,5.203,662,3.541,671,3.479,904,3.124,947,3.313,1496,5.322,1685,5.694,1965,7.577,1966,4.005,2000,6.26,2001,6.26,2002,9.361,2003,8.329,2004,8.329,2005,6.26,2006,6.26]],["t/406",[183,1.753,269,1.972,361,3.339,478,4.893,479,4.022,561,4.465,1428,5.504,1566,5.664,2007,8.17,2008,5.504,2009,8.17,2010,8.17]],["t/408",[183,1.159,237,1.886,264,1.287,279,4.16,30
6,3.582,478,3.235,479,2.659,502,4.182,561,5.394,605,2.623,645,5.486,937,2.904,1319,4.592,1428,6.886,1907,3.638,1966,5.55,1974,6.852,1983,8.391,2011,4.352,2012,5.401,2013,6.852,2014,6.681,2015,3.745,2016,5.401,2017,5.401,2018,4.913]],["t/411",[156,3.128,172,2.979,188,2.373,199,2.243,269,1.871,293,1.855,306,2.324,316,2.348,324,2.48,350,1.571,417,2.436,478,4.642,479,3.815,513,2.664,544,2.129,561,5.474,660,3.601,807,3.442,904,3.868,1306,5.221,1428,5.221,1645,3.902,1966,4.959,1991,5.12,2019,5.628,2020,5.628,2021,5.628,2022,5.628,2023,5.628,2024,5.628,2025,5.628,2026,4.785,2027,5.12,2028,5.12,2029,5.628,2030,5.628]],["t/413",[306,3.739,417,2.846,544,3.425,561,4.948]],["t/415",[169,2.459,237,2.925,547,4.737,561,4.577,1428,5.641,1966,5.358,2031,7.12,2032,8.374,2033,8.374,2034,8.374]],["t/417",[313,2.96,434,3.274,570,5.032,574,4.719]],["t/420",[169,1.237,172,2.229,183,1.35,184,1.484,190,2.341,195,0.988,198,1.835,206,4.847,213,2.575,226,1.993,231,1.795,241,1.945,244,2.773,250,1.509,269,1.819,270,1.639,271,3.12,284,1.835,300,2.102,302,2.925,311,2.163,313,2.058,345,1.856,382,1.899,391,3.244,397,2.473,417,1.324,434,2.275,498,2.92,502,2.804,503,1.687,513,1.993,520,1.739,544,1.593,545,4.183,546,4.188,570,2.341,593,3.814,604,2.302,702,3.12,909,3.244,926,2.763,951,6.157,1094,3.244,1117,2.575,1444,2.92,1461,3.12,1485,2.92,1486,3.12,1504,3.831,2035,4.211,2036,4.211,2037,4.211,2038,4.211,2039,3.831,2040,7.534,2041,3.244,2042,5.35,2043,3.831]],["t/422",[172,4.377,225,5.058,241,3.819,244,4.378,269,1.997,434,2.99,520,3.416,593,4.186,2044,8.271,2045,6.664]],["t/424",[172,4.029,183,1.633,184,2.682,199,2.203,221,5.278,226,3.603,241,3.515,244,3.354,350,2.124,434,3.415,514,5.546,520,3.144,556,4.306,593,3.853,1272,5.128,2046,7.612]],["t/426",[186,3.88,190,4.38,195,1.238,198,3.434,284,3.434,302,2.577,313,3.155,380,4.927,434,2.85,546,4.38,570,4.38,695,6.35,849,4.72,1094,6.071]],["t/428",[123,2.422,183,1.012,198,2.055,269,1.651,302,2.886,311,2.422,313,3.064,320,3.197,324,2.078,345,2.078,369,2.126,382,2.126,390,3.177,397,4.725,422,1.58,434,3.389,438,1.835,503,1.889,545,3.934,546,4.906,570,2.621,657,2.496,850,3.94,852,4.009,853,2.884,885,2.98,926,3.093,945,3.237,1005,4.009,1283,4.016,1285,5.268,1286,5.511,1394,4.742,1395,5.268,1444,3.27,1447,3.27,1486,3.493,1915,5.815,2039,4.29,2047,4.716,2048,6.222]],["t/430",[269,1.667,313,3.389,320,4.152,397,5.214,434,3.211,849,4.136,850,5.116,1283,5.214,1285,5.319,1286,7.157,1915,7.551,2045,7.911,2049,6.905,2050,6.905,2051,6.905]],["t/432",[163,1.329,170,1.835,183,0.971,190,2.515,195,1.042,233,1.281,244,1.993,252,2.473,269,1.092,302,2.17,311,2.323,313,3.014,320,3.673,329,1.813,364,2.769,397,2.656,417,1.422,430,3.646,434,3.333,438,2.582,474,3.237,503,1.813,514,5.994,545,2.016,546,3.689,570,2.515,644,2.895,646,2.515,647,3.485,652,3.137,656,2.968,674,4.353,848,1.651,849,5.184,850,3.823,853,2.767,908,4.353,1285,5.112,1718,3.847,2045,3.646,2048,4.116,2052,3.847,2053,4.525,2054,4.525,2055,7.859]],["t/434",[162,1.62,171,1.186,183,1.726,195,1.386,198,2.055,257,1.218,266,2.458,270,2.661,302,1.542,306,3.87,313,3.196,320,3.197,361,1.928,397,2.769,417,1.482,422,2.291,434,2.473,472,2.291,503,1.889,512,2.078,545,3.934,546,2.621,570,5.209,583,2.322,848,2.495,850,3.94,855,3.493,856,3.27,860,2.717,874,2.387,885,2.055,887,2.668,904,2.354,908,5.279,1096,3.374,1369,2.884,1447,3.27,1744,4.29,2056,4.716,2057,4.716,2058,4.716,2059,4.716,2060,4.716]],["t/436",[168,2.636,257,2.371,319,6.799]],["t/438",[183,1.207,195,1.392,233,1.593,257,2.002,276,2.596,287,2.809,288,2.89,302,1.84,307,3.692,319,4.169,342,2.734,34
3,4.845,344,3.128,350,2.163,359,3.242,451,4.535,512,3.906,520,3.201,598,3.371,854,3.026,1039,4.169,1087,2.809,1266,4.169,1267,4.236,1283,3.304,1518,6.589,1905,4.785,2061,4.785,2062,7.75,2063,5.628,2064,5.628,2065,5.628,2066,5.628,2067,5.628,2068,5.628,2069,5.628,2070,5.628]],["t/440",[160,2.202,165,4.716,169,1.143,170,1.385,190,5.063,195,0.611,226,1.843,229,2.999,233,1.679,253,1.152,257,1.532,264,1.914,269,1.432,270,1.515,273,2.785,276,1.304,302,1.273,313,1.273,319,4.393,320,2.772,337,2.093,350,1.086,352,2.202,417,1.224,418,2.308,434,1.408,485,2.743,512,3.165,520,1.608,552,2.06,556,2.202,573,4.302,582,1.999,587,2.434,589,2.623,598,4.811,607,3.137,612,2.554,617,2.884,648,2.999,659,2.884,670,2.491,849,2.332,861,2.434,862,2.623,898,2.699,928,2.202,944,2.884,973,4.568,1001,3.795,1101,2.623,1214,2.785,1223,3.137,1260,2.243,1264,3.541,1547,2.785,1688,3.541,2071,7.632,2072,3.541,2073,3.137,2074,3.893,2075,3.893,2076,3.893,2077,3.893,2078,3.893,2079,2.999,2080,3.893]],["t/442",[192,4.719,197,5.66,199,2.62,574,4.719]],["t/444",[160,4.21,169,2.186,170,2.175,171,1.871,172,4.93,173,5.16,174,5.16,175,4.552,176,5.16,177,6.328,178,6.771,179,3.822,180,4.763,181,4.552,182,5.733,183,1.597,184,2.622,185,6.328]],["t/446",[68,2.215,162,0.895,169,2.373,170,0.608,171,0.655,173,4.915,174,4.915,175,1.593,179,2.212,184,1.518,186,1.282,190,2.395,191,2.928,192,4.708,193,1.709,194,2.757,195,0.865,196,3.471,197,5.646,198,1.135,199,2.21,200,1.593,201,1.93,202,1.709,203,2.212,204,1.424,205,2.37,206,2.007,207,2.37,208,2.215,209,2.215,210,6.027,211,3.663,213,1.593,214,1.379,215,3.695,216,2.014,217,6.493,218,1.265,221,2.987,231,1.11,232,3.471,241,2.543,248,1.572,278,3.319,324,1.898,342,2.093,350,0.727,359,3.687,417,0.819,418,1.014,437,4.775,498,2.987,583,2.712,591,2.529,593,2.181,599,4.001,610,3.471,705,2.099,1031,2.215,1443,2.099,1525,2.37,1623,1.448,1679,2.37,1680,6.449,2081,4.308,2082,4.308,2083,4.308,2084,4.308,2085,3.919,2086,4.308,2087,2.37,2088,2.007,2089,2.605,2090,2.605,2091,2.605,2092,4.308,2093,4.308,2094,6.449,2095,2.605,2096,2.605,2097,2.215,2098,2.605]],["t/448",[199,2.585,257,2.308,264,2.128,934,3.849,935,4.458]],["t/451",[183,1.663,189,3.076,194,3.601,195,0.884,199,1.629,212,4.785,213,3.442,276,2.97,300,2.809,350,1.571,352,3.183,473,2.599,503,2.255,517,2.734,526,5.374,621,2.425,671,3.128,783,3.183,866,3.791,934,2.425,1090,3.371,1278,3.518,1629,4.169,1686,4.535,2015,5.374,2099,5.628,2100,5.628,2101,5.628,2102,4.169,2103,5.628,2104,7.75,2105,5.12,2106,5.628,2107,5.628,2108,5.12,2109,5.628,2110,5.628,2111,7.75,2112,7.75,2113,5.628,2114,5.628]],["t/453",[183,1.306,194,5.232,195,1.284,199,3.133,212,6.952,213,5,417,2.902,428,3.174,513,2.882,544,3.733,600,4.102,662,3.444,740,3.575,934,4.666,1533,4.69,2115,6.089,2116,5.177,2117,6.089,2118,6.089]],["t/455",[300,4.644,934,4.01]],["t/457",[183,1.891,570,4.9,934,3.798,935,4.4,1136,7.495,2119,8.815]],["t/459",[163,1.185,168,1.159,183,0.865,189,2.205,191,1.666,195,0.633,199,2.124,215,2.102,239,1.6,241,3.777,250,1.446,253,2.42,257,1.042,265,2.324,316,2.542,328,2.282,330,2.646,355,3.809,417,1.268,422,3.094,472,1.959,513,2.884,520,1.666,544,2.305,547,3.446,606,3.33,616,3.25,621,2.625,630,3.996,660,2.581,848,2.223,934,4.131,935,3.664,947,3.224,1308,2.416,1353,7.851,1446,3.25,1485,4.224,1486,2.988,1647,3.898,1907,2.717,1912,3.67,1966,2.581,2031,5.18,2041,3.107,2120,3.67,2121,4.034,2122,4.034,2123,4.034,2124,4.034,2125,4.034,2126,4.034,2127,4.034,2128,4.034,2129,4.034,2130,4.034,2131,4.034]],["t/461",[422,3.033,934,3.901,1353,7.697,2011,7.295]],["t/463",[168,1.688,170,1.372,172,
4.224,195,0.923,199,2.623,257,1.518,258,2.974,264,1.4,268,2.713,316,3.331,337,4.291,344,3.265,350,1.64,422,1.968,475,3.673,486,3.384,509,2.286,510,2.618,613,2.68,621,3.439,622,2.974,848,2.143,934,3.906,935,2.932,945,3.778,1343,6.786,1350,5.344,1647,3.759,2132,7.982,2133,5.875,2134,5.875,2135,5.875,2136,5.344]],["t/465",[467,5.315,713,6.707,1662,6.974,1795,6.477]],["t/467",[165,1.732,169,0.866,170,1.114,171,1.509,195,1.393,198,1.285,216,1.379,255,2.11,259,1.766,260,1.639,261,2.045,262,1.432,273,2.11,274,1.299,276,0.988,284,2.078,293,1.979,307,1.935,318,2.377,334,4.933,340,2.683,342,1.433,343,5.547,345,2.101,347,4.837,348,5.615,350,2.113,359,1.699,361,1.206,362,1.668,369,1.33,372,2.508,422,0.988,442,2.683,443,2.508,444,5.607,448,2.508,452,2.272,454,1.182,467,4.048,473,1.362,474,2.11,509,2.683,512,1.299,538,2.185,549,1.668,556,1.668,568,5.063,573,1.766,587,1.844,655,2.377,658,1.639,674,1.935,711,1.766,716,2.683,857,2.377,864,2.272,866,4.645,871,2.11,872,3.412,885,1.285,963,4.447,973,2.272,1041,2.508,1069,2.508,1219,2.683,1225,1.561,1259,2.185,1260,1.699,1272,1.987,1430,2.185,1455,2.11,1547,3.412,1577,2.683,1718,2.508,1733,2.683,1795,2.11,2137,4.338,2138,2.949,2139,2.949,2140,2.949,2141,2.949,2142,2.949,2143,2.949,2144,4.769,2145,4.769,2146,2.949,2147,2.949,2148,2.949,2149,2.949,2150,2.949,2151,2.683,2152,2.949,2153,2.949,2154,2.949,2155,2.949]],["t/469",[3,3.369,170,1.696,171,0.996,193,2.599,195,0.622,202,3.943,223,2.423,234,3.052,257,2.094,259,2.373,262,2.616,274,3.571,276,1.327,283,2.065,293,1.981,308,2.241,329,1.587,337,4.358,341,2.035,343,3.757,344,2.202,350,1.106,361,1.62,378,1.542,383,3.603,409,4.441,412,2.065,444,2.935,452,3.052,518,4.087,642,2.599,647,3.052,657,2.097,671,2.202,702,2.935,730,2.835,738,1.875,854,2.13,866,4.049,871,5.195,872,2.835,882,2.835,1012,2.373,1040,2.935,1087,3,1118,3.052,1153,3.193,1159,2.477,1272,2.669,1429,2.747,1480,3.604,1498,3.193,1578,3.604,1609,2.935,1641,2.477,1972,2.747,2013,3.604,2156,3.962,2157,3.604,2158,3.604,2159,3.962,2160,3.962,2161,3.604,2162,3.962,2163,3.962]],["t/471",[162,2.112,168,1.173,171,1.026,195,0.641,242,2.097,257,1.055,259,2.445,262,2.891,274,3.626,284,1.779,310,3.478,337,2.195,361,2.513,369,1.841,378,3.435,382,1.841,383,3.468,390,2.751,392,3.254,393,3.29,394,2.751,413,2.397,436,2.751,454,1.636,467,2.397,492,3.871,518,3.668,572,2.445,589,2.751,642,2.678,652,4.263,657,2.161,671,2.269,686,3.29,702,3.024,713,3.024,730,2.921,872,2.921,1061,3.714,1118,4.736,1159,2.552,1180,3.29,1218,2.31,1279,4.142,1280,3.714,1324,3.145,1454,2.678,1804,3.714,2079,3.145,2164,7.487,2165,3.714,2166,5.593,2167,5.593,2168,4.083,2169,4.083,2170,4.083,2171,4.083]],["t/473",[574,4.784,1106,5.737,2172,7.07]],["t/475",[123,2.842,160,3.131,169,1.625,170,1.789,171,2.384,172,4.055,173,6.092,174,6.092,175,5.374,177,4.706,179,2.842,184,1.95,191,2.286,192,2.885,197,4.79,217,4.706,228,4.263,230,5.035,231,2.359,232,6.174,234,4.263,235,4.263,243,3.729,438,3.69,549,3.131,594,4.706,658,4.259,1218,3.131,2173,5.535,2174,5.035,2175,5.535,2176,5.535,2177,5.535,2178,5.535]],["t/477",[171,2.054,181,6.032,182,6.293,183,1.753,188,3.445,233,2.313,293,2.693,394,5.504,556,4.621,1115,5.504,1185,6.293]],["t/479",[169,2.555,170,2.392,195,1.366,438,3.385,544,3.291,1558,7.397]],["t/481",[168,2.128,170,1.73,171,1.862,189,2.882,195,0.828,198,2.297,199,1.526,274,2.323,277,4.483,296,4.483,311,3.804,313,2.422,350,1.472,355,3.296,364,3.091,369,2.377,370,2.931,371,2.931,422,1.767,438,2.052,473,2.435,476,3.3,483,2.178,517,2.561,570,2.931,664,3.552,711,3.158,848,2.702,861,5.805,862,3.552,865,2.931,874,
2.669,932,4.483,1012,3.158,1106,3.296,1225,2.791,1282,3.552,1283,3.096,1352,4.249,1411,4.062,2179,5.273,2180,4.797,2181,5.273,2182,5.273,2183,4.483]],["t/483",[168,2.291,195,1.252,274,3.514,364,3.328,368,4.594,369,3.595,371,4.433,467,4.682,684,5.372,854,4.288,1001,5.103,1067,5.529,1190,7.255,2184,7.975]],["t/485",[165,5.388,574,4.784,1749,8.349]],["t/487",[165,3.332,169,2.289,188,2.393,191,3.219,198,2.473,248,2.07,257,2.014,262,1.704,276,2.983,280,3.737,302,1.856,327,3.399,333,3.632,337,4.787,342,2.757,383,3.063,454,2.274,485,3.66,492,2.368,509,2.208,518,2.529,579,5.114,598,3.399,671,4.333,681,3.823,928,3.21,1053,4.204,1126,5.163,1263,5.163,1271,4.061,1429,3.935,1531,5.163,1668,6.005]],["t/489",[162,1.742,163,1.49,165,2.979,168,1.457,179,2.605,195,0.797,231,3.072,259,3.038,260,2.82,264,1.209,267,3.517,268,2.342,281,2.979,283,2.644,302,1.659,350,1.416,352,2.87,383,1.993,422,1.7,476,1.955,485,1.937,518,3.212,555,2.923,587,3.172,598,3.038,612,4.728,645,2.82,651,4.313,656,3.328,681,3.418,693,4.088,904,2.532,1067,3.517,1075,3.63,1101,3.418,1274,4.077,1486,3.758,1515,4.615,1544,4.615,1587,3.63,1766,4.088,1798,3.328,1907,3.418,2071,4.088,2185,5.073,2186,4.615,2187,5.073,2188,5.073,2189,4.313,2190,5.073,2191,5.073,2192,5.073,2193,5.073,2194,5.073,2195,5.073,2196,5.073,2197,5.073,2198,5.073]],["t/491",[165,2.42,169,0.725,170,0.577,171,1.036,172,1.307,183,0.884,189,1.35,195,0.388,216,1.155,218,1.2,231,1.053,233,0.699,244,3.644,248,0.901,253,1.22,257,1.371,264,0.589,269,1.662,270,1.604,276,0.828,283,1.288,284,1.076,300,1.233,302,1.735,306,1.02,308,1.397,311,1.269,313,2.898,316,1.031,320,1.155,324,2.337,397,2.42,408,1.767,417,1.296,434,3.205,454,2.125,476,1.589,485,0.943,503,1.652,509,0.961,512,1.088,514,4.042,545,3.068,546,3.442,549,1.397,570,1.373,598,2.469,604,2.899,606,2.899,607,3.322,612,2.704,631,1.991,674,3.48,693,1.991,709,1.903,717,1.903,849,3.709,850,3.056,853,1.511,862,3.574,866,1.664,885,1.796,886,2.247,887,1.397,908,2.704,945,2.931,949,2.638,1001,1.581,1097,1.45,1098,1.903,1159,1.544,1253,2.247,1308,3.709,1330,2.1,1369,1.511,1377,1.903,1394,2.858,1429,1.713,1439,1.903,1444,1.713,1528,2.057,1530,1.767,1568,1.581,1715,1.767,1795,1.767,2042,2.1,2052,3.505,2071,1.991,2183,2.1,2199,2.1,2200,2.47,2201,2.47,2202,2.47,2203,2.47,2204,2.47,2205,2.47,2206,2.47,2207,2.47,2208,2.47,2209,2.47,2210,2.47,2211,2.247,2212,2.47,2213,2.47,2214,2.47,2215,1.903,2216,2.47,2217,4.122,2218,2.47,2219,4.122,2220,2.47,2221,4.122,2222,2.47]],["t/493",[153,4.87,162,1.278,165,3.363,168,1.069,170,0.869,176,2.579,183,1.683,195,1.097,198,1.621,213,2.275,276,1.246,304,2.997,344,3.183,350,2.364,390,2.506,433,2.579,448,4.87,467,2.184,492,1.552,506,2.997,509,1.447,510,1.658,512,1.639,520,3.695,528,5.21,549,2.104,598,2.228,600,5.706,601,2.997,612,3.757,621,1.603,647,2.865,672,2.756,681,2.506,690,2.661,705,2.997,879,2.579,922,2.997,926,2.44,1001,2.38,1095,2.756,1170,2.275,1214,4.997,1218,2.104,1260,2.143,1429,2.579,1497,2.865,1517,2.228,1647,2.38,1689,2.228,1984,2.756,1998,3.384,2071,2.997,2199,3.163,2223,3.72,2224,3.72,2225,5.728,2226,5.728,2227,5.728,2228,5.728,2229,5.728,2230,5.728,2231,3.72,2232,5.21,2233,3.384,2234,3.72,2235,3.163,2236,3.384,2237,3.72,2238,5.21,2239,3.72,2240,3.384,2241,3.72,2242,3.384,2243,3.384,2244,3.72]],["t/495",[162,1.243,163,1.647,169,2.016,171,0.91,183,0.776,195,1.078,198,1.577,213,5.644,225,2.213,226,1.713,235,2.788,241,3.57,248,1.32,250,2.46,265,3.23,270,1.408,276,1.213,283,1.886,293,1.193,298,2.085,305,1.946,323,4.392,350,1.565,361,1.479,396,2.589,411,2.012,438,1.408,478,3.358,502,1.613,503,1.45,
517,2.724,555,2.085,593,2.838,610,2.916,644,2.316,645,2.012,653,2.509,696,2.509,783,2.047,849,2.168,850,2.085,858,1.916,874,4.478,924,3.292,938,2.316,1087,1.806,1154,4.503,1170,3.429,1273,2.916,1327,2.085,1355,3.077,1437,2.916,1441,2.589,1509,2.263,1551,3.077,1568,2.316,1634,2.916,1658,3.077,1676,3.077,1709,2.916,1889,2.681,2014,2.788,2167,3.292,2245,3.292,2246,3.619,2247,3.619,2248,3.619,2249,3.619,2250,3.292,2251,3.619,2252,5.608,2253,3.619,2254,3.619,2255,3.619,2256,3.619,2257,3.619,2258,3.619,2259,3.619,2260,3.619,2261,3.619,2262,3.619,2263,3.292,2264,3.077]],["t/497",[162,2.774,163,1.756,165,5.371,170,1.886,179,3.071,183,1.733,195,0.939,219,3.383,243,4.028,262,1.796,269,1.95,270,2.327,378,3.981,383,4.02,492,3.371,517,2.905,591,3.511,619,3.738,656,3.923,659,5.984,693,4.818,2199,5.084,2265,8.078,2266,8.078]],["t/499",[418,3.672]],["t/501",[170,1.272,171,1.369,183,1.168,194,3.484,199,2.521,214,4.009,253,1.611,257,1.407,264,1.805,276,1.824,300,2.717,303,3.404,313,1.78,422,1.824,432,4.847,492,2.272,554,4.194,579,3.572,662,3.08,783,3.08,807,3.33,848,1.986,861,3.404,865,4.21,874,2.756,1053,5.611,1279,3.668,1369,3.33,1378,4.194,1856,4.953,1952,4.629,2102,4.033,2108,4.953,2267,6.891,2268,4.629,2269,5.445,2270,5.445,2271,5.445,2272,5.445,2273,4.953,2274,4.953,2275,5.445,2276,5.445,2277,5.445,2278,5.445]],["t/503",[123,2.821,162,1.886,163,1.035,168,1.939,169,1.035,170,1.283,171,1.697,183,1.772,184,1.935,195,1.197,215,1.837,218,1.712,223,2.155,233,0.998,236,1.19,237,1.231,262,1.058,264,1.608,269,1.629,270,1.371,274,1.553,276,1.84,281,2.069,284,2.393,288,1.81,305,1.895,316,1.471,324,1.553,329,2.201,337,1.895,350,1.533,367,3.368,378,1.371,383,2.994,492,2.292,503,1.412,517,1.712,518,3.008,519,1.926,545,3.008,550,4.242,553,2.312,554,2.715,576,2.203,582,1.81,605,1.712,645,1.959,738,1.668,740,2.069,860,2.03,876,2.84,885,2.393,1012,3.289,1080,2.312,1087,1.759,1256,2.61,1306,2.374,1308,2.111,1413,1.993,1517,2.111,1528,1.759,1568,4.319,1575,2.996,1692,2.84,1706,2.996,1898,3.206,2015,2.443,2267,3.206,2274,4.997,2279,4.997,2280,3.524,2281,3.524,2282,3.524,2283,3.524,2284,3.524,2285,3.206,2286,3.206]],["t/505",[156,3.922,183,2.282,191,2.034,269,1.189,281,2.891,300,4.115,310,2.785,324,2.169,329,1.973,367,4.95,422,1.65,454,2.827,509,1.916,510,2.194,736,3.523,738,3.34,740,4.143,877,5.999,1497,3.793,1516,5.227,1641,4.411,1695,3.968,1699,4.186,1700,4.186,1711,4.479,1712,4.186,2286,4.479,2287,4.186,2288,4.924,2289,4.924,2290,4.924,2291,4.924,2292,4.924,2293,4.924,2294,4.924,2295,4.924,2296,4.924,2297,4.924,2298,4.924,2299,4.924,2300,4.924,2301,4.924,2302,4.924,2303,4.924]],["t/507",[162,1.484,163,1.269,171,1.086,183,0.927,199,2.213,214,2.286,237,1.509,266,2.252,269,1.847,270,1.681,274,3.729,276,1.447,281,2.536,300,5.137,324,2.826,328,2.444,329,1.731,350,1.79,364,1.803,367,4.895,376,4.104,377,4.406,417,1.358,422,1.447,470,3.091,479,2.127,503,2.57,509,1.681,510,1.925,512,1.903,568,2.701,601,3.481,1225,2.286,1378,4.94,1413,2.444,1414,3.673,1415,3.673,1417,5.453,1418,3.673,1419,2.995,1424,3.673,1425,3.673,1446,6.163,1516,3.2,1695,6.163,1984,3.2,2287,3.673,2304,4.32,2305,4.32,2306,4.32,2307,4.32,2308,4.32]],["t/509",[169,2.093,237,3.48,253,2.681,269,1.72,319,5.279,329,3.631,367,4.973,399,4.674,486,4.105,489,5.742,490,5.742,582,3.659,630,4.674,947,4.796,2309,7.126,2310,7.126,2311,6.482]],["t/511",[163,2.094,183,1.072,188,2.108,195,0.785,199,1.446,216,2.336,253,1.478,256,3.609,268,3.293,269,1.207,270,1.945,293,1.647,297,3.702,300,2.494,313,1.634,328,2.827,329,2.857,343,4.458,344,3.964,367,4.975,417,1.571,422,2.389,454,2.0
02,478,2.993,479,2.46,551,4.249,553,4.678,667,2.645,700,3.576,738,2.365,876,4.027,904,3.559,1256,3.702,1309,3.124,1419,3.465,1641,4.458,2287,4.249,2312,4.998,2313,4.998,2314,4.998,2315,4.998,2316,4.998,2317,4.998,2318,4.998,2319,4.998,2320,6.487,2321,4.998,2322,4.998]],["t/513",[257,2.137,329,3.313,350,2.308,367,4.128,383,3.249,502,3.686,518,3.686,550,4.247,620,5.058,2279,7.524,2323,8.271]],["t/515",[257,2.219,262,2.579,264,2.046,350,2.397,351,5.634,381,4.347,483,3.547,2324,7.813]],["t/517",[152,1.157,162,1.947,163,1.409,170,1.486,171,0.541,183,1.216,184,0.758,186,1.06,188,0.908,190,1.196,195,0.338,198,0.938,199,0.623,218,1.046,229,1.658,233,1.605,236,0.727,237,0.752,243,2.473,250,1.316,253,1.086,256,1.09,264,0.874,266,1.913,269,1.158,276,0.721,281,1.264,293,0.71,302,2.264,313,1.853,327,1.289,342,1.783,343,2.294,344,2.667,350,2.063,351,5.109,352,1.218,354,1.377,359,1.24,366,3.89,417,0.677,418,0.838,422,1.607,428,1.122,438,0.838,454,1.922,472,1.783,475,2.999,476,2.185,479,1.06,492,0.898,503,0.862,512,0.948,544,0.814,549,1.218,552,1.139,557,1.412,580,1.316,589,1.45,613,1.674,646,1.196,674,3.147,711,1.289,736,2.626,738,2.683,783,1.218,846,1.83,848,1.339,856,1.492,858,1.139,865,2.04,874,1.09,909,1.658,937,1.973,945,1.019,1012,2.198,1075,1.54,1087,1.074,1101,2.473,1203,1.958,1225,2.539,1256,5.13,1271,1.54,1274,3.206,1276,3.819,1282,1.45,1283,1.264,1288,1.83,1292,1.735,1309,2.294,1412,4.819,1564,1.83,1734,1.735,1817,3.121,2072,1.958,2079,1.658,2268,1.83,2320,1.958,2324,7.393,2325,1.958,2326,2.153,2327,2.153,2328,2.153,2329,5.669,2330,2.153,2331,2.153,2332,2.153,2333,2.153,2334,4.798,2335,4.798,2336,4.798,2337,2.153,2338,2.153,2339,2.153,2340,2.153,2341,2.153,2342,2.153,2343,3.67,2344,2.153,2345,2.153,2346,2.153,2347,2.153,2348,3.67,2349,2.153,2350,2.153,2351,2.153,2352,2.153,2353,2.153,2354,2.153,2355,2.153,2356,2.153]],["t/519",[418,3.672]],["t/521",[162,1.322,163,1.13,168,1.689,170,0.899,172,2.037,175,2.353,184,1.356,186,1.894,191,1.589,192,2.006,195,1.254,197,2.406,198,2.561,199,2.31,244,1.695,250,1.379,255,2.753,262,1.765,274,4.286,276,1.969,335,2.103,342,1.869,350,1.074,364,1.606,368,2.217,369,3.6,370,2.139,376,2.462,377,3.386,381,1.948,383,2.309,417,1.21,473,1.777,492,2.453,545,1.715,550,1.976,576,2.406,579,3.856,582,1.976,604,3.213,622,1.948,674,2.524,848,2.602,865,3.267,874,4.042,885,2.561,1080,2.524,1087,1.921,1170,3.595,1266,2.851,1302,2.305,1391,2.964,1406,2.753,1413,3.325,1439,2.964,1494,3.101,1517,4.272,1661,3.272,1857,3.501,2357,5.879,2358,3.848,2359,3.848,2360,3.848,2361,3.848,2362,3.848,2363,3.848,2364,3.501]],["t/523",[162,2.235,163,1.294,168,1.869,171,1.636,198,3.371,199,2.638,216,2.059,268,2.034,269,1.571,270,2.532,274,1.941,300,2.199,324,1.941,364,2.715,376,6.106,377,4.457,418,2.532,422,1.476,454,1.765,473,2.034,479,2.169,549,2.492,559,3.342,604,3.556,605,2.14,620,2.694,701,3.394,709,3.394,711,2.638,872,3.152,1087,2.199,1107,3.054,1225,3.444,1412,5.532,1413,5.583,1414,5.532,1415,5.532,1417,5.532,1418,5.532,1419,3.054,1420,4.008,1421,4.008,1422,4.008,1424,3.746,1425,3.746,1426,4.008,1427,4.008,2365,4.406]],["t/525",[169,2.028,195,1.395,218,3.354,225,4.223,283,3.599,284,3.008,298,3.978,364,2.882,368,3.978,369,3.113,503,2.767,520,2.852,593,3.495,848,3.24,850,3.978,865,3.838,874,5.246,929,5.115,951,4.53,1442,5.319,1444,4.788,1461,5.115]],["t/527",[257,2.248,262,2.613,264,2.073,381,4.404,464,6.032,483,3.593,2366,8.701]],["t/529",[168,1.405,169,0.894,170,1.432,171,0.765,175,1.861,183,1.926,184,2.16,188,1.283,190,1.692,195,1.103,199,2.032,203,1.563,204,1.663,233,0.862,236,1.028,237,1
.063,241,1.405,245,2.452,250,1.091,251,1.692,258,1.54,262,0.914,264,1.165,269,0.735,276,1.02,288,1.563,293,1.003,302,0.995,305,1.636,313,1.599,335,1.663,364,2.93,375,1.947,377,2.817,378,1.184,382,1.372,383,2.409,417,1.537,434,1.768,476,1.173,486,1.753,489,2.452,490,2.452,492,2.041,517,1.478,518,2.732,519,1.663,520,2.532,558,1.611,559,3.606,579,1.996,583,1.498,591,1.787,593,1.54,604,2.673,621,1.311,622,1.54,660,1.947,807,1.861,848,1.784,865,1.692,874,3.893,885,3.351,904,1.519,934,2.642,945,3.888,947,1.611,949,1.947,1067,2.11,1099,1.787,1368,2.11,1390,1.947,1391,2.344,1392,2.769,1406,2.177,1411,2.344,1623,1.692,1647,3.129,1659,2.254,1692,2.452,1775,2.587,1776,2.587,2136,2.769,2311,2.769,2367,5.213,2368,3.043,2369,3.043,2370,3.043,2371,3.043,2372,3.043,2373,3.043,2374,3.043,2375,3.043,2376,3.043]],["t/531",[183,2.127,203,3.621,242,4.622,266,4.692,298,4.062,350,1.968,434,3.255,636,3.38,2367,7.653,2377,6.414,2378,8.188,2379,7.051,2380,5.995,2381,6.414,2382,6.414,2383,6.414,2384,6.414,2385,7.051]],["t/533",[183,2.198,203,3.44,242,4.472,266,4.539,298,3.858,350,1.869,434,3.149,636,3.211,1408,4.644,2367,7.404,2377,6.093,2380,5.695,2381,6.093,2382,6.093,2383,6.093,2384,6.093,2386,8.708,2387,6.698,2388,6.698,2389,6.698,2390,6.698,2391,6.698]],["t/535",[165,3.818,170,1.995,188,2.742,199,1.882,237,2.983,250,2.331,255,4.652,258,3.291,264,2.035,266,4.452,293,2.143,350,1.815,361,2.658,417,2.044,544,2.46,552,3.442,580,3.977,583,3.201,602,6.327,604,3.554,605,3.159,621,2.802,926,4.266,2392,5.529,2393,6.503,2394,6.503,2395,6.503,2396,5.009]],["t/537",[188,3.717,195,1.384,293,2.906,357,4.818,2378,8.019,2397,8.815]],["t/539",[152,4.868,250,3.245,251,5.032,574,4.719]],["t/541",[162,1.428,170,0.971,194,2.661,216,1.944,226,1.969,233,2.519,239,1.65,244,2.747,250,3.189,253,3.157,257,1.075,263,4.905,265,3.591,268,1.92,302,2.445,326,3.204,328,2.353,329,1.666,350,1.74,362,2.353,392,2.201,409,5.08,485,3.699,495,3.351,496,3.989,502,2.778,520,2.575,521,2.976,583,2.047,1067,2.884,1090,2.491,1111,3.204,1117,2.543,1260,2.396,1266,3.081,1292,3.351,1324,4.802,1896,3.536,1972,2.884,2137,3.784,2245,3.784,2398,4.159,2399,4.159,2400,4.159,2401,4.159,2402,4.159,2403,6.234,2404,4.802,2405,3.536,2406,4.159]],["t/544",[233,2.635,253,2.753]],["t/546",[233,2.991,253,2.715]],["t/548",[123,4,233,2.205,253,2.304,310,4.406,313,2.547,514,4.573,583,3.835,606,5.236,848,2.842,849,4.665,1369,4.763,1377,7.38,2042,6.623,2407,7.086]],["t/551",[253,2.541,269,2.074,479,4.228,485,3.279,583,4.228,2008,5.786,2408,8.589,2409,8.589]],["t/553",[253,2.984,328,4.797,350,2.367,485,4.111,2410,6.833,2411,8.48]],["t/555",[123,3.659,183,1.529,195,1.119,253,2.681,422,2.387,465,5.279,466,5.742,485,3.805,486,5.74,487,6.482,488,6.482,489,7.302,490,5.742,491,6.482,496,4.56,1001,4.56,1218,4.031,1984,5.279]],["t/557",[191,3.64,253,2.608,485,3.366,496,5.641,521,6.307,600,5.938]],["t/559",[253,2.509,269,2.047,422,2.841,492,3.539,495,6.833,496,5.426,497,7.21,498,5.879,2412,8.48]],["t/561",[253,2.775,485,4.084,496,6.002,502,4.904,653,7.084,928,4.257,2413,5.798,2414,5.798]],["t/563",[188,3.531,190,4.654,253,2.477,293,2.76,479,4.122,485,3.822,496,5.358,1496,8.511]],["t/565",[184,2.205,195,0.983,232,5.044,236,2.114,253,2.464,269,1.511,270,3.241,422,2.097,476,3.608,486,3.606,498,6.918,502,2.789,559,4.807,622,3.168,1287,4.637,1342,4.34,1528,4.98,1721,5.694,2415,6.26,2416,6.26,2417,8.329,2418,6.26,2419,6.26,2420,6.26]],["t/567",[259,5.422,262,2.719,417,2.846,418,3.523]],["t/569",[168,2.636,199,2.656,215,4.784]],["t/572",[152,4.093,183,1.633,214,4.029,215,3.968,250,2.728,251,4.231,
253,2.794,1952,6.472,1966,6.044,2102,5.639,2273,6.925,2421,6.925,2422,7.612,2423,7.612,2424,7.612,2425,7.612]],["t/574",[195,1.052,199,1.938,214,4.609,231,2.855,239,2.657,250,2.401,253,1.981,512,2.951,520,2.766,580,4.096,621,2.886,783,5.796,1090,4.011,1342,4.644,1378,5.159,1455,4.792,1464,5.695,1972,4.644,2041,5.159,2116,7.404,2421,6.093,2426,5.159,2427,6.698,2428,6.093,2429,6.093]],["t/576",[152,2.105,162,3.259,169,1.15,171,0.984,199,1.133,214,2.072,215,2.041,239,2.363,248,2.939,249,2.345,250,2.135,251,2.176,253,2.383,268,1.808,269,1.438,280,1.877,281,2.299,300,1.954,302,1.28,335,2.14,410,4.13,412,2.041,417,1.231,432,2.506,438,2.805,492,2.486,509,1.524,513,2.82,544,2.253,580,4.409,583,1.928,598,2.345,634,3.016,645,5.074,670,2.506,766,4.413,783,3.37,859,3.155,890,2.802,945,2.82,1080,5.285,1101,2.638,1110,2.345,1170,2.395,1375,5.419,1413,3.37,1597,3.329,1676,3.329,1689,2.345,1905,3.329,1972,2.715,2014,3.016,2105,3.562,2183,3.329,2250,6.559,2426,3.016,2429,3.562,2430,7.21,2431,3.916,2432,3.916,2433,3.562]],["t/578",[152,4.05,170,1.759,171,1.358,183,1.159,184,1.903,195,1.183,236,2.544,237,2.631,241,3.478,244,4.348,248,1.97,249,3.235,250,3.664,251,4.187,253,2.92,276,1.809,392,3.986,411,3.002,418,2.101,520,3.111,658,4.187,951,3.543,1090,3.235,1101,3.638,1185,4.16,1267,2.952,1568,4.82,1896,4.592,2434,5.401,2435,5.401]],["t/580",[168,2.636,257,2.371,502,4.09]],["t/582",[152,3.397,163,1.855,184,2.226,195,0.992,250,2.265,253,2.479,257,2.588,486,4.828,502,4.774,504,7.126,509,2.459,510,2.816,653,6.52,845,4.867,928,3.574,974,4.58,1260,3.64,1690,7.126,2413,4.867,2414,4.867,2436,5.372]],["t/584",[195,1.084,198,3.008,199,1.998,252,3.774,253,2.627,257,2.295,264,1.645,269,1.667,308,3.906,316,2.882,327,4.136,341,3.546,418,2.687,502,4.779,517,3.354,890,4.94,947,3.655,974,3.774,1087,3.446,1327,3.978,1342,4.788]],["t/586",[244,3.989,434,3.274,546,5.032,574,4.719]],["t/588",[169,2.431,171,2.081,184,2.185,191,2.561,192,4.856,193,4.068,195,1.3,216,3.869,226,2.935,231,2.643,233,2.344,244,3.647,248,2.262,268,2.863,302,3.046,323,5.297,394,4.178,397,4.86,546,5.179,591,3.641,599,3.877,1218,3.508,1623,3.447,2437,6.202]],["t/590",[192,3.754,231,3.07,244,4.02,248,3.653,276,2.413,302,2.355,323,4.609,392,3.812,411,4.003,434,2.604,438,2.803,454,2.886,526,4.994,546,4.003,593,3.646,1267,3.936,1268,6.124,1623,4.003,1770,5.548,2438,7.203]],["t/592",[163,1.873,170,1.49,192,3.325,195,1.002,233,2.388,264,1.52,269,1.54,276,2.137,302,2.086,313,2.086,422,2.825,434,3.417,478,3.82,514,3.745,546,4.688,657,3.376,849,3.82,860,3.675,1101,4.297,1170,3.901,1342,5.848,1361,6.248,1472,6.497,2158,5.803,2439,6.379,2440,6.379,2441,6.379]],["t/594",[163,2.162,169,2.162,181,4.501,183,1.579,188,3.104,192,3.837,233,2.619,248,3.374,270,2.864,276,2.466,302,2.407,511,4.829,537,4.959,546,4.091,966,5.67,1008,5.67,1592,6.258,2442,7.361,2443,7.361]],["t/596",[172,2.836,192,4.874,231,2.284,233,1.517,244,3.3,248,3.15,253,2.216,269,1.808,276,1.795,302,2.449,316,2.236,324,3.3,392,2.836,411,2.978,434,3.381,438,2.085,454,2.146,526,3.715,546,4.8,593,3.791,949,4.793,1267,2.928,1268,4.555,1308,6.106,1623,2.978,1770,4.127,2215,4.127,2444,7.856,2445,4.874,2446,5.358,2447,4.874]],["t/598",[163,1.457,172,5.059,192,4.71,226,2.348,233,2.008,244,3.125,248,3.02,253,2.098,269,2.182,270,1.93,276,1.662,302,2.319,316,2.07,324,3.125,392,2.625,434,3.267,546,4.602,556,2.806,598,2.971,825,3.549,949,4.539,1308,6.131,1623,2.757,2015,4.918,2215,3.821,2444,7.532,2445,4.513,2447,4.513,2448,8.28,2449,4.961,2450,4.961]],["t/600",[169,2.187,172,2.813,191,2.195,195,0.835,233,1.505,241,2.454,
244,3.282,253,2.204,269,2.076,276,1.781,302,2.435,316,2.218,324,3.282,434,3.548,545,4.372,546,4.78,593,2.69,945,2.516,949,4.766,951,4.886,1308,6.091,1461,5.518,2088,4.094,2215,4.094,2451,5.315,2452,7.449,2453,5.315,2454,5.315,2455,5.315,2456,4.835]],["t/602",[172,3.03,233,2.22,241,2.643,244,3.94,253,2.32,269,1.893,313,1.872,316,2.389,324,3.455,434,3.644,514,6.11,546,3.182,593,2.897,674,5.144,849,3.428,949,5.017,1308,6.233,1795,5.61,2088,4.409,2211,5.207,2215,4.409,2456,5.207,2457,5.724,2458,5.724]],["t/604",[168,2.636,262,2.756,619,5.737]],["t/606",[162,3.535,195,1.284,219,3.444,226,2.882,236,2.762,237,2.856,249,3.646,259,3.646,262,1.828,274,2.683,302,1.991,332,4.221,341,3.127,361,2.489,364,2.541,371,5.131,394,4.102,423,4.69,549,3.444,591,3.575,619,5.771,681,4.102,1010,3.896,1454,5.363,1714,5.177,2433,5.539,2459,5.177,2460,6.089]],["t/608",[152,3.131,162,2.725,163,1.71,170,1.853,195,1.246,199,2.611,215,4.703,218,2.829,250,2.087,251,3.237,253,2.867,257,2.05,260,3.237,263,3.82,311,2.991,370,4.41,392,3.082,418,2.266,485,3.029,486,3.355,492,2.43,496,3.727,557,3.82,619,3.641,854,3.131,866,5.345,1260,3.355,1827,5.298,2461,5.824,2462,5.824,2463,4.951]],["t/610",[123,2.773,171,1.358,183,1.159,184,1.903,195,1.362,226,2.556,253,2.776,257,1.395,263,5.689,274,2.38,284,2.353,324,2.38,364,3.143,371,3.002,392,2.858,422,1.809,485,3.583,486,3.111,492,3.143,583,2.659,619,3.376,636,2.589,783,4.906,784,3.745,803,4.16,885,2.353,1528,2.695,1548,4.592,1689,3.235,1866,4.352,1871,4.592,2085,6.852,2463,4.592,2464,7.532,2465,5.401,2466,5.401,2467,4.913,2468,4.913]],["t/612",[253,2.643,263,5.86,2469,8.126,2470,8.933,2471,8.933]],["t/614",[162,2.916,163,2.151,170,1.212,181,3.175,195,1.15,214,3.877,246,4.723,248,2.673,262,1.559,269,1.769,276,1.739,291,4.183,330,3.405,390,3.497,392,4.493,397,5.415,418,2.02,485,1.982,557,3.405,573,3.109,582,3.762,583,2.556,618,3.322,673,5.079,729,4.183,730,3.714,851,6.229,949,6.458,1533,3.999,2102,5.427,2472,8.49,2473,5.191,2474,4.723,2475,5.191,2476,5.191,2477,5.191]],["t/616",[162,2.808,195,0.956,253,1.801,260,3.384,263,5.363,269,1.47,280,2.919,282,3.806,311,3.127,361,2.489,417,1.914,509,2.369,556,3.444,606,3.328,619,3.806,671,3.384,872,4.356,1175,5.177,1247,5.177,1517,4.897,1631,4.51,1876,5.177,2396,4.69,2404,7.601,2405,6.952,2478,6.089,2479,6.089,2480,5.539,2481,6.089,2482,5.539,2483,6.089,2484,6.089,2485,6.089]],["t/618",[123,3.583,183,1.497,195,1.096,226,3.303,253,2.919,485,3.767,486,4.019,636,3.345,885,3.04,1528,3.482,1689,4.179,1871,5.932,2396,6.887,2404,6.887,2463,5.932,2467,6.347,2468,6.347,2486,8.941,2487,6.977]],["t/620",[253,2.541,485,3.279,1999,7.813,2396,6.616,2404,6.616,2469,7.813,2488,8.589,2489,8.589]],["t/622",[162,3.422,167,5.279,195,1.119,260,3.961,262,2.14,293,2.349,335,3.894,436,4.8,502,4.038,503,2.855,619,4.455,668,5.742,885,3.105,1278,4.455,1972,4.941,2026,7.705,2490,9.062,2491,7.126,2492,7.126]],["t/625",[168,2.636,233,2.598,1106,5.737]],["t/627",[170,1.372,173,4.073,174,4.073,195,1.423,252,4.362,258,2.974,264,1.4,303,5.667,308,3.323,341,3.017,369,4.087,382,2.649,390,3.958,393,4.734,397,3.449,409,3.593,503,2.354,509,2.286,545,2.618,549,3.323,673,4.073,695,4.734,711,3.518,848,3.307,864,4.525,874,4.588,1294,4.525,1444,4.073,2235,4.995,2474,5.344,2493,5.875,2494,5.875,2495,5.875]],["t/629",[170,2.26,171,1.684,188,2.825,195,1.367,233,1.896,260,3.723,261,4.644,262,2.906,268,3.093,269,1.617,293,2.208,335,3.661,378,3.987,382,3.02,383,3.421,417,2.105,544,2.534,619,4.187,848,2.443,850,3.858,874,3.39]],["t/631",[160,3.183,162,1.933,195,0.884,236,1.901,237,1.966,250,2.017,2
53,1.665,257,2.002,258,2.849,259,3.371,262,3.008,263,3.692,265,3.242,293,1.855,332,3.902,341,3.98,364,2.348,368,3.242,369,2.537,378,3.016,382,2.537,383,3.045,392,2.979,417,1.769,418,2.19,485,3.385,492,2.348,509,2.19,510,2.508,544,2.129,619,4.845,642,3.692,662,3.183,671,3.128,684,3.791,702,4.169,1159,3.518,2459,4.785]],["t/633",[160,4.257,171,1.892,195,1.182,199,2.178,250,2.698,253,2.227,257,1.945,259,4.508,262,2.816,263,4.937,293,2.481,392,3.983,417,2.366,418,2.929,485,2.874,544,2.847,657,3.983,866,5.07]],["t/635",[169,2.877,170,1.886,171,1.503,179,3.071,180,3.826,191,2.47,192,4.768,272,3.923,308,4.569,324,2.635,370,5.084,408,4.278,419,5.084,547,4.569,572,3.581,575,5.084,576,3.738,577,4.818,578,5.44,579,3.923,580,3.657,581,5.44,582,3.071,583,2.944,584,5.44,585,5.44,586,5.084,587,3.738,588,5.44,589,4.028,590,5.44,591,3.511]],["t/637",[199,2.62,256,4.583,257,2.339,264,2.157]],["t/639",[170,2.321,199,2.393,229,6.371,243,6.694,256,4.186,270,3.218,284,3.603,589,5.572,2079,6.371]],["t/641",[184,2.762,195,0.899,199,1.656,256,3.969,264,2.131,293,1.887,354,3.663,369,2.581,380,3.578,417,1.799,428,2.984,432,3.663,473,2.643,513,2.709,544,2.165,606,3.128,613,4.08,615,5.207,636,2.744,708,4.409,740,3.361,934,2.467,937,3.078,969,7.604,1087,2.857,1121,6.667,1549,4.867,2496,5.724,2497,4.867,2498,8.944,2499,5.207,2500,5.724,2501,7.133,2502,7.841]],["t/643",[195,0.741,239,1.871,256,5.328,264,1.123,320,3.762,422,2.696,427,5.58,430,3.8,437,3.177,475,2.948,509,1.835,513,3.809,544,1.784,556,2.668,570,2.621,574,3.565,726,3.633,897,5.758,934,2.032,935,2.354,947,2.496,1303,4.29,1630,5.511,1645,3.27,1750,4.29,2189,4.009,2501,6.222,2503,6.84,2504,4.716,2505,4.716,2506,6.222,2507,6.84,2508,4.716,2509,4.716,2510,4.716,2511,4.716,2512,4.716,2513,4.716,2514,4.716,2515,4.716,2516,4.716,2517,4.716,2518,4.716,2519,4.716,2520,4.716,2521,4.29,2522,4.009,2523,3.8]],["t/645",[262,2.756,378,3.571,574,4.784]],["t/647",[163,0.866,168,0.847,169,2.025,170,0.689,171,0.741,183,0.633,184,1.68,195,0.943,233,1.35,239,1.17,248,1.74,249,1.766,253,0.873,257,1.232,258,1.493,269,1.665,270,1.148,276,1.598,283,1.537,288,1.515,294,3.212,302,0.964,305,1.586,313,0.964,326,2.272,331,2.856,333,1.887,337,4.771,350,0.823,362,1.668,378,2.683,383,3.349,413,1.732,422,0.988,434,1.066,438,1.148,440,2.185,472,1.433,476,1.137,485,1.126,502,1.314,518,4.195,519,3.768,556,1.668,558,1.561,612,1.935,623,2.045,629,3.532,631,2.377,634,4.624,635,2.683,636,1.414,637,2.272,638,5.461,639,3.842,640,4.645,641,5.104,642,4.966,643,4.054,644,3.051,645,1.639,646,1.639,647,3.673,648,2.272,649,2.508,650,2.377,651,2.508,652,2.045,653,2.045,654,2.683,655,2.377,685,3.532,871,3.412,882,4.295,885,1.285,981,1.987,1009,2.683,1010,1.887,1117,1.804,1279,1.987,1342,2.045,1522,2.377,1549,2.508,1851,2.272,2097,2.508,2524,2.683,2525,2.949,2526,2.949,2527,2.949,2528,2.683,2529,2.683,2530,2.683]],["t/649",[191,2.585,195,0.983,219,3.541,248,2.283,257,2.418,284,2.727,293,2.063,294,4.217,311,3.214,327,3.749,328,3.541,331,5.976,335,4.552,378,3.642,418,2.436,519,4.552,537,4.217,621,4.033,642,4.106,656,4.106,657,3.313,665,3.913,783,3.541,887,3.541,1307,5.694,2097,5.322]],["t/651",[183,1.26,233,2.26,248,2.912,276,2.674,284,3.477,288,3.017,305,3.159,326,4.525,331,3.518,352,3.323,378,2.286,383,3.136,518,3.557,519,4.362,605,2.854,629,6.715,634,6.983,701,4.525,738,2.781,898,4.073,1010,3.759,1110,3.518,1513,5.344,1567,5.344,1590,5.344,1659,4.352,2524,9.541,2531,5.875]],["t/653",[169,2.775,233,2.477,248,2.003,257,1.418,269,2.282,276,1.839,378,3.863,383,2.157,411,3.051,418,2.136,438,3.677,440,4.066,476,2.11
6,518,4.424,640,3.698,641,4.667,643,4.667,670,3.513,673,3.806,882,5.45,885,2.392,1267,3,1455,3.927,1522,4.423,2166,6.93,2528,4.994,2532,6.138,2533,5.49,2534,5.49,2535,5.49]],["t/655",[156,3.545,195,1.324,239,2.531,252,3.486,262,1.915,269,2.036,281,3.745,293,2.103,324,3.716,378,3.677,438,2.482,485,3.22,502,2.843,518,3.759,830,5.803,1454,5.532,1526,7.672,1601,5.803,1734,5.14,2164,7.672,2530,5.803,2532,5.14,2536,6.379,2537,5.803,2538,6.379]],["t/657",[410,6.277,502,4.035,574,4.719,974,4.948]],["t/659",[152,2.449,163,2.317,183,1.431,184,1.605,195,1.364,198,2.906,236,1.539,237,1.591,239,1.807,242,2.339,250,1.633,253,1.973,257,2.643,264,1.085,267,3.158,269,1.905,278,3.509,284,1.985,307,2.988,378,1.773,476,3.347,486,5.002,502,4.65,504,5.67,509,1.773,510,2.03,653,5.47,738,2.156,845,3.509,904,2.274,928,2.577,974,4.312,1087,2.274,1260,2.624,1528,2.274,1632,4.144,1690,5.67,2413,3.509,2414,3.509,2436,3.873]],["t/661",[123,4.443,195,1.041,253,2.849,284,2.889,307,4.35,485,4.222,496,5.536,502,4.928,653,6.677,928,3.751,1485,4.598,2413,5.108,2414,5.108]],["t/663",[171,1.754,218,3.389,233,3.046,253,2.645,378,2.715,383,2.741,433,4.838,502,4.905,928,3.947,974,4.886,1428,4.7,1485,4.838,1848,8.134,2539,6.347]],["t/665",[163,2.399,253,2.417,262,2.453,378,3.838,382,3.683,383,3.21,433,5.664,502,3.641,629,6.052,686,6.583,1485,5.664]],["t/668",[39,3.212,152,2.143,171,1.518,183,1.563,195,1.277,198,1.736,219,2.255,233,2.063,236,1.346,237,2.109,239,3.225,248,1.454,253,2.156,258,2.017,264,1.438,269,1.458,276,1.335,283,2.077,293,1.314,316,3.393,362,2.255,428,3.147,474,2.852,476,2.809,478,2.387,479,1.962,483,3.358,502,3.623,520,1.646,549,2.255,696,2.763,854,2.143,858,2.109,928,2.255,937,3.919,945,1.887,971,3.07,974,2.178,1097,4.279,1160,3.07,1266,2.952,1342,2.763,1430,5.399,1524,3.212,1566,2.763,1824,3.389,1889,2.952,1907,2.685,1984,2.952,2537,6.63,2540,3.626,2541,3.986,2542,3.986,2543,7.396,2544,3.986,2545,3.986,2546,3.626,2547,3.986,2548,3.986]],["t/670",[39,5.096,162,1.095,167,2.362,169,0.936,183,2.182,219,1.803,231,1.359,233,2.88,250,1.143,253,2.912,279,2.456,280,1.529,428,3.296,454,2.889,478,3.04,479,2.499,502,4.069,512,2.236,520,3.886,646,1.772,653,4.385,670,2.04,681,2.148,696,4.999,818,2.569,926,3.33,937,3.4,974,3.941,1097,3.713,1213,6.13,1271,2.281,1279,2.148,1428,3.419,1524,2.569,1889,3.76,1907,3.419,1984,5.341,1985,6.559,2041,5.554,2413,3.91,2414,3.91,2539,2.9,2540,4.618,2543,4.618,2546,2.9,2549,3.188,2550,3.188,2551,3.188,2552,3.188]],["t/672",[233,2.563,269,2.186,270,3.523,574,4.719]],["t/674",[163,2.158,169,0.969,170,0.77,176,2.287,180,2.111,184,1.162,188,2.199,191,1.362,195,1.4,216,1.542,219,1.866,231,1.406,233,3.144,248,1.902,257,1.347,258,1.669,269,2.231,270,1.283,271,2.443,274,1.453,275,2.804,282,2.062,286,2.164,287,1.646,288,2.678,289,2.541,291,2.658,302,1.078,305,1.773,328,2.95,329,1.321,330,2.164,331,3.123,332,3.615,333,3.337,335,1.803,342,1.602,359,1.9,362,2.95,378,1.283,417,2.033,427,2.287,502,1.47,598,3.123,621,1.421,623,3.615,634,2.541,657,1.746,670,2.111,858,1.746,864,2.541,898,3.615,907,2.36,946,4.017,1087,2.602,1117,2.017,1159,3.26,1267,1.803,1283,1.936,1390,2.111,1451,2.658,1658,2.804,1659,2.443,1668,2.541,1691,2.658,1729,2.443,1782,3,2172,2.541,2410,2.658,2553,3.298,2554,3,2555,3.298,2556,3.298,2557,3.298,2558,3.298,2559,3.298,2560,3.298,2561,3.298]],["t/676",[170,1.81,171,1.415,183,1.207,190,3.128,198,2.452,219,3.183,226,2.664,233,3.003,244,2.48,250,2.017,253,2.293,269,1.871,270,3.898,277,4.785,278,4.335,279,4.335,280,2.698,281,3.304,282,3.518,283,2.933,284,2.452,285,5.12,330,5.084,350,1.571,3
92,2.979,422,1.885,427,3.902,2172,6.828,2562,5.12,2563,5.628,2564,5.628,2565,5.628,2566,5.628]],["t/678",[163,1.237,169,2.936,184,1.484,195,0.988,226,1.993,233,2.754,248,2.748,270,2.449,286,5.868,287,3.141,288,4.292,289,5.804,290,3.244,291,3.393,294,2.837,298,2.426,305,2.264,311,3.232,324,1.856,328,3.56,331,5.006,332,4.363,333,4.027,354,2.695,410,2.92,418,1.639,438,3.252,598,2.522,621,3.602,636,2.019,660,2.695,665,2.633,858,2.229,946,3.244,1015,3.393,1659,3.12,1734,3.393,1770,3.244,1889,4.662,2014,3.244,2264,3.581,2521,8.538,2567,4.211]],["t/680",[163,1.91,183,1.832,233,3.116,248,2.372,284,3.721,328,4.832,331,3.894,335,3.554,411,3.614,502,4.689,516,5.009,580,3.977,610,5.24,898,4.509,946,5.009,1659,4.817,2073,5.24,2410,5.24,2568,6.503,2569,6.503]],["t/682",[170,1.81,171,1.415,190,3.128,233,3.142,236,1.901,244,2.48,250,2.017,253,2.293,270,2.19,295,4.785,299,5.545,321,7.05,350,1.571,370,4.308,392,2.979,399,3.692,422,1.885,502,4.464,520,3.661,605,2.734,606,3.076,621,2.425,818,4.535,1097,3.304,1342,3.902,1889,4.169,2172,4.335,2562,5.12,2570,5.628]],["t/684",[526,6.363,763,6.566,1662,7.07]],["t/686",[160,3.714,182,5.058,189,3.589,254,4.553,283,3.423,293,2.164,307,4.307,343,4.105,422,2.2,506,5.291,526,5.96,719,3.783,720,5.974,727,5.974,841,4.698,981,4.424,1095,4.864,1148,5.583,1178,4.864,1216,5.291,1518,5.583,2426,7.383,2532,5.291,2571,5.974,2572,6.567,2573,6.567,2574,6.567,2575,6.567,2576,6.567]],["t/688",[186,2.68,260,3.026,283,3.948,284,3.3,293,1.795,341,2.796,342,2.645,352,4.285,510,2.426,571,5.868,572,3.261,576,3.404,664,6.668,666,4.953,676,4.629,718,6.44,719,4.364,725,6.44,729,4.387,734,4.629,740,3.197,841,3.895,1086,4.194,1099,3.197,1272,3.668,1278,4.735,1714,4.629,2577,5.445,2578,6.891,2579,7.575,2580,4.629,2581,7.575,2582,4.953,2583,5.445,2584,5.445,2585,5.445,2586,5.445]],["t/690",[171,1.079,186,2.113,195,0.674,216,2.006,226,2.032,260,2.386,269,1.541,276,2.138,283,3.327,293,2.104,337,2.308,342,2.085,344,2.386,350,2.517,357,4.611,362,4.773,369,3.804,389,3.18,407,2.816,417,1.349,433,4.425,509,1.67,526,4.425,538,4.728,572,4.564,576,2.683,583,2.113,592,3.459,668,3.459,709,3.306,719,2.473,737,5.807,803,3.306,966,3.306,1087,2.142,1099,2.52,1272,2.892,1461,3.18,1476,3.071,1498,3.459,1522,3.459,1596,3.905,1609,3.18,1631,3.18,1895,3.905,1993,3.905,2180,3.905,2582,3.905,2587,4.293,2588,4.293,2589,7.62,2590,4.293,2591,4.293,2592,4.293,2593,4.293,2594,3.905,2595,4.293,2596,4.293,2597,4.293,2598,4.293,2599,4.293]],["t/692",[252,4.52,280,3.965,436,5.572,663,4.764,713,6.126,1473,5.734,2600,7.032,2601,7.524,2602,7.524,2603,7.361]],["t/694",[267,5.529,268,3.682,526,6.739,544,3.017,763,6.954,1252,6.78,1956,6.143,2604,7.975,2605,7.975,2606,7.975,2607,7.975,2608,7.975]],["t/696",[366,5.613,719,5.287,1662,7.07]],["t/698",[195,1.131,261,4.994,262,3.007,273,5.153,280,3.453,293,2.374,310,4.074,342,3.499,719,4.149,845,5.548,846,6.124,871,5.153,882,5.153,1106,4.503,1121,6.124,1723,6.124,2594,6.552,2609,7.203,2610,7.203,2611,7.203,2612,7.203]],["t/700",[262,3.068,620,4.603,663,4.336,664,5.07,860,4.336,882,5.385,1097,4.419,1101,5.07,1106,4.705,1135,6.399,1473,5.218,1623,5.213,1723,6.399,2603,5.575,2613,6.847,2614,6.399]],["t/702",[195,1.283,262,2.453,366,4.996,382,3.683,454,3.273,738,3.867,740,4.796,972,6.293,1010,5.228,1157,7.432,1237,6.946,2615,8.17]],["t/704",[195,1.062,262,2.632,344,3.761,345,2.981,348,6.077,452,5.212,454,2.711,467,5.146,622,3.425,667,3.581,719,3.897,738,3.202,740,3.972,750,5.452,825,4.841,1361,5.012,1641,4.229,2263,6.155,2616,6.766,2617,6.766,2618,6.766,2619,6.766,2620,6.155,2621,6.155,2622,6.
155,2623,6.766]],["t/706",[195,1.422,357,4.948,454,3.627,2614,7.697]],["t/708",[195,1.663,199,1.502,202,3.405,242,2.666,344,2.885,346,4.414,347,4.183,348,3.599,359,2.991,362,2.937,403,4.414,452,3.999,454,2.08,517,2.522,663,2.991,664,3.497,700,3.714,719,4.891,738,2.457,740,3.048,750,4.183,825,3.714,885,2.262,1010,3.322,1040,3.846,1389,3.846,1497,3.999,1641,3.245,2052,4.414,2325,4.723,2396,6.54,2614,4.414,2620,4.723,2624,5.191,2625,5.191,2626,4.723,2627,5.191,2628,5.191,2629,5.191,2630,5.191,2631,5.191,2632,5.191,2633,5.191,2634,7.326,2635,4.723,2636,5.191,2637,5.191]],["t/710",[195,1.483,256,3.853,345,3.354,357,4.16,383,2.991,454,3.05,538,5.639,571,6.919,738,3.603,740,4.469,825,5.446,1641,4.759,2638,7.612,2639,7.612,2640,7.612]],["t/712",[252,4.52,280,3.965,436,5.572,663,4.764,713,6.126,1473,5.734,2600,7.032,2601,7.524,2602,7.524,2603,7.361]],["t/714",[544,3.09,570,4.541,663,4.706,1358,6.583,1473,5.664,1636,5.504,1852,7.432,2094,7.432,2641,8.17,2642,8.17,2643,8.17,2644,8.17]],["t/716",[418,3.672]],["t/718",[169,1.967,170,1.564,183,1.868,192,4.539,193,4.394,195,1.367,197,4.187,216,3.131,236,2.262,262,2.011,264,1.596,316,3.634,361,2.738,396,4.792,397,3.932,398,5.397,399,4.394,400,6.093,401,6.093,402,6.093,403,5.695,404,6.093,405,5.695,406,4.792,407,4.394,2645,6.093]],["t/720",[169,2.115,170,2.131,171,1.811,188,3.037,225,4.405,262,3.007,293,2.374,305,4.906,387,6.552,417,2.264,467,4.229,617,5.335,1282,4.852,1641,4.503,2554,6.552,2646,7.203,2647,7.203,2648,7.203,2649,6.124]],["t/722",[637,7.07,1662,7.07,2650,7.07]],["t/725",[292,4.372,316,3.716,329,2.274,341,4.572,378,3.033,379,4.573,464,6.173,605,2.757,658,3.155,853,3.471,1135,4.825,1136,4.825,1216,4.573,1220,5.163,1267,3.102,1399,5.163,1636,3.823,1773,4.573,2088,4.372,2174,5.163,2650,6.005,2651,5.676,2652,5.163,2653,5.676,2654,4.825,2655,5.676,2656,5.676,2657,4.825,2658,5.676,2659,5.676,2660,5.676,2661,5.676,2662,5.676,2663,5.163,2664,4.573,2665,5.676,2666,5.676,2667,5.676,2668,5.676,2669,5.676]],["t/727",[162,2.018,169,1.725,191,2.426,193,3.854,254,4.073,322,4.525,329,3.198,334,4.203,337,4.874,341,4.994,343,3.673,344,3.265,349,4.203,350,1.64,361,2.401,438,2.286,516,4.525,549,3.323,645,4.436,646,3.265,673,4.073,860,3.384,965,4.734,981,3.958,1069,4.995,1071,5.344,1178,4.352,1215,5.344,2580,4.995,2657,4.995,2664,4.734,2670,5.875,2671,5.875,2672,5.344,2673,5.875,2674,5.875,2675,5.344]],["t/729",[162,1.587,169,1.122,174,1.11,183,0.991,184,1,191,1.578,193,1.861,195,0.6,216,0.749,223,0.979,224,2.413,233,0.453,236,0.541,245,1.29,254,1.967,258,0.811,260,2.124,262,2.914,274,1.25,280,0.768,287,0.799,292,2.186,293,0.528,302,1.728,303,2.389,307,1.051,316,1.929,329,1.852,337,0.861,341,2.715,342,0.778,348,1.11,353,2.102,361,1.16,378,2.887,379,1.29,380,1.774,382,2.084,383,0.629,391,1.234,396,2.03,397,0.94,399,1.051,409,1.735,414,1.362,422,0.537,438,1.104,440,1.186,444,1.186,454,0.642,464,4.924,467,2.713,474,1.146,525,1.29,527,1.234,537,1.079,570,1.577,576,1.001,577,1.29,587,1.001,592,1.29,599,1.001,620,0.979,636,1.36,645,3.516,646,2.938,650,1.29,658,0.89,662,1.605,685,2.83,690,1.146,718,2.413,738,0.758,740,0.94,762,1.146,837,3.079,848,1.035,853,0.979,854,0.861,945,1.809,965,2.287,966,1.234,969,1.362,971,2.186,972,2.186,974,3.457,981,1.079,1093,1.457,1111,1.234,1170,1.735,1182,1.457,1184,3.249,1192,2.413,1218,0.906,1223,1.29,1267,0.875,1358,3.079,1362,1.146,1444,1.11,1454,1.051,1478,1.457,1485,1.11,1568,1.025,1610,1.457,1641,1.001,1691,1.29,1715,1.146,1742,2.582,1746,2.582,1766,1.29,1773,1.29,1803,1.457,1830,2.287,1972,1.967,2088,2.186,2120,1.457,2161,1.457,21
65,1.457,2364,1.457,2407,1.457,2428,4.809,2529,1.457,2580,1.362,2645,1.457,2650,4.504,2652,2.582,2654,2.413,2657,1.362,2663,1.457,2672,1.457,2675,1.457,2676,4.622,2677,2.838,2678,1.602,2679,1.602,2680,2.838,2681,1.602,2682,1.602,2683,3.821,2684,1.602,2685,1.602,2686,1.602,2687,1.602,2688,1.602,2689,1.602,2690,1.602,2691,1.602,2692,1.602,2693,1.602,2694,1.602,2695,1.602,2696,3.476,2697,2.838,2698,2.838,2699,2.413,2700,2.838,2701,1.602,2702,1.602,2703,1.602,2704,1.602,2705,1.602,2706,1.602,2707,1.602,2708,1.29,2709,1.602,2710,1.602,2711,1.602,2712,1.602,2713,2.838,2714,1.602,2715,1.602,2716,1.602,2717,1.602,2718,1.602,2719,1.602,2720,1.602,2721,1.602,2722,1.602,2723,1.602,2724,1.602,2725,1.602,2726,1.602,2727,1.602,2728,2.582,2729,1.602,2730,1.602,2731,1.602,2732,1.602,2733,1.602,2734,1.602,2735,1.602,2736,1.602,2737,1.602]],["t/731",[170,1.117,171,1.737,183,1.026,195,0.751,233,1.956,257,2.296,260,2.659,268,2.209,280,2.293,290,5.323,308,2.706,316,2.884,341,3.549,342,3.357,378,1.861,396,3.422,445,4.351,454,2.768,464,4.791,473,2.209,478,2.865,506,3.854,553,3.138,573,4.138,620,2.925,636,2.293,645,2.659,659,3.543,735,3.543,738,2.264,740,2.808,748,3.685,807,2.925,946,3.685,1068,3.422,1609,3.543,1631,3.543,2578,4.351,2600,4.067,2650,3.685,2654,6.897,2696,4.351,2728,4.351,2738,6.91,2739,6.91,2740,4.783,2741,4.783,2742,4.783,2743,6.91,2744,4.783,2745,4.783,2746,4.783,2747,4.351]],["t/733",[663,4.948,1473,5.955,1956,6.616,2650,6.616,2748,8.589,2749,8.589,2750,8.589,2751,8.589]],["t/735",[168,3.035,274,4.044]],["t/737",[168,1.345,169,1.375,170,1.094,171,2.015,186,2.305,194,4.354,195,0.735,199,1.969,236,1.582,251,3.782,257,1.21,262,1.406,279,3.607,302,2.621,311,2.405,313,1.531,341,3.494,350,2.979,354,2.996,360,3.773,364,1.954,368,2.698,369,3.068,371,2.603,375,5.129,381,2.37,417,1.472,422,1.569,434,2.46,520,1.934,556,2.649,570,2.603,599,2.927,645,2.603,646,2.603,660,2.996,673,3.247,1008,3.607,1159,2.927,1225,2.478,1306,3.155,1390,2.996,1396,3.469,1408,3.247,1413,2.649,1476,3.35,2014,3.607,2426,3.607,2752,4.683,2753,4.683,2754,4.683,2755,4.683]],["t/739",[123,1.186,162,0.793,163,0.678,168,1.451,169,2.359,170,1.876,171,0.979,183,0.495,184,1.372,188,0.974,191,0.953,194,1.477,195,1.201,198,1.697,199,1.462,200,3.089,215,1.203,218,1.121,221,4.113,223,1.412,226,1.843,233,1.103,236,2.427,237,0.806,242,1.186,244,1.017,251,4.466,256,1.168,257,1.976,262,0.693,264,1.712,269,0.557,271,1.71,272,1.514,274,3.166,275,1.963,276,0.773,284,2.201,287,1.152,293,2.184,297,1.71,302,1.273,313,1.652,316,0.963,331,2.332,336,1.963,341,1.186,350,2.005,360,1.86,364,0.963,368,1.33,369,2.675,371,1.283,374,1.601,381,1.168,406,1.652,413,1.355,418,1.515,434,0.835,438,0.898,470,1.652,476,2.286,483,0.953,509,1.515,510,1.029,512,1.017,517,1.121,520,2.45,535,1.963,544,0.873,547,1.306,557,1.514,558,1.222,559,1.186,583,1.136,591,1.355,602,2.885,616,1.86,618,2.492,619,1.443,621,2.177,630,2.555,636,1.107,645,1.283,670,1.477,684,1.555,711,1.383,807,1.412,854,1.241,864,1.778,898,1.601,928,1.306,936,1.355,938,1.477,943,1.601,948,1.86,1012,1.383,1086,1.778,1087,1.152,1102,1.86,1106,1.443,1164,2.1,1216,1.86,1218,1.306,1225,1.222,1302,1.383,1345,3.138,1396,1.71,1404,2.1,1406,1.652,1408,2.7,1571,1.963,1609,1.71,1623,1.283,1691,1.86,1692,1.86,1709,3.138,1729,1.71,1762,2.1,1876,1.963,2426,1.778,2756,2.309,2757,2.309,2758,2.309]],["t/740",[163,1.329,168,2.257,169,1.949,170,1.057,171,1.137,181,2.767,186,3.267,188,2.799,195,0.71,231,1.929,233,1.879,236,2.242,237,1.58,241,2.089,242,2.323,248,3.362,249,2.71,251,2.515,257,2.572,258,2.29,262,1.359,264,1.078,269,
1.092,274,1.993,284,1.971,293,2.187,313,1.479,342,2.198,350,1.263,352,2.559,381,2.29,417,1.422,434,1.636,476,1.744,512,1.993,552,2.395,555,2.606,598,2.71,622,2.29,623,3.137,663,2.606,701,3.485,717,3.485,849,2.71,885,1.971,947,2.395,1097,2.656,1099,3.896,1117,2.767,1260,2.606,1345,3.646,1348,3.646,1368,3.137,1476,3.237,1512,4.116,1729,3.352,1956,3.485,2172,5.112]],["t/742",[162,2.502,170,1.203,188,2.172,195,1.328,198,3.174,199,2.446,203,2.645,215,2.685,223,3.15,233,2.063,236,1.74,237,1.799,250,1.846,251,2.863,252,2.815,253,2.501,260,2.863,262,1.547,263,3.379,274,3.21,293,1.698,298,2.967,313,1.684,332,3.572,364,2.15,368,2.967,369,3.285,371,2.863,417,1.619,418,2.004,520,2.127,573,3.085,619,5.285,673,3.572,684,4.909,854,2.77,865,2.863,874,3.688,1010,3.296,1068,3.686,1326,4.686,1402,4.686,2404,3.968,2459,4.38,2759,5.152]],["t/744",[168,1.617,171,1.415,186,2.771,195,1.217,198,2.452,199,2.764,203,3.98,250,2.017,252,3.076,257,1.454,269,1.359,270,2.19,274,2.48,302,2.534,313,1.84,320,2.631,350,1.571,381,2.849,434,2.035,473,2.599,583,2.771,604,3.076,636,2.698,665,3.518,671,3.128,711,3.371,739,3.692,1012,3.371,1087,3.868,1106,3.518,1153,4.535,1413,3.183,1476,4.027,1494,4.535,1533,4.335,2102,4.169,2285,5.12,2664,4.535,2760,5.628,2761,5.628,2762,5.628,2763,5.628]],["t/746",[123,2.176,162,1.455,183,1.356,190,2.356,195,1.317,236,2.136,237,1.48,250,1.519,251,2.356,281,2.488,283,2.209,350,2.72,351,2.78,353,3.139,369,2.851,371,2.356,382,1.911,389,3.139,422,1.42,423,3.265,467,4.44,512,2.786,520,2.611,525,3.415,555,2.441,558,2.243,559,3.247,576,2.649,583,2.086,599,2.649,600,2.855,626,3.265,652,2.938,725,3.603,754,3.855,1012,5.022,1090,2.538,1094,3.265,1122,3.265,1170,2.592,1214,3.032,1225,3.346,1267,2.316,1429,2.938,1454,4.147,1693,3.855,1728,5.752,2087,3.855,2236,3.855,2238,5.752,2240,3.855,2242,3.855,2243,3.855,2436,3.603,2635,3.855,2764,4.238,2765,4.238,2766,4.238,2767,5.752,2768,4.238,2769,4.238,2770,6.323]],["t/748",[168,1.842,169,1.883,176,2.995,180,2.764,183,1.376,191,1.784,195,1.201,236,1.459,237,1.509,248,2.34,257,1.657,258,2.187,261,2.995,262,1.297,264,1.029,280,3.075,283,2.252,293,1.424,327,2.587,329,1.731,333,2.764,342,4.111,350,1.206,359,2.489,361,2.622,362,2.444,367,2.156,378,1.681,383,1.697,418,1.681,454,2.57,465,3.2,466,3.481,467,2.536,485,3.745,520,1.784,555,2.489,648,3.328,657,2.286,670,2.764,671,3.565,690,3.091,818,3.481,854,2.323,860,2.489,967,3.481,1087,2.156,1110,2.587,1159,2.701,1160,3.328,1256,3.2,1492,5.453,1517,3.841,1668,3.328,1795,3.091,2061,3.673,2073,3.481,2532,3.481,2771,5.834,2772,3.93,2773,3.93,2774,4.32]],["t/750",[168,1.384,169,2.04,170,1.125,180,3.083,183,1.034,195,1.279,219,2.725,248,1.757,261,3.34,264,1.148,274,2.123,276,1.614,280,3.33,283,2.511,293,1.588,308,2.725,327,2.885,329,1.93,333,3.083,342,4.591,350,1.345,359,2.775,418,2.703,422,1.614,429,4.096,438,1.875,454,2.783,485,3.404,509,1.875,510,2.147,591,2.829,649,4.096,1110,2.885,1160,3.711,1279,3.246,1448,6.319,1492,4.096,1517,2.885,1668,3.711,1715,6.379,1760,4.383,1794,4.383,1795,3.447,2061,5.906,2073,3.882,2480,4.383,2771,6.319,2772,4.383,2773,4.383,2775,4.818]],["t/752",[152,2.393,156,1.507,162,0.931,168,2.236,171,1.119,183,2.124,186,1.334,188,1.143,195,0.699,199,2.574,200,1.658,201,2.008,203,2.909,226,1.283,236,1.504,242,2.286,250,1.596,255,1.939,257,1.15,258,1.372,266,3.418,268,2.615,272,1.778,274,1.961,280,1.299,282,1.694,284,1.181,293,0.893,302,1.455,311,1.392,313,2.368,320,2.081,350,1.83,361,1.108,364,2.736,368,2.564,369,2.956,370,1.507,371,2.474,377,2.564,392,2.356,417,0.852,422,1.491,434,2.371,438,1.055,472,1.3
17,473,1.252,492,1.858,503,1.086,509,1.055,510,1.208,513,1.283,558,1.435,559,1.392,570,2.474,572,1.623,573,1.623,574,1.413,583,2.191,604,1.481,606,1.481,613,2.031,621,1.918,636,2.715,657,1.435,711,2.666,726,2.088,807,2.722,854,1.457,939,4.049,945,1.283,1153,3.587,1170,1.658,1223,2.184,1225,3.47,1278,2.783,1279,1.826,1302,1.623,1389,2.008,1391,3.429,1408,1.879,1528,1.353,1972,1.879,2664,2.184,2708,2.184,2776,2.711,2777,2.711,2778,2.711,2779,2.711,2780,2.711,2781,2.711,2782,2.711,2783,2.711,2784,2.711,2785,2.711,2786,2.711,2787,2.711]],["t/754",[264,2.157,306,3.739,560,5.939,574,4.719]],["t/756",[170,1.629,171,1.754,188,2.942,199,2.587,264,2.13,293,2.3,341,3.583,417,2.193,465,5.168,560,5.865,606,3.813,621,3.006,897,4.992,934,4.635,935,4.462,1647,5.721,2788,6.977]],["t/757",[162,2.08,168,0.859,169,1.779,170,1.415,171,1.523,183,1.635,186,1.471,190,1.661,192,1.558,195,0.757,197,1.869,199,1.395,213,1.828,214,1.582,236,2.046,237,1.044,241,1.38,250,1.728,251,3.367,253,2.056,264,1.443,265,1.722,293,0.985,294,2.014,324,1.317,345,1.317,350,1.345,361,1.222,394,2.014,413,1.755,465,2.214,475,1.869,479,2.373,513,1.415,520,1.991,547,1.691,560,5,576,1.869,582,1.535,597,2.541,599,1.869,613,1.364,622,1.513,630,3.162,662,1.691,705,2.408,717,2.302,848,1.758,854,1.607,858,2.551,862,2.014,874,2.44,885,1.302,897,2.138,904,2.406,934,2.077,935,1.492,945,3.857,947,2.551,1087,1.492,1097,1.755,1099,1.755,1271,3.449,1276,2.014,1343,5.908,1406,2.138,1441,2.138,1487,2.541,1505,4.818,1552,2.541,1636,2.014,1850,2.408,1851,2.302,1914,2.719,2031,4.098,2405,2.541,2571,2.719,2789,2.138,2790,6.057,2791,2.989,2792,2.989,2793,2.989,2794,2.989,2795,2.989,2796,2.989,2797,2.989,2798,2.989,2799,2.989,2800,2.989,2801,2.989,2802,2.989,2803,6.057,2804,2.989,2805,4.82,2806,2.989,2807,2.989,2808,2.989,2809,2.989]],["t/759",[156,2.386,170,1.002,171,1.079,175,2.625,183,2.028,195,0.674,233,1.215,256,4.271,264,1.023,268,2.947,310,4.773,349,4.567,350,1.198,422,2.138,428,3.327,454,1.72,485,2.909,503,1.72,517,2.085,560,5.535,600,2.892,622,2.173,1225,2.272,1306,2.892,1666,5.87,1686,3.459,2186,3.905,2523,3.459,2649,3.649,2789,5.452,2810,4.293,2811,6.383,2812,4.293,2813,4.293,2814,4.293,2815,4.293,2816,4.293,2817,4.293,2818,4.293,2819,4.293,2820,4.293,2821,4.293,2822,4.293,2823,4.293,2824,4.293,2825,4.293,2826,4.293,2827,4.293,2828,4.293,2829,4.293,2830,4.293,2831,4.293,2832,4.293,2833,4.293,2834,4.293,2835,4.293,2836,4.293,2837,4.293,2838,4.293,2839,4.293,2840,4.293]],["t/761",[560,6.02,934,3.955,2011,7.395]],["t/763",[188,2.797,239,3.432,264,1.58,268,3.062,293,2.186,369,2.99,427,4.598,476,2.556,513,3.139,545,2.955,560,6.317,622,3.357,897,4.745,934,4.562,935,5.284,949,4.243,1099,3.893,1260,3.82,1278,4.146,1647,4.243]],["t/765",[306,4.295,350,2.493,561,4.882,574,4.656]],["t/767",[168,2.114,170,1.719,183,1.579,199,2.13,214,3.896,215,3.837,302,2.407,306,4.177,341,3.78,428,3.837,561,5.055,636,3.529,658,4.091,762,5.266,841,5.266,1167,6.258,1623,4.091,2789,5.266]],["t/769",[194,5.292,204,4.52,736,5.917,1533,6.371,1534,7.032,1907,5.572,1945,7.032,2841,9.039,2842,8.271,2843,8.271]],["t/771",[199,2.656,2116,7.803,2844,9.178]],["t/773",[300,4.128,417,2.6,513,3.915,544,3.129,545,3.686,561,5.821,1090,4.953,1447,5.734,2845,8.271]],["t/775",[156,3.512,171,1.588,184,2.226,190,4.658,195,0.992,199,2.721,215,3.293,216,2.953,244,2.784,253,2.479,306,2.609,350,1.763,352,3.574,417,1.986,418,2.459,422,2.117,544,3.17,561,5.14,928,3.574,1087,3.154,1101,4.257,1645,4.381,1792,5.748,2026,5.372,2027,5.748,2028,5.748,2522,5.372,2523,5.091,2846,6.319]],["t/778",[195,1.107,199,2.
869,264,1.68,300,3.519,561,5.419,621,3.879,848,2.572,934,4.756,935,4.949,1647,4.512,2847,7.051,2848,7.051,2849,7.051]],["t/780",[170,1.98,199,2.454,225,5.186,229,6.532,233,2.401,256,4.292,589,5.713,2079,6.532,2850,8.48]],["t/782",[200,5.186,204,4.635,300,4.232,412,4.42,561,4.635,1494,6.833,1629,6.282,2380,7.21,2841,7.714]],["t/784",[195,1.384,233,2.496,422,2.953,444,6.53,852,7.495,1977,7.103]],["t/786",[183,1.652,195,1.209,233,2.693,239,3.055,244,3.392,253,2.278,329,3.085,476,2.968,861,4.813,865,4.28,874,3.897,1568,4.927,1866,6.204,1977,6.204,2008,5.187,2851,7.004]],["t/788",[155,8.925,162,3.493,183,1.598,184,2.624,195,0.835,278,4.094,306,4.052,417,2.341,422,1.781,479,4.233,545,3.319,561,4.071,636,2.548,937,4.005,1287,3.937,1327,4.291,1623,2.954,1645,5.164,2008,5.018,2789,3.803,2852,6.776,2853,8.477,2854,5.315,2855,6.776,2856,7.449,2857,4.835]],["t/790",[162,2.677,165,4.577,183,1.672,219,3.21,306,3.959,316,2.368,479,3.838,561,4.26,607,6.281,636,2.721,845,4.372,1051,9.14,1623,4.333,1645,5.405,2008,5.252,2789,4.061,2852,7.092,2853,8.72,2855,7.092,2857,5.163,2858,5.676,2859,7.796,2860,7.796,2861,5.676,2862,5.676]],["t/793",[328,5.264,561,5.086]],["t/795",[195,1.299,306,4.563,308,4.678,502,3.686,520,3.416,561,5.43,1517,4.953]],["t/797",[306,3.79,502,4.09,561,5.016]],["t/799",[123,3.172,156,4.126,163,2.599,168,1.18,183,1.325,191,1.697,195,0.645,233,1.163,250,2.214,253,1.215,262,1.234,269,0.992,274,1.81,302,2.02,306,3.409,311,2.11,313,1.343,350,1.146,390,2.767,417,1.291,422,1.376,476,2.381,479,3.041,485,2.834,503,1.646,518,2.753,521,2.939,561,4.837,601,3.31,629,3.043,632,3.493,848,1.499,937,2.209,1041,3.493,1090,3.699,1274,2.324,1288,3.493,1309,2.568,1623,2.283,1645,2.848,1801,3.043,1838,3.164,1839,3.164,1840,3.164,2008,2.767,2863,4.108,2864,6.177,2865,6.177,2866,4.108,2867,4.108,2868,4.108,2869,4.108,2870,7.423,2871,4.108,2872,6.177,2873,3.737,2874,4.108,2875,7.423,2876,4.108,2877,4.108,2878,4.108,2879,4.108,2880,4.108]],["t/801",[306,4.295,350,2.493,574,4.656,2881,7.198]],["t/803",[165,3.449,171,2.007,233,2.26,262,2.722,269,1.418,286,5.236,306,2.426,329,2.354,383,2.308,422,1.968,476,2.264,492,2.452,497,4.995,518,2.618,520,2.426,644,3.759,681,3.958,684,3.958,861,3.673,865,4.436,874,2.974,879,4.073,1090,3.518,1115,3.958,1214,5.71,1408,4.073,1866,4.734,1945,4.995,1977,4.734,2008,3.958,2851,5.344,2881,4.734,2882,5.875,2883,5.875,2884,5.875,2885,5.875,2886,5.875]],["t/805",[162,2.13,168,1.781,171,1.559,195,0.974,215,3.232,250,2.223,253,2.449,269,1.998,284,2.702,306,3.418,341,3.185,350,2.31,378,2.413,433,4.3,479,3.053,485,3.557,502,3.689,613,3.776,695,4.997,783,3.508,928,3.508,1090,3.714,1287,4.594,2008,4.178,2881,7.508,2887,7.53,2888,5.641]],["t/807",[253,2.574,306,3.593,479,4.283,502,3.877,2008,5.861,2887,7.915,2888,7.915]],["t/809",[568,5.66,574,4.719,662,5.121,663,5.215]],["t/811",[183,1.409,184,2.313,359,3.783,411,3.65,417,2.064,422,2.2,568,4.105,571,4.424,573,3.933,662,5.421,663,5.521,848,2.395,900,5.291,1087,3.277,1302,3.933,1473,4.553,1558,5.583,1724,5.974,1956,5.058,2151,7.82,2889,6.567,2890,6.567,2891,6.567,2892,6.567,2893,6.567,2894,6.567,2895,6.567]],["t/813",[2896,9.306,2897,9.306]],["t/815",[188,3.622,293,2.831,417,2.7,568,5.369,663,4.948,900,6.921,1357,7.302,1473,5.955]],["t/817",[287,4.458,568,5.584,1225,4.728,1261,8.126,2898,8.933]],["t/819",[287,4.458,568,5.584,1225,4.728,2899,8.933,2900,8.933]],["t/821",[287,4.287,338,6.921,417,2.7,470,6.145,568,5.369,900,6.921,2901,8.589,2902,8.589]],["t/823",[366,5.537,1267,4.948,2903,7.697,2904,9.054]],["t/825",[366,5.252,568,5.369,1225,4.546,12
67,4.694,2903,7.302,2905,8.589,2906,8.589,2907,8.589]],["t/827",[287,4.232,366,5.186,663,4.885,763,6.067,1267,4.635,2903,7.21,2908,8.48,2909,8.48,2910,8.48]],["t/829",[306,4.328,338,7.295,574,4.719]],["t/831",[171,1.83,195,1.58,216,3.403,306,3.007,308,5.196,329,2.917,350,2.032,362,4.118,711,5.502,860,4.194,1087,3.634,2613,6.623,2708,7.402,2911,7.281,2912,7.281,2913,7.281,2914,7.281]],["t/833",[170,2.086,198,3.892,264,2.128,549,5.053,1454,5.86]],["t/835",[162,2.447,170,1.664,188,3.005,223,4.358,264,1.698,293,2.349,306,2.943,409,4.358,417,2.24,418,2.773,545,4.441,613,3.251,646,3.961,726,5.489,848,2.599,885,3.105,887,5.126,937,3.831,1295,5.799,1741,6.482]],["t/837",[170,1.738,188,3.139,264,1.773,293,2.453,303,4.653,306,3.074,319,5.513,417,2.34,418,2.896,474,5.325,560,6.669,851,6.328,1092,5.513,1615,6.771,1791,5.997,2789,5.325,2915,7.443,2916,7.443]],["t/839",[162,2.212,170,1.504,195,1.011,243,5.718,284,2.806,302,2.775,306,4.331,350,2.369,428,4.424,434,2.329,561,4.639,613,3.872,636,3.088,658,3.58,762,4.608,841,4.608,890,6.072,937,3.463,1167,5.475,1287,4.771,1623,3.58,2789,4.608,2917,5.859]],["t/841",[170,2.032,195,1.366,264,2.073,306,3.593,549,4.921,572,5.211,2392,7.397]],["t/843",[306,3.593,366,5.321,428,4.535,719,5.012,888,7.915,1225,4.605,2603,6.445]],["t/845",[161,6.623,239,3.09,264,2.283,306,3.217,350,2.174,417,3.012,428,4.994,544,2.946,554,6,560,6.285,613,3.553,1791,6.276]],["t/847",[195,1.223,293,2.567,306,3.957,350,2.674,367,3.888,428,4.06,841,5.573,1791,6.276,2268,6.623,2603,5.77,2626,7.086,2918,9.581,2919,7.086]],["t/849",[171,1.718,184,2.408,195,1.073,250,2.45,269,1.65,287,3.411,302,2.235,306,2.823,350,2.462,376,4.373,380,4.273,427,4.739,434,2.471,464,4.739,527,5.265,600,4.604,613,3.118,1012,4.093,1214,4.89,1218,3.866,1237,5.811,2157,6.218,2603,6.536,2699,5.811,2920,5.507,2921,6.835]],["t/851",[239,2.797,287,4.492,306,3.717,320,3.296,358,6.414,427,4.888,464,4.888,465,5.223,527,5.431,560,4.625,658,5.003,926,4.625,1117,4.312,1383,5.995,1456,6.414,1609,5.223,2015,4.888,2699,5.995,2881,5.681,2917,6.414,2922,7.051,2923,7.051]],["t/853",[298,4.649,306,4.044,417,2.537,422,2.704,432,5.165,527,6.217,531,7.342,613,3.682,1154,5.294,2497,6.862,2621,7.342,2919,7.342]],["t/855",[170,1.596,179,3.51,243,5.944,264,1.628,270,2.659,298,3.937,303,4.273,306,2.823,350,1.907,380,4.273,428,4.599,571,5.944,580,4.18,658,3.799,662,3.866,663,3.937,1148,5.811,1278,4.273,1282,4.604,1283,4.013,1715,4.89,1745,6.218,2497,5.811,2708,5.507,2924,6.835]],["t/857",[306,4.295,350,2.493,574,4.656,2920,7.198]],["t/859",[183,1.711,199,2.308,215,4.157,341,4.095,417,2.507,513,3.775,544,3.677,600,5.372,2102,5.907,2920,7.832,2925,7.975,2926,7.975]],["t/861",[195,1.404,198,3.04,219,3.947,233,1.975,306,3.692,311,3.583,376,5.721,378,2.715,613,3.183,629,5.168,1413,5.057,2522,5.932,2523,5.622,2873,6.347,2920,7.95,2927,6.977,2928,6.977,2929,6.977,2930,6.977,2931,6.977]],["t/863",[264,2.128,306,3.689,545,3.981,574,4.656,1295,5.716]],["t/865",[163,1.385,169,1.385,170,1.597,171,1.186,183,1.467,188,1.989,189,2.577,199,1.979,231,2.01,233,1.335,239,1.871,248,1.72,249,2.824,264,1.629,269,1.651,270,1.835,271,3.493,276,1.58,286,3.093,293,1.554,306,1.948,313,1.542,341,2.422,417,1.482,512,2.078,545,3.934,583,2.322,606,2.577,621,2.032,676,4.009,853,2.884,887,4.993,897,3.374,934,3.804,935,2.354,945,2.232,951,4.487,1115,3.177,1295,6.452,1461,3.493,1647,5.15,1660,4.29,1824,4.009,2506,4.29,2932,4.716,2933,4.716,2934,4.716,2935,4.716,2936,4.716]],["t/867",[233,3.016,253,2.447,264,2.367,284,3.603,509,3.218,510,3.686,605,4.017,1295,5.292]],["t/869",[163
,1.124,169,1.124,170,0.894,183,2.248,184,1.348,199,1.107,216,2.736,239,1.518,264,1.897,268,1.767,269,0.924,276,1.961,313,1.251,324,1.686,328,2.164,376,2.448,399,2.51,417,1.203,418,1.489,479,1.884,503,1.533,512,1.686,545,2.608,552,2.025,598,2.292,599,2.392,622,1.937,848,1.396,853,4.868,859,3.083,887,4.853,934,2.522,945,5.02,951,2.51,1013,4.508,1291,3.083,1295,6.368,1966,2.448,2015,2.653,2235,3.253,2937,3.826,2938,3.826,2939,3.826,2940,3.253,2941,3.481,2942,3.481,2943,3.481,2944,3.481,2945,3.826,2946,3.826,2947,3.826,2948,3.826,2949,3.826,2950,3.826,2951,3.826,2952,3.826,2953,3.826,2954,3.826,2955,3.826]],["t/871",[170,1.251,183,2.187,199,1.55,239,2.125,264,1.784,307,3.514,375,3.428,573,3.209,848,1.954,887,4.237,934,2.309,945,4.426,1013,6.652,1287,3.969,1295,6.833,1647,5.984,2015,3.715,2649,4.555,2940,7.951,2941,4.874,2942,4.874,2943,4.874,2944,4.874,2956,5.358,2957,5.358,2958,5.358,2959,5.358,2960,5.358,2961,5.358,2962,5.358]],["t/873",[170,2.034,171,1.684,183,1.868,306,3.596,350,2.701,351,6.347,409,4.096,412,3.491,438,2.606,492,2.795,545,2.985,848,2.443,853,4.096,887,3.789,938,4.286,945,3.17,1075,4.792,1295,4.286,1623,3.723,2940,5.695,2963,6.698,2964,6.698,2965,6.698]],["t/875",[887,5.121,934,3.901,1295,5.793,2011,7.295]],["t/877",[50,1.471,123,0.888,163,1.939,168,0.267,169,0.713,171,0.234,183,2.385,184,0.609,189,0.946,190,1.349,191,1.002,192,0.485,193,1.135,195,0.706,196,3.272,197,1.517,199,0.879,200,1.058,203,0.888,204,1.66,208,1.471,209,1.471,210,1.471,211,1.471,216,0.809,218,0.452,231,0.396,233,0.49,239,2.322,241,0.429,244,0.762,253,1.644,254,0.645,257,1.049,258,0.471,264,1.248,266,2.342,268,0.429,269,0.864,270,0.362,280,0.446,284,0.405,288,0.477,293,0.57,298,0.997,302,0.566,303,0.581,306,2.163,313,1.597,316,0.722,320,2.1,324,0.41,328,0.526,329,0.972,333,1.107,350,1.462,351,0.61,352,0.526,361,0.38,380,0.581,382,0.419,394,1.166,398,1.394,412,0.485,417,1.277,422,0.311,430,2.884,434,0.626,438,0.673,454,0.372,472,0.84,473,0.429,476,0.358,478,0.557,486,0.536,498,0.645,502,0.771,503,0.372,509,0.362,510,0.414,514,0.546,544,0.352,545,1.081,546,2.911,547,2.542,552,0.492,556,0.526,572,0.557,589,0.626,593,1.228,596,0.749,605,0.452,613,0.424,620,0.569,621,0.746,630,2.664,644,0.595,667,0.492,672,0.689,684,1.166,696,0.645,700,1.238,736,1.238,738,0.44,739,1.135,766,1.797,783,1.718,848,1.306,850,2.813,853,0.569,855,1.797,856,0.645,858,2.585,860,0.536,871,0.665,879,0.645,887,0.979,904,0.863,908,2.348,926,0.61,934,0.401,938,0.595,945,1.922,947,1.894,966,0.716,1065,2.207,1080,0.61,1096,1.238,1122,0.716,1249,1.574,1252,0.79,1267,0.508,1274,0.526,1282,0.626,1283,0.546,1295,0.595,1306,1.635,1308,1.453,1355,0.79,1394,0.645,1413,0.526,1419,1.2,1428,0.626,1455,0.665,1485,0.645,1505,0.645,1509,0.581,1524,0.749,1528,2.614,1587,0.665,1592,0.79,1623,1.989,1634,0.749,1639,0.689,1661,0.79,1907,0.626,1983,0.79,2015,2.106,2018,0.846,2041,4.909,2043,1.574,2045,0.749,2189,0.79,2232,0.846,2233,0.846,2264,0.79,2392,0.79,2410,0.749,2482,1.574,2499,0.846,2622,2.207,2747,0.846,2767,1.574,2966,0.93,2967,0.93,2968,1.73,2969,1.73,2970,1.73,2971,1.73,2972,1.73,2973,0.93,2974,0.93,2975,2.427,2976,0.93,2977,1.73,2978,0.93,2979,4.088,2980,0.93,2981,0.93,2982,0.93,2983,1.73,2984,1.73,2985,0.93,2986,0.93,2987,0.93,2988,0.93,2989,0.93,2990,2.427,2991,1.73,2992,0.93,2993,0.93,2994,1.73,2995,0.93,2996,0.93,2997,0.93,2998,0.93,2999,0.93,3000,4.494,3001,0.93,3002,0.93,3003,0.93,3004,0.93,3005,0.93,3006,0.93,3007,3.038,3008,0.93,3009,0.93,3010,1.73,3011,3.038,3012,2.427,3013,0.93,3014,2.427,3015,0.93,3016,0.93,3017,0.93,3018,0.93,3019,1.73,
3020,0.93,3021,0.93,3022,0.93,3023,0.93,3024,0.93,3025,1.73,3026,1.73,3027,0.93,3028,0.93,3029,0.93,3030,0.93,3031,0.93,3032,0.93,3033,0.93,3034,0.93,3035,0.93,3036,1.73,3037,0.93,3038,0.93,3039,0.93,3040,0.93,3041,0.93,3042,1.73,3043,0.93]],["t/879",[196,5.452,197,4.229,218,3.286,264,1.612,313,2.212,329,2.711,350,1.888,473,3.124,545,3.015,848,2.468,885,2.948,887,5.818,935,4.375,1295,6.985,2979,8.845,3044,9.723]]],"invertedIndex":[["",{"_index":183,"t":{"15":{"position":[[212,2]]},"17":{"position":[[63,2],[300,2],[424,2]]},"19":{"position":[[615,1]]},"32":{"position":[[118,2],[295,2],[437,3]]},"45":{"position":[[86,1],[113,2]]},"49":{"position":[[269,2],[471,1],[481,1],[717,2]]},"53":{"position":[[454,2],[656,1],[666,1]]},"61":{"position":[[108,1]]},"70":{"position":[[411,2],[599,1],[609,1]]},"74":{"position":[[1125,1]]},"76":{"position":[[952,1]]},"82":{"position":[[903,1],[1002,2],[1079,1],[1092,2],[1147,1],[1153,1],[1162,2],[1165,1],[1207,1],[1451,1],[1462,1],[1473,1],[1488,1],[1501,1],[1543,1],[1559,1],[1565,1]]},"86":{"position":[[174,1],[455,1],[610,1],[770,1],[934,1],[977,1],[1270,1]]},"88":{"position":[[33,1],[142,1],[310,1],[405,1],[407,1],[590,1],[740,1],[766,1],[800,1]]},"97":{"position":[[444,3],[506,2]]},"111":{"position":[[190,1],[206,1],[222,1],[237,1]]},"121":{"position":[[408,1],[485,1],[589,1],[984,1],[1004,1],[1019,1],[1085,1],[1184,1],[1228,1],[1267,1],[1310,1],[1358,1],[1410,1],[1482,1],[1555,1],[1612,1],[1656,1],[1721,1],[1736,1],[1809,1],[1863,1],[1925,1],[1939,2],[1957,2],[1972,1],[1977,2],[2054,2],[2071,1],[2096,1]]},"123":{"position":[[290,1],[292,1],[315,1],[338,1]]},"125":{"position":[[463,2]]},"131":{"position":[[1635,2],[2181,1],[2225,1],[2359,1],[2468,1],[2548,1],[3472,1],[3559,1],[3622,1],[3707,1]]},"135":{"position":[[88,3]]},"137":{"position":[[184,1],[884,3]]},"143":{"position":[[196,1],[368,1]]},"147":{"position":[[183,1],[898,1],[1009,1],[1034,1],[1036,3],[1047,1]]},"153":{"position":[[289,2]]},"155":{"position":[[261,2]]},"165":{"position":[[776,1]]},"171":{"position":[[964,1],[974,1]]},"173":{"position":[[1291,2]]},"183":{"position":[[889,2]]},"189":{"position":[[982,3]]},"199":{"position":[[573,2]]},"201":{"position":[[932,1],[998,2]]},"203":{"position":[[337,5]]},"205":{"position":[[275,1],[293,1]]},"207":{"position":[[447,1],[553,1],[555,1],[571,1],[573,2],[630,1],[632,2],[740,1],[761,1],[763,2]]},"211":{"position":[[610,1],[620,1]]},"213":{"position":[[182,1]]},"224":{"position":[[261,1],[271,1],[577,1],[579,3]]},"226":{"position":[[225,1],[1148,1],[1150,3],[1278,1],[1280,3]]},"228":{"position":[[493,1],[628,1],[1124,1],[1126,3],[2206,1],[2208,3]]},"261":{"position":[[88,1]]},"282":{"position":[[162,1],[207,1],[225,1],[251,1]]},"286":{"position":[[84,1],[129,1],[147,1],[173,1]]},"292":{"position":[[469,1],[479,1]]},"300":{"position":[[61,1],[275,1],[488,1],[555,1]]},"308":{"position":[[98,1],[140,1]]},"312":{"position":[[481,1],[614,1],[691,1],[740,1],[793,1]]},"314":{"position":[[745,1],[878,1],[955,1],[1004,1],[1024,1]]},"331":{"position":[[501,1],[601,2],[815,1],[825,1]]},"333":{"position":[[359,2]]},"343":{"position":[[1000,1],[1017,1]]},"347":{"position":[[78,1]]},"353":{"position":[[287,2]]},"355":{"position":[[417,1],[517,2],[731,1],[741,1],[1654,2],[3472,1],[3489,1],[4329,2]]},"357":{"position":[[698,1],[791,1],[811,1],[848,1],[912,1],[982,1],[984,1],[1109,1],[1834,1],[1851,1],[1868,1]]},"367":{"position":[[130,1],[475,1]]},"373":{"position":[[434,1],[558,1],[1333,1]]},"379":{"position":[[129,1]]},"381":{"position":[[11,1],[702,1],[870,5]]}
,"385":{"position":[[117,1]]},"389":{"position":[[137,1]]},"396":{"position":[[277,1]]},"398":{"position":[[108,1],[427,1],[438,1],[462,1]]},"400":{"position":[[0,1],[498,1],[516,1],[629,1]]},"406":{"position":[[0,1]]},"408":{"position":[[105,1]]},"420":{"position":[[621,5],[749,1]]},"424":{"position":[[119,5]]},"428":{"position":[[207,2]]},"432":{"position":[[33,2]]},"434":{"position":[[94,2],[131,2],[272,5]]},"438":{"position":[[419,1]]},"444":{"position":[[212,3]]},"451":{"position":[[351,2],[436,2]]},"453":{"position":[[170,1]]},"457":{"position":[[53,1]]},"459":{"position":[[676,1]]},"477":{"position":[[15,2]]},"491":{"position":[[52,1],[262,1]]},"493":{"position":[[52,1],[679,1],[946,1],[988,1]]},"495":{"position":[[7,2]]},"497":{"position":[[74,1],[105,1]]},"501":{"position":[[169,3]]},"503":{"position":[[73,2],[841,3],[1077,1],[1087,1],[1115,2]]},"505":{"position":[[276,1],[312,1],[377,1],[529,1],[581,1],[600,1],[644,1],[670,1],[723,1]]},"507":{"position":[[255,1]]},"511":{"position":[[116,1]]},"517":{"position":[[357,1],[1916,2],[1985,2],[2107,2]]},"529":{"position":[[882,1],[892,1],[1222,1],[1257,1],[1293,1],[1353,1],[1382,1],[1414,1]]},"531":{"position":[[114,1],[156,1],[205,1]]},"533":{"position":[[124,1],[166,1],[246,1],[275,1]]},"555":{"position":[[11,1]]},"572":{"position":[[145,2]]},"578":{"position":[[438,2]]},"594":{"position":[[38,1]]},"610":{"position":[[506,1]]},"618":{"position":[[202,1]]},"647":{"position":[[726,1]]},"651":{"position":[[171,3]]},"659":{"position":[[312,1],[372,1]]},"668":{"position":[[369,1],[461,1],[480,1]]},"670":{"position":[[285,1],[354,1],[451,1],[529,1],[695,1],[785,1],[862,1],[884,1],[907,1],[923,1],[1051,1],[1139,1],[1186,1]]},"676":{"position":[[228,1]]},"680":{"position":[[17,1],[189,1]]},"718":{"position":[[148,2],[325,2]]},"729":{"position":[[1876,1],[2227,1],[2609,1],[2891,1]]},"731":{"position":[[159,1]]},"739":{"position":[[602,2]]},"746":{"position":[[398,1],[603,1]]},"748":{"position":[[383,2],[477,2]]},"750":{"position":[[298,2]]},"752":{"position":[[470,1],[472,1],[482,1],[484,1],[697,1],[933,2],[936,1],[1114,1],[1116,1],[1118,1],[1256,2],[1259,1],[1389,1],[1408,1]]},"757":{"position":[[330,1],[443,1],[495,1],[562,1],[595,1]]},"759":{"position":[[489,1],[491,1],[712,1],[796,1],[844,1],[902,1]]},"767":{"position":[[5,2]]},"786":{"position":[[158,1]]},"788":{"position":[[150,1],[430,1]]},"790":{"position":[[103,1],[254,1]]},"799":{"position":[[299,1],[571,2]]},"811":{"position":[[284,1]]},"859":{"position":[[80,1]]},"865":{"position":[[630,2],[661,2]]},"869":{"position":[[341,1],[378,1],[419,1],[463,2],[466,1],[485,1],[519,1],[549,1],[577,1],[603,1],[733,1],[852,1]]},"871":{"position":[[244,1],[399,1],[436,1],[477,1],[522,1],[569,2]]},"873":{"position":[[286,1],[295,3]]},"877":{"position":[[393,1],[464,4],[490,3],[494,1],[601,3],[622,2],[2058,2],[2091,2],[2248,1],[2250,2],[2330,1],[2332,2],[2414,3],[2444,2],[2447,1],[2449,2],[2516,1],[2624,3],[2653,2],[2673,3],[2701,2],[2704,1],[2706,2],[2785,1],[2787,2],[2919,2],[2922,1],[3061,1],[3165,1],[3243,1],[3411,2],[3414,2],[3422,1],[3457,1],[3512,2],[3515,2],[3523,1],[3553,1],[3574,1],[3712,3],[3729,2],[3737,3],[3749,2],[3752,1],[3791,1],[3863,1],[3952,1],[3967,1],[4127,3],[4144,2],[4152,3],[4164,2],[4177,3],[4194,2],[4207,3],[4229,2],[4242,3],[4268,2],[4271,1],[4323,3],[4372,2],[4407,1],[4478,4],[4504,3],[4508,1],[4615,3],[4636,2],[4676,1],[4743,1],[4791,1],[4932,3],[4957,2],[4977,3],[5001,2],[5046,3],[5074,2],[5077,1],[5183,1],[5319,1],[5384,3],[5417,2],[5458,3],[5488,2],[5510,1],[5630,1],
[5692,2],[5705,1],[5718,2],[5762,2],[5765,1],[5767,2],[5978,2],[6029,1],[6031,2],[6037,3],[6051,2],[6054,2],[6057,2],[6136,25],[6196,25],[6227,3],[6241,2],[6249,85],[6424,6]]}}}],["0",{"_index":1053,"t":{"121":{"position":[[1980,2],[2057,3]]},"205":{"position":[[235,1]]},"349":{"position":[[222,1]]},"355":{"position":[[3886,1]]},"487":{"position":[[491,2]]},"501":{"position":[[351,1],[396,1]]}}}],["0.0.1",{"_index":2060,"t":{"434":{"position":[[724,5]]}}}],["0.00504448",{"_index":987,"t":{"121":{"position":[[452,11]]}}}],["0.07772372",{"_index":993,"t":{"121":{"position":[[523,11]]}}}],["0.15.2",{"_index":2849,"t":{"778":{"position":[[193,6]]}}}],["0.22607761",{"_index":991,"t":{"121":{"position":[[499,11]]}}}],["0.37513511",{"_index":990,"t":{"121":{"position":[[487,11]]}}}],["0.468925",{"_index":989,"t":{"121":{"position":[[476,8]]}}}],["0.5",{"_index":755,"t":{"82":{"position":[[1431,3]]}}}],["0.5,}torch.save(checkpoint",{"_index":831,"t":{"88":{"position":[[517,27]]}}}],["0.61587933",{"_index":994,"t":{"121":{"position":[[535,11]]}}}],["0.66214299",{"_index":996,"t":{"121":{"position":[[559,11]]}}}],["0.82861156",{"_index":995,"t":{"121":{"position":[[547,11]]}}}],["0.83035297",{"_index":992,"t":{"121":{"position":[[511,11]]}}}],["0.87087911",{"_index":988,"t":{"121":{"position":[[464,11]]}}}],["0.90979423",{"_index":997,"t":{"121":{"position":[[571,12]]}}}],["000:/home/jovyan",{"_index":2466,"t":{"610":{"position":[[352,16]]}}}],["01",{"_index":209,"t":{"17":{"position":[[259,2]]},"446":{"position":[[1543,2]]},"877":{"position":[[581,2],[4595,2]]}}}],["024",{"_index":455,"t":{"39":{"position":[[124,3]]}}}],["1",{"_index":454,"t":{"39":{"position":[[122,1],[134,1],[143,1]]},"82":{"position":[[849,2]]},"121":{"position":[[849,1]]},"131":{"position":[[3945,1]]},"173":{"position":[[742,1]]},"201":{"position":[[620,3],[653,6],[1199,1],[1225,1]]},"235":{"position":[[63,4]]},"244":{"position":[[63,4]]},"246":{"position":[[135,4]]},"250":{"position":[[39,4]]},"252":{"position":[[42,4]]},"257":{"position":[[117,4]]},"261":{"position":[[17,4]]},"265":{"position":[[63,4]]},"267":{"position":[[58,4]]},"314":{"position":[[1006,3]]},"341":{"position":[[980,1]]},"343":{"position":[[491,1]]},"355":{"position":[[2951,1]]},"357":{"position":[[934,3],[938,2]]},"361":{"position":[[117,3]]},"400":{"position":[[530,6]]},"467":{"position":[[1271,1]]},"471":{"position":[[5,1]]},"487":{"position":[[517,2]]},"491":{"position":[[79,1],[124,2],[158,2]]},"505":{"position":[[583,3],[646,3]]},"511":{"position":[[338,3]]},"517":{"position":[[862,1],[925,1],[1282,1]]},"523":{"position":[[366,1]]},"590":{"position":[[224,2]]},"596":{"position":[[363,2]]},"670":{"position":[[386,6],[561,6],[817,6],[1083,6]]},"702":{"position":[[0,2]]},"704":{"position":[[0,2]]},"706":{"position":[[0,2]]},"708":{"position":[[0,2]]},"710":{"position":[[0,2]]},"729":{"position":[[65,2]]},"731":{"position":[[71,2],[187,1]]},"748":{"position":[[509,1],[712,1]]},"750":{"position":[[364,2],[391,1]]},"759":{"position":[[669,2]]},"877":{"position":[[6128,2]]}}}],["1.14.4",{"_index":2100,"t":{"451":{"position":[[44,6]]}}}],["1.2",{"_index":761,"t":{"82":{"position":[[1561,3]]}}}],["1.34.1",{"_index":1950,"t":{"385":{"position":[[823,6]]}}}],["1.87",{"_index":759,"t":{"82":{"position":[[1490,4]]}}}],["1.87/724)100",{"_index":760,"t":{"82":{"position":[[1545,13]]}}}],["10",{"_index":700,"t":{"82":{"position":[[44,2],[74,2]]},"121":{"position":[[1974,2]]},"145":{"position":[[534,2]]},"359":{"position":[[527,2]]},"511":{"position":[[293,4]]},"708":{"
position":[[380,3]]},"877":{"position":[[659,2],[4673,2]]}}}],["100",{"_index":748,"t":{"82":{"position":[[1221,3]]},"349":{"position":[[246,3]]},"353":{"position":[[344,3]]},"355":{"position":[[3910,3],[4386,3]]},"731":{"position":[[243,3]]}}}],["1000",{"_index":1251,"t":{"131":{"position":[[4056,4],[4095,4]]}}}],["10000",{"_index":2584,"t":{"688":{"position":[[363,5]]}}}],["100_000",{"_index":1019,"t":{"121":{"position":[[1021,8]]}}}],["100g",{"_index":2228,"t":{"493":{"position":[[146,4],[849,4]]}}}],["10chunk_siz",{"_index":1018,"t":{"121":{"position":[[1006,12]]}}}],["11",{"_index":2636,"t":{"708":{"position":[[404,3]]}}}],["12",{"_index":694,"t":{"80":{"position":[[647,2],[698,2]]},"121":{"position":[[361,2]]}}}],["120",{"_index":450,"t":{"39":{"position":[[91,3]]}}}],["1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890",{"_index":150,"t":{"6":{"position":[[0,250]]}}}],["128",{"_index":449,"t":{"39":{"position":[[77,4]]},"361":{"position":[[148,5]]}}}],["13444.5",{"_index":2069,"t":{"438":{"position":[[466,7]]}}}],["137.120.1.1",{"_index":1672,"t":{"300":{"position":[[221,11]]}}}],["137.120.1.5",{"_index":1673,"t":{"300":{"position":[[244,11]]}}}],["15",{"_index":1883,"t":{"359":{"position":[[466,2]]}}}],["15min",{"_index":1436,"t":{"175":{"position":[[348,6]]},"215":{"position":[[297,6]]},"294":{"position":[[1040,6]]}}}],["16",{"_index":441,"t":{"39":{"position":[[0,2]]}}}],["16g",{"_index":2140,"t":{"467":{"position":[[150,3]]}}}],["18.04",{"_index":1922,"t":{"385":{"position":[[130,5]]}}}],["18.10",{"_index":1928,"t":{"385":{"position":[[198,5]]}}}],["192",{"_index":453,"t":{"39":{"position":[[115,3]]}}}],["1error",{"_index":2200,"t":{"491":{"position":[[84,7]]}}}],["2",{"_index":738,"t":{"82":{"position":[[912,2]]},"88":{"position":[[55,3]]},"173":{"position":[[747,1]]},"181":{"position":[[507,1]]},"187":{"position":[[349,1]]},"189":{"position":[[1031,1]]},"205":{"position":[[403,2],[441,3],[471,3],[769,3],[802,6]]},"235":{"position":[[112,4]]},"244":{"position":[[114,3]]},"257":{"position":[[143,4]]},"261":{"position":[[55,4]]},"265":{"position":[[107,4]]},"314":{"position":[[1010,2]]},"359":{"position":[[546,1]]},"400":{"position":[[575,1]]},"469":{"position":[[709,1]]},"503":{"position":[[107,1]]},"505":{"position":[[587,2],[650,2]]},"511":{"position":[[272,1]]},"517":{"position":[[1079,3],[1314,1],[1458,1],[1638,1]]},"651":{"position":[[342,1]]},"659":{"position":[[245,1]]},"702":{"position":[[22,2]]},"704":{"position":[[15,2]]},"708":{"position":[[33,2]]},"710":{"position":[[26,2]]},"729":{"position":[[834,2]]},"731":{"position":[[401,2]]},"877":{"position":[[293,1]]}}}],["2,000",{"_index":1755,"t":{"325":{"position":[[92,6]]}}}],["2.2",{"_index":2012,"t":{"408":{"position":[[23,4]]}}}],["2.4.2",{"_index":2845,"t":{"773":{"position":[[28,5]]}}}],["2.4.5",{"_index":2304,"t":{"507":{"position":[[622,6]]}}}],["20",{"_index":751,"t":{"82":{"position":[[1374,2]]}}}],["20.04",{"_index":3005,"t":{"877":{"position":[[3326,5]]}}}],["20/3600",{"_index":758,"t":{"82":{"position":[[1480,7]]}}}],["200g",{"_index":2138,"t":{"467":{"position":[[96,4]]}}}],["2023.5.0",{"_index":954,"t":{"119":{"position":[[58,10]]}}}],["24",{"_index":677,"t":{"80":{"position":[[359,2]]},"82":{"position":[[1084,5]]}}}],["260",{"_index":1240,"t":{"131":{"position":[[3748,3]]}}}],["2gi",{"_index":1564,"t":{"224":{"
position":[[441,4],[602,3]]},"361":{"position":[[129,5]]},"517":{"position":[[1292,3]]}}}],["2nd",{"_index":1529,"t":{"205":{"position":[[27,3]]}}}],["3",{"_index":740,"t":{"82":{"position":[[963,2]]},"121":{"position":[[708,2]]},"261":{"position":[[101,4]]},"314":{"position":[[1013,2]]},"453":{"position":[[168,1]]},"503":{"position":[[1139,1]]},"505":{"position":[[590,2],[653,2]]},"641":{"position":[[92,1]]},"688":{"position":[[49,1]]},"702":{"position":[[51,2]]},"704":{"position":[[32,2]]},"708":{"position":[[58,2]]},"710":{"position":[[48,2]]},"729":{"position":[[1452,2]]},"731":{"position":[[541,2]]}}}],["3,3",{"_index":2063,"t":{"438":{"position":[[358,3]]}}}],["3.0.1",{"_index":2287,"t":{"505":{"position":[[102,5]]},"507":{"position":[[78,6]]},"511":{"position":[[65,5]]}}}],["3.3.8",{"_index":1628,"t":{"242":{"position":[[98,6]]},"252":{"position":[[36,5],[111,5]]},"280":{"position":[[98,6]]}}}],["3.9",{"_index":1421,"t":{"173":{"position":[[1013,4]]},"523":{"position":[[631,4]]}}}],["30",{"_index":754,"t":{"82":{"position":[[1413,2]]},"746":{"position":[[263,2]]}}}],["300gi",{"_index":1888,"t":{"361":{"position":[[162,7]]}}}],["30min",{"_index":1293,"t":{"145":{"position":[[541,5]]}}}],["32.2",{"_index":1246,"t":{"131":{"position":[[3794,4]]}}}],["328",{"_index":1244,"t":{"131":{"position":[[3777,3]]}}}],["32gb",{"_index":462,"t":{"39":{"position":[[183,4]]}}}],["3306",{"_index":2131,"t":{"459":{"position":[[917,4]]}}}],["336",{"_index":757,"t":{"82":{"position":[[1464,3],[1475,4]]}}}],["4",{"_index":1641,"t":{"261":{"position":[[132,4]]},"300":{"position":[[678,1]]},"314":{"position":[[1016,2]]},"327":{"position":[[382,1]]},"469":{"position":[[781,2]]},"505":{"position":[[593,2],[656,2]]},"511":{"position":[[317,1],[361,1]]},"704":{"position":[[62,2]]},"708":{"position":[[84,2]]},"710":{"position":[[67,2]]},"720":{"position":[[146,1]]},"729":{"position":[[1823,2]]}}}],["4.1",{"_index":2721,"t":{"729":{"position":[[1858,4]]}}}],["4.11",{"_index":425,"t":{"37":{"position":[[11,5]]}}}],["4.2",{"_index":2726,"t":{"729":{"position":[[2206,4]]}}}],["4.3",{"_index":2120,"t":{"459":{"position":[[27,3]]},"729":{"position":[[2597,4]]}}}],["4.4",{"_index":2732,"t":{"729":{"position":[[2873,4]]}}}],["40",{"_index":463,"t":{"39":{"position":[[245,2]]},"359":{"position":[[509,2]]}}}],["403",{"_index":2265,"t":{"497":{"position":[[26,3],[112,3]]}}}],["403486.8",{"_index":2067,"t":{"438":{"position":[[437,8]]}}}],["42.1",{"_index":1254,"t":{"131":{"position":[[4192,4]]}}}],["443",{"_index":585,"t":{"65":{"position":[[370,3]]},"635":{"position":[[370,3]]}}}],["4eof",{"_index":2322,"t":{"511":{"position":[[402,4]]}}}],["4gi",{"_index":2320,"t":{"511":{"position":[[306,5],[350,5]]},"517":{"position":[[1468,3]]}}}],["5",{"_index":825,"t":{"88":{"position":[[418,2]]},"261":{"position":[[178,4]]},"314":{"position":[[1019,2]]},"598":{"position":[[451,2]]},"704":{"position":[[96,2]]},"708":{"position":[[118,2]]},"710":{"position":[[91,2]]}}}],["50",{"_index":2729,"t":{"729":{"position":[[2668,2]]}}}],["512",{"_index":447,"t":{"39":{"position":[[61,3],[238,3]]}}}],["515543.2",{"_index":2066,"t":{"438":{"position":[[421,8]]}}}],["5]distdata",{"_index":2302,"t":{"505":{"position":[[659,10]]}}}],["5]i",{"_index":2300,"t":{"505":{"position":[[596,3]]}}}],["5_000_000num_col",{"_index":1017,"t":{"121":{"position":[[986,17]]}}}],["5gi",{"_index":1786,"t":{"333":{"position":[[920,3]]},"355":{"position":[[2215,3]]}}}],["6",{"_index":2620,"t":{"704":{"position":[[132,2]]},"708":{"position":[[137,2]]}}}],["6.0.1",{"_i
ndex":2377,"t":{"531":{"position":[[39,5]]},"533":{"position":[[39,5]]}}}],["6.0.3",{"_index":2379,"t":{"531":{"position":[[49,5]]}}}],["600",{"_index":2206,"t":{"491":{"position":[[178,3]]}}}],["6000",{"_index":1683,"t":{"300":{"position":[[593,4]]}}}],["64",{"_index":448,"t":{"39":{"position":[[68,2]]},"467":{"position":[[113,2]]},"493":{"position":[[157,2],[860,2]]}}}],["68.1",{"_index":1243,"t":{"131":{"position":[[3761,4]]}}}],["6a",{"_index":2630,"t":{"708":{"position":[[171,3]]}}}],["6b",{"_index":2631,"t":{"708":{"position":[[200,3]]}}}],["6c",{"_index":2632,"t":{"708":{"position":[[233,3]]}}}],["7",{"_index":750,"t":{"82":{"position":[[1325,1]]},"163":{"position":[[273,1]]},"704":{"position":[[146,2]]},"708":{"position":[[263,2]]}}}],["7*24/0.5",{"_index":756,"t":{"82":{"position":[[1453,8]]}}}],["777",{"_index":2885,"t":{"803":{"position":[[445,3]]}}}],["8",{"_index":452,"t":{"39":{"position":[[113,1],[236,1]]},"467":{"position":[[165,1]]},"469":{"position":[[822,2]]},"704":{"position":[[184,2]]},"708":{"position":[[310,2]]}}}],["8.8.8.8",{"_index":1674,"t":{"300":{"position":[[267,7]]}}}],["80",{"_index":1638,"t":{"261":{"position":[[84,2]]}}}],["8002:8001",{"_index":2033,"t":{"415":{"position":[[48,9]]}}}],["8080",{"_index":596,"t":{"70":{"position":[[657,4]]},"308":{"position":[[485,4]]},"343":{"position":[[755,6]]},"877":{"position":[[2780,4]]}}}],["8080:8080",{"_index":2806,"t":{"757":{"position":[[1417,9]]}}}],["8787",{"_index":548,"t":{"53":{"position":[[714,4]]}}}],["8888",{"_index":943,"t":{"111":{"position":[[266,4]]},"343":{"position":[[1056,4]]},"351":{"position":[[46,4],[388,4],[417,4],[434,4]]},"353":{"position":[[366,4]]},"355":{"position":[[3528,4],[4074,4],[4103,4],[4120,4],[4408,4]]},"359":{"position":[[425,4],[486,4]]},"363":{"position":[[158,4]]},"739":{"position":[[1539,5]]}}}],["8x",{"_index":459,"t":{"39":{"position":[[167,2]]}}}],["9",{"_index":2635,"t":{"708":{"position":[[357,2]]},"746":{"position":[[886,1]]}}}],["920",{"_index":456,"t":{"39":{"position":[[136,3]]}}}],["98612.0",{"_index":2068,"t":{"438":{"position":[[452,7]]}}}],["9:00",{"_index":1527,"t":{"203":{"position":[[98,5]]}}}],["_.interfacedescript",{"_index":1678,"t":{"300":{"position":[[503,24]]}}}],["a\"].compute().head",{"_index":1109,"t":{"125":{"position":[[466,21]]}}}],["a'filtered_str",{"_index":1200,"t":{"131":{"position":[[2339,19]]}}}],["a.k.a",{"_index":2092,"t":{"446":{"position":[[646,7],[1195,7]]}}}],["abbrevi",{"_index":2526,"t":{"647":{"position":[[416,12]]}}}],["abid",{"_index":404,"t":{"32":{"position":[[189,5]]},"718":{"position":[[219,5]]}}}],["abil",{"_index":309,"t":{"23":{"position":[[336,7]]}}}],["abov",{"_index":656,"t":{"76":{"position":[[664,5]]},"121":{"position":[[635,5]]},"135":{"position":[[441,5]]},"201":{"position":[[1432,5]]},"207":{"position":[[1269,5]]},"228":{"position":[[1112,6]]},"432":{"position":[[438,7]]},"489":{"position":[[319,5]]},"497":{"position":[[126,5]]},"649":{"position":[[260,5]]}}}],["absolut",{"_index":2085,"t":{"446":{"position":[[416,10],[1830,10]]},"610":{"position":[[104,8],[153,8]]}}}],["absolute_path_in_pod",{"_index":2468,"t":{"610":{"position":[[542,25]]},"618":{"position":[[252,25]]}}}],["ac",{"_index":105,"t":{"2":{"position":[[1524,2],[2648,2],[2723,2],[2754,2]]},"4":{"position":[[1524,2],[2648,2],[2723,2],[2754,2]]}}}],["acc",{"_index":1600,"t":{"228":{"position":[[859,5]]}}}],["acceptor",{"_index":2201,"t":{"491":{"position":[[99,8]]}}}],["access",{"_index":169,"t":{"13":{"position":[[12,6]]},"15":{"position":[[154,6]]},"17":{"
position":[[46,6],[474,6],[592,6],[676,6]]},"19":{"position":[[0,6],[140,6]]},"21":{"position":[[432,6],[559,6],[624,6]]},"25":{"position":[[8,6]]},"30":{"position":[[53,6]]},"32":{"position":[[23,8],[414,6]]},"49":{"position":[[556,6]]},"63":{"position":[[7,6]]},"65":{"position":[[37,10],[135,11],[161,6],[316,7],[495,10]]},"70":{"position":[[733,6]]},"72":{"position":[[27,10]]},"74":{"position":[[493,6]]},"76":{"position":[[181,6],[287,6],[874,6]]},"80":{"position":[[270,6]]},"113":{"position":[[170,6]]},"115":{"position":[[132,6]]},"137":{"position":[[888,6]]},"153":{"position":[[543,9]]},"155":{"position":[[433,9]]},"171":{"position":[[371,6]]},"181":{"position":[[31,10],[287,6],[489,6],[536,8],[620,6],[748,6]]},"185":{"position":[[216,6]]},"187":{"position":[[217,6],[248,6],[543,9],[744,6],[794,6]]},"189":{"position":[[516,6],[1212,6]]},"195":{"position":[[122,6]]},"197":{"position":[[488,6]]},"199":{"position":[[99,6]]},"207":{"position":[[214,6]]},"226":{"position":[[940,7]]},"228":{"position":[[345,6],[2010,6]]},"233":{"position":[[109,6]]},"263":{"position":[[303,6]]},"276":{"position":[[109,6]]},"298":{"position":[[27,6]]},"308":{"position":[[60,6],[502,6]]},"325":{"position":[[253,6]]},"333":{"position":[[509,6]]},"355":{"position":[[1804,6]]},"359":{"position":[[176,10]]},"373":{"position":[[184,6]]},"400":{"position":[[403,6]]},"415":{"position":[[58,6]]},"420":{"position":[[68,6]]},"440":{"position":[[299,8]]},"444":{"position":[[154,6]]},"446":{"position":[[46,6],[322,6],[406,6],[453,6],[1736,6],[1820,6],[1867,6]]},"467":{"position":[[22,6]]},"475":{"position":[[473,6]]},"479":{"position":[[59,6]]},"487":{"position":[[21,6],[215,6]]},"491":{"position":[[1952,6]]},"495":{"position":[[30,6],[974,8],[1038,6]]},"503":{"position":[[1253,6]]},"509":{"position":[[31,6]]},"525":{"position":[[211,6]]},"529":{"position":[[289,6]]},"576":{"position":[[762,6]]},"588":{"position":[[0,6],[53,6]]},"594":{"position":[[93,6]]},"600":{"position":[[45,6],[357,6]]},"635":{"position":[[37,10],[135,11],[161,6],[316,7]]},"647":{"position":[[368,6],[400,6],[777,6],[883,6]]},"653":{"position":[[253,6],[361,6],[390,6],[473,6]]},"670":{"position":[[275,6]]},"674":{"position":[[1258,6]]},"678":{"position":[[77,6],[175,6],[291,6],[313,6],[571,6],[622,6],[660,6],[740,6]]},"718":{"position":[[23,8]]},"720":{"position":[[175,6]]},"727":{"position":[[185,6]]},"729":{"position":[[1997,13],[2701,10],[3302,6]]},"737":{"position":[[629,6]]},"739":{"position":[[97,6],[340,6],[791,6],[934,6],[1332,6],[1403,6],[1935,6],[1997,8]]},"740":{"position":[[304,6],[632,6]]},"748":{"position":[[566,6],[742,6]]},"750":{"position":[[16,6],[444,6]]},"757":{"position":[[1273,6],[1472,10],[1747,6]]},"865":{"position":[[357,6]]},"869":{"position":[[89,6]]},"877":{"position":[[1036,6],[1108,6],[2755,10]]}}}],["access_token",{"_index":2040,"t":{"420":{"position":[[705,12],[734,14],[866,14]]}}}],["accesskubectl",{"_index":1989,"t":{"400":{"position":[[756,13]]}}}],["accessmod",{"_index":1806,"t":{"337":{"position":[[606,12]]},"355":{"position":[[2594,12]]}}}],["accident",{"_index":2593,"t":{"690":{"position":[[404,12]]}}}],["accord",{"_index":2571,"t":{"686":{"position":[[0,9]]},"757":{"position":[[1043,9]]}}}],["accordingli",{"_index":1624,"t":{"228":{"position":[[2679,12]]}}}],["account",{"_index":172,"t":{"15":{"position":[[25,7],[182,7]]},"32":{"position":[[524,7],[605,8]]},"175":{"position":[[650,8]]},"183":{"position":[[475,8],[869,7]]},"215":{"position":[[808,8]]},"228":{"position":[[357,8],[716,7],[1390,9],[1908,8],[2138,7]]}
,"263":{"position":[[245,7]]},"294":{"position":[[1342,8]]},"411":{"position":[[254,7]]},"420":{"position":[[25,7]]},"422":{"position":[[10,7]]},"424":{"position":[[16,7]]},"444":{"position":[[25,7],[182,7]]},"463":{"position":[[92,7],[220,8]]},"475":{"position":[[236,8],[617,8]]},"491":{"position":[[815,8]]},"521":{"position":[[233,7]]},"596":{"position":[[411,8]]},"598":{"position":[[102,8],[130,7],[155,7],[391,7],[499,8]]},"600":{"position":[[447,8]]},"602":{"position":[[408,8]]}}}],["accumsan",{"_index":107,"t":{"2":{"position":[[1551,9],[2302,8]]},"4":{"position":[[1551,9],[2302,8]]}}}],["achiev",{"_index":2144,"t":{"467":{"position":[[995,8],[1592,7]]}}}],["action",{"_index":887,"t":{"99":{"position":[[115,7]]},"137":{"position":[[666,7]]},"189":{"position":[[770,7]]},"201":{"position":[[776,7]]},"267":{"position":[[15,7]]},"381":{"position":[[694,7]]},"434":{"position":[[76,7]]},"491":{"position":[[2003,7]]},"649":{"position":[[52,6]]},"835":{"position":[[7,7],[161,7]]},"865":{"position":[[73,7],[180,7],[232,7],[290,7]]},"869":{"position":[[286,7],[311,7],[712,7],[837,7],[935,7]]},"871":{"position":[[344,7],[369,7]]},"873":{"position":[[23,6]]},"875":{"position":[[15,7]]},"877":{"position":[[207,7],[2022,8]]},"879":{"position":[[76,7],[163,7],[181,6],[321,7]]}}}],["actions.github.io/openshift",{"_index":2932,"t":{"865":{"position":[[262,27]]}}}],["actions/buildah",{"_index":3014,"t":{"877":{"position":[[3602,15],[3674,15],[3819,15]]}}}],["actions/checkout@v2",{"_index":3006,"t":{"877":{"position":[[3347,19]]}}}],["actions/oc",{"_index":2990,"t":{"877":{"position":[[2548,10],[4819,10],[4884,10]]}}}],["actions/openshift",{"_index":3044,"t":{"879":{"position":[[58,17],[145,17],[303,17]]}}}],["actions/push",{"_index":3019,"t":{"877":{"position":[[3995,12],[4086,12]]}}}],["actions/spr",{"_index":2980,"t":{"877":{"position":[[1887,14]]}}}],["activ",{"_index":399,"t":{"32":{"position":[[125,10]]},"173":{"position":[[1315,8]]},"228":{"position":[[402,8]]},"385":{"position":[[718,6]]},"398":{"position":[[74,8]]},"509":{"position":[[67,8]]},"682":{"position":[[100,7]]},"718":{"position":[[155,10]]},"729":{"position":[[623,8]]},"869":{"position":[[989,6]]}}}],["activation='relu",{"_index":775,"t":{"86":{"position":[[231,18]]}}}],["activation='softmax')])model.compile(optimizer='adam",{"_index":778,"t":{"86":{"position":[[296,54]]}}}],["actual",{"_index":2355,"t":{"517":{"position":[[1967,6]]}}}],["acycl",{"_index":2916,"t":{"837":{"position":[[102,7]]}}}],["ad",{"_index":1075,"t":{"123":{"position":[[120,6]]},"228":{"position":[[2149,6]]},"246":{"position":[[66,5]]},"341":{"position":[[167,6]]},"489":{"position":[[402,6]]},"517":{"position":[[505,6]]},"873":{"position":[[163,5]]}}}],["adapt",{"_index":2180,"t":{"481":{"position":[[143,5]]},"690":{"position":[[832,5]]}}}],["add",{"_index":621,"t":{"74":{"position":[[477,3]]},"97":{"position":[[474,3]]},"111":{"position":[[10,4],[35,3],[271,3]]},"137":{"position":[[142,3],[527,3],[809,3]]},"147":{"position":[[102,3],[117,3],[811,3]]},"189":{"position":[[907,3]]},"197":{"position":[[104,3]]},"199":{"position":[[12,3]]},"207":{"position":[[286,3],[565,3],[1159,3]]},"213":{"position":[[184,4],[209,3]]},"278":{"position":[[0,3],[52,3]]},"294":{"position":[[701,5]]},"300":{"position":[[178,3]]},"327":{"position":[[118,4]]},"355":{"position":[[80,3],[123,4],[176,3]]},"357":{"position":[[1929,3],[2276,3]]},"359":{"position":[[8,3]]},"373":{"position":[[368,3]]},"381":{"position":[[6,4],[761,3]]},"398":{"position":[[353,3]]},"451":{"position":[[282,3]]},
"459":{"position":[[88,3],[168,3]]},"463":{"position":[[229,3],[263,3]]},"493":{"position":[[648,3]]},"529":{"position":[[1106,3]]},"535":{"position":[[65,3]]},"574":{"position":[[74,3]]},"649":{"position":[[90,3],[179,3],[219,3]]},"674":{"position":[[781,3]]},"678":{"position":[[116,3],[309,3],[323,3],[480,3]]},"682":{"position":[[282,3]]},"739":{"position":[[351,3],[550,4],[575,3]]},"752":{"position":[[419,3],[466,3]]},"756":{"position":[[189,3]]},"778":{"position":[[49,3],[91,3]]},"865":{"position":[[218,3]]},"877":{"position":[[1245,3],[1476,3]]}}}],["addgroup",{"_index":1933,"t":{"385":{"position":[[362,8]]}}}],["addit",{"_index":255,"t":{"19":{"position":[[417,10]]},"82":{"position":[[739,10]]},"171":{"position":[[1219,10]]},"467":{"position":[[195,10]]},"521":{"position":[[328,10]]},"535":{"position":[[89,10]]},"752":{"position":[[895,10]]}}}],["addition",{"_index":1688,"t":{"306":{"position":[[156,12]]},"440":{"position":[[251,13]]}}}],["address",{"_index":2088,"t":{"446":{"position":[[517,7]]},"600":{"position":[[393,8]]},"602":{"position":[[354,8]]},"725":{"position":[[186,7]]},"729":{"position":[[2838,9],[3366,10]]}}}],["addus",{"_index":1935,"t":{"385":{"position":[[384,7]]}}}],["adequ",{"_index":2561,"t":{"674":{"position":[[1198,8]]}}}],["adipisc",{"_index":6,"t":{"2":{"position":[[40,10],[192,10]]},"4":{"position":[[40,10],[192,10]]}}}],["admin",{"_index":660,"t":{"76":{"position":[[1003,5],[1022,5]]},"187":{"position":[[312,6],[1124,5]]},"228":{"position":[[710,5],[906,6],[1168,5]]},"304":{"position":[[14,6]]},"389":{"position":[[89,5]]},"400":{"position":[[505,5],[660,6]]},"411":{"position":[[312,5]]},"459":{"position":[[430,5]]},"529":{"position":[[69,5]]},"678":{"position":[[154,6]]},"737":{"position":[[754,5]]}}}],["admin:org",{"_index":2936,"t":{"865":{"position":[[664,9]]}}}],["admin_access",{"_index":1586,"t":{"226":{"position":[[1174,13]]}}}],["admin_password=mypassword",{"_index":2416,"t":{"565":{"position":[[131,25]]}}}],["admin_us",{"_index":1574,"t":{"226":{"position":[[227,11]]},"228":{"position":[[495,11],[630,11],[1153,12]]}}}],["administr",{"_index":305,"t":{"23":{"position":[[193,13]]},"25":{"position":[[232,13]]},"49":{"position":[[452,13]]},"53":{"position":[[637,13]]},"70":{"position":[[580,13]]},"113":{"position":[[14,13]]},"171":{"position":[[945,13]]},"211":{"position":[[591,13]]},"224":{"position":[[242,13]]},"292":{"position":[[450,13]]},"331":{"position":[[796,13]]},"355":{"position":[[712,13]]},"495":{"position":[[183,14]]},"503":{"position":[[1058,13]]},"529":{"position":[[863,13]]},"647":{"position":[[14,13]]},"651":{"position":[[14,13]]},"674":{"position":[[1290,14]]},"678":{"position":[[810,13]]},"720":{"position":[[9,14],[148,14]]}}}],["adminoc",{"_index":1954,"t":{"389":{"position":[[142,7]]}}}],["adminus",{"_index":1979,"t":{"400":{"position":[[244,9],[359,9]]}}}],["adopt",{"_index":2641,"t":{"714":{"position":[[22,7]]}}}],["advanc",{"_index":410,"t":{"32":{"position":[[424,7]]},"131":{"position":[[293,7]]},"160":{"position":[[12,8]]},"357":{"position":[[22,9]]},"398":{"position":[[429,8]]},"576":{"position":[[445,7],[493,7]]},"657":{"position":[[22,10]]},"678":{"position":[[720,8]]}}}],["aenean",{"_index":121,"t":{"2":{"position":[[1871,6]]},"4":{"position":[[1871,6]]}}}],["afni",{"_index":2386,"t":{"533":{"position":[[45,5],[170,4]]}}}],["again",{"_index":1999,"t":{"400":{"position":[[1246,5]]},"620":{"position":[[0,6]]}}}],["against",{"_index":2139,"t":{"467":{"position":[[135,7]]}}}],["ai",{"_index":567,"t":{"61":{"position":[[255,2],[28
5,2]]}}}],["aid",{"_index":2576,"t":{"686":{"position":[[287,3]]}}}],["aim",{"_index":2679,"t":{"729":{"position":[[167,5]]}}}],["airflow",{"_index":560,"t":{"61":{"position":[[87,7]]},"754":{"position":[[24,7]]},"756":{"position":[[158,7],[200,7]]},"757":{"position":[[23,7],[299,7],[820,7],[1237,7],[1721,7]]},"759":{"position":[[251,7],[277,7],[502,10],[1042,11]]},"761":{"position":[[15,7]]},"763":{"position":[[54,7],[156,7],[211,7]]},"837":{"position":[[0,7],[138,7],[178,7]]},"845":{"position":[[120,7],[166,7]]},"851":{"position":[[162,7]]}}}],["airflow.contrib.operators.kubernetes_pod_oper",{"_index":2813,"t":{"759":{"position":[[340,49]]}}}],["airflow.operators.dummy_oper",{"_index":2815,"t":{"759":{"position":[[423,32]]}}}],["airflow/airflow",{"_index":2791,"t":{"757":{"position":[[314,15]]}}}],["airflow@example.com",{"_index":2820,"t":{"759":{"position":[[581,24]]}}}],["aka",{"_index":474,"t":{"45":{"position":[[149,5]]},"341":{"position":[[26,5]]},"432":{"position":[[682,5]]},"467":{"position":[[1130,5]]},"668":{"position":[[753,5]]},"729":{"position":[[1693,3]]},"837":{"position":[[82,4]]}}}],["algorithm",{"_index":2151,"t":{"467":{"position":[[1356,10]]},"811":{"position":[[115,9],[164,9]]}}}],["alia",{"_index":1967,"t":{"396":{"position":[[447,5]]}}}],["aliquam",{"_index":16,"t":{"2":{"position":[[133,7],[578,8],[1207,8],[2924,7]]},"4":{"position":[[133,7],[578,8],[1207,8],[2924,7]]}}}],["aliquet",{"_index":22,"t":{"2":{"position":[[244,7],[770,8],[1467,7]]},"4":{"position":[[244,7],[770,8],[1467,7]]}}}],["all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinst",{"_index":2539,"t":{"663":{"position":[[197,67]]},"670":{"position":[[590,67]]}}}],["all,configmap,pvc,serviceaccount,secret,rolebinding,serviceinst",{"_index":2552,"t":{"670":{"position":[[1262,67]]}}}],["all,secret,configmap",{"_index":1738,"t":{"318":{"position":[[109,21]]}}}],["all,secret,configmaps,serviceaccount,rolebind",{"_index":504,"t":{"45":{"position":[[905,48]]},"582":{"position":[[154,48],[327,48]]},"659":{"position":[[528,48],[756,48]]}}}],["allegrograph",{"_index":1381,"t":{"167":{"position":[[0,13],[156,12]]}}}],["alloc",{"_index":735,"t":{"82":{"position":[[877,9]]},"131":{"position":[[818,11],[853,10]]},"203":{"position":[[8,9]]},"333":{"position":[[849,9]]},"355":{"position":[[2144,9]]},"731":{"position":[[365,11]]}}}],["allow",{"_index":646,"t":{"76":{"position":[[351,5]]},"111":{"position":[[193,5]]},"123":{"position":[[630,6]]},"226":{"position":[[269,8]]},"263":{"position":[[392,8]]},"341":{"position":[[518,8]]},"343":{"position":[[167,6]]},"349":{"position":[[38,5]]},"359":{"position":[[130,5]]},"404":{"position":[[134,5],[159,5],[193,5]]},"432":{"position":[[382,5]]},"517":{"position":[[1087,5]]},"647":{"position":[[947,5]]},"670":{"position":[[200,5]]},"727":{"position":[[364,8]]},"729":{"position":[[305,6],[1900,6],[2146,8],[2317,6],[2555,5]]},"737":{"position":[[258,6]]},"835":{"position":[[15,6]]}}}],["allow_us",{"_index":1599,"t":{"228":{"position":[[481,11],[616,11]]}}}],["allowed_us",{"_index":1573,"t":{"226":{"position":[[210,14]]},"228":{"position":[[1174,14]]}}}],["alon",{"_index":1572,"t":{"226":{"position":[[146,5]]}}}],["along",{"_index":1071,"t":{"121":{"position":[[2505,5]]},"727":{"position":[[174,5]]}}}],["alongsid",{"_index":1809,"t":{"341":{"position":[[129,9]]}}}],["alpin",{"_index":1339,"t":{"147":{"position":[[1586,6]]}}}],["alreadi",{"_index":556,"t":{"59":{"position":[[100,7]]},"74":{"position":[[205,7]]},"76":{"position":[[423,7]]},"145":{"position":[[259,7]]},"233
":{"position":[[226,7],[269,7]]},"276":{"position":[[226,7],[269,7]]},"396":{"position":[[402,7]]},"424":{"position":[[69,7]]},"440":{"position":[[948,7]]},"467":{"position":[[580,7]]},"477":{"position":[[95,7]]},"598":{"position":[[58,7]]},"616":{"position":[[123,7]]},"643":{"position":[[423,7]]},"647":{"position":[[1019,7]]},"737":{"position":[[424,7]]},"877":{"position":[[4770,7]]}}}],["alter",{"_index":386,"t":{"30":{"position":[[934,7]]}}}],["altern",{"_index":1355,"t":{"155":{"position":[[507,14]]},"495":{"position":[[745,13]]},"877":{"position":[[131,13]]}}}],["although",{"_index":1316,"t":{"147":{"position":[[658,9]]}}}],["alway",{"_index":389,"t":{"30":{"position":[[1018,6]]},"175":{"position":[[8,6]]},"335":{"position":[[264,6]]},"341":{"position":[[543,6]]},"690":{"position":[[17,6]]},"746":{"position":[[177,6]]}}}],["amazon",{"_index":2901,"t":{"821":{"position":[[26,7]]}}}],["amd",{"_index":2707,"t":{"729":{"position":[[1218,3]]}}}],["amd64.tar.gz",{"_index":2104,"t":{"451":{"position":[[195,13],[268,13]]}}}],["amd64/minishift",{"_index":1951,"t":{"385":{"position":[[836,15]]}}}],["amd64sudo",{"_index":2843,"t":{"769":{"position":[[105,9]]}}}],["amet",{"_index":4,"t":{"2":{"position":[[22,5],[174,5],[385,4],[536,4],[928,5],[1122,4],[1500,4],[2138,4],[2534,4]]},"4":{"position":[[22,5],[174,5],[385,4],[536,4],[928,5],[1122,4],[1500,4],[2138,4],[2534,4]]}}}],["amount",{"_index":261,"t":{"19":{"position":[[483,6]]},"30":{"position":[[140,6]]},"82":{"position":[[616,6]]},"467":{"position":[[349,6]]},"629":{"position":[[33,6]]},"698":{"position":[[4,6]]},"748":{"position":[[537,6]]},"750":{"position":[[415,6]]}}}],["anacondadepend",{"_index":1425,"t":{"173":{"position":[[1199,21]]},"507":{"position":[[677,21]]},"523":{"position":[[811,21]]}}}],["analysi",{"_index":506,"t":{"49":{"position":[[11,8]]},"493":{"position":[[497,8]]},"686":{"position":[[64,8]]},"731":{"position":[[553,9]]}}}],["analyt",{"_index":1724,"t":{"314":{"position":[[309,10]]},"811":{"position":[[191,9]]}}}],["analyz",{"_index":2601,"t":{"692":{"position":[[23,9]]},"712":{"position":[[23,9]]}}}],["anatomi",{"_index":1762,"t":{"329":{"position":[[20,7]]},"739":{"position":[[1570,7]]}}}],["and/or",{"_index":1631,"t":{"242":{"position":[[169,6]]},"252":{"position":[[203,6]]},"280":{"position":[[169,6]]},"616":{"position":[[33,6]]},"690":{"position":[[569,6]]},"731":{"position":[[236,6]]}}}],["annot",{"_index":1766,"t":{"331":{"position":[[437,12]]},"355":{"position":[[353,12]]},"489":{"position":[[477,12]]},"729":{"position":[[2346,8]]}}}],["anonym",{"_index":1988,"t":{"400":{"position":[[734,9],[811,9]]}}}],["anoth",{"_index":1340,"t":{"151":{"position":[[135,7]]},"153":{"position":[[172,7]]},"155":{"position":[[143,7]]},"158":{"position":[[266,7]]},"160":{"position":[[321,7]]},"163":{"position":[[348,7]]},"165":{"position":[[885,7]]},"341":{"position":[[109,7]]}}}],["ansibl",{"_index":2501,"t":{"641":{"position":[[260,8],[302,8]]},"643":{"position":[[206,8],[734,7]]}}}],["ansible.html",{"_index":2519,"t":{"643":{"position":[[897,12]]}}}],["ant",{"_index":109,"t":{"2":{"position":[[1579,5],[1596,4],[1846,4],[2651,4]]},"4":{"position":[[1579,5],[1596,4],[1846,4],[2651,4]]}}}],["anyconnect",{"_index":1680,"t":{"300":{"position":[[542,12]]},"446":{"position":[[724,10],[774,11],[933,11],[1019,10],[1245,11]]}}}],["anymor",{"_index":1668,"t":{"300":{"position":[[134,8]]},"487":{"position":[[28,7],[225,8]]},"674":{"position":[[151,7]]},"748":{"position":[[40,7]]},"750":{"position":[[651,8]]}}}],["anyth",{"_index":375,"t":{"
30":{"position":[[683,8]]},"70":{"position":[[70,9]]},"72":{"position":[[129,8]]},"171":{"position":[[115,8]]},"211":{"position":[[109,8]]},"331":{"position":[[582,8]]},"355":{"position":[[498,8]]},"389":{"position":[[118,8]]},"529":{"position":[[104,8]]},"737":{"position":[[0,8],[673,8],[781,8]]},"871":{"position":[[552,8]]}}}],["anyuid",{"_index":475,"t":{"45":{"position":[[155,7]]},"137":{"position":[[865,6]]},"189":{"position":[[963,6]]},"207":{"position":[[1148,6]]},"343":{"position":[[115,6],[883,8]]},"355":{"position":[[3357,8]]},"373":{"position":[[341,13],[977,6]]},"381":{"position":[[626,13],[785,6],[849,6]]},"463":{"position":[[205,6]]},"517":{"position":[[643,6],[963,6],[1352,6]]},"643":{"position":[[344,7]]},"757":{"position":[[1218,13]]}}}],["anywher",{"_index":2084,"t":{"446":{"position":[[380,9],[1794,9]]}}}],["apach",{"_index":465,"t":{"45":{"position":[[0,6],[217,6],[843,6]]},"555":{"position":[[47,6]]},"748":{"position":[[813,6]]},"756":{"position":[[193,6]]},"757":{"position":[[307,6]]},"851":{"position":[[155,6]]}}}],["apart",{"_index":1826,"t":{"343":{"position":[[582,5]]}}}],["api",{"_index":883,"t":{"97":{"position":[[739,3]]},"131":{"position":[[2057,3]]}}}],["apivers",{"_index":1309,"t":{"147":{"position":[[473,11],[1214,11]]},"335":{"position":[[602,11]]},"337":{"position":[[514,11]]},"339":{"position":[[55,11]]},"341":{"position":[[876,11]]},"351":{"position":[[281,11]]},"353":{"position":[[189,11]]},"355":{"position":[[2264,11],[2502,11],[2687,11],[2847,11],[3967,11],[4231,11]]},"357":{"position":[[600,11]]},"511":{"position":[[131,11]]},"517":{"position":[[327,10],[763,11]]},"799":{"position":[[176,11]]}}}],["app",{"_index":1528,"t":{"203":{"position":[[152,4]]},"205":{"position":[[586,3]]},"207":{"position":[[925,3],[941,3],[979,3],[1017,3],[1060,3],[1071,3],[1199,3]]},"228":{"position":[[2461,3],[2533,3]]},"306":{"position":[[300,4]]},"335":{"position":[[680,4]]},"337":{"position":[[575,4]]},"339":{"position":[[116,4]]},"341":{"position":[[937,4],[1204,4]]},"343":{"position":[[790,4]]},"351":{"position":[[342,4],[449,4]]},"353":{"position":[[250,4]]},"355":{"position":[[2342,4],[2563,4],[2748,4],[2908,4],[3169,4],[3264,4],[4028,4],[4135,4],[4292,4]]},"357":{"position":[[665,4]]},"373":{"position":[[13,3],[57,3],[103,3],[759,3],[1241,3],[1303,3]]},"379":{"position":[[395,3],[422,3]]},"381":{"position":[[496,3],[556,3]]},"491":{"position":[[120,3],[154,3]]},"503":{"position":[[348,4]]},"565":{"position":[[7,3],[85,3],[124,3],[211,3]]},"610":{"position":[[447,3]]},"618":{"position":[[143,3]]},"659":{"position":[[308,3]]},"752":{"position":[[1598,3]]},"877":{"position":[[957,3],[3045,4],[3385,3],[5148,3],[5279,4],[5310,3],[5585,3],[5646,3],[6351,3]]}}}],["app.kubernetes.io/instance=freesurf",{"_index":2376,"t":{"529":{"position":[[1463,37]]}}}],["app=::.apps.dsri2.unimaas.nl/hub/oauth_callback",{"_index":1621,"t":{"228":{"position":[[2352,46]]}}}],["nameserv",{"_index":1671,"t":{"300":{"position":[[197,12],[210,10],[233,10],[256,10]]}}}],["namespac",{"_index":1306,"t":{"147":{"position":[[371,9],[409,9]]},"282":{"position":[[274,11],[305,9]]},"284":{"position":[[90,9],[100,11],[147,11],[178,9]]},"286":{"position":[[196,11],[227,9],[271,9],[300,9]]},"411":{"position":[[45,10],[510,9]]},"503":{"position":[[617,10]]},"737":{"position":[[90,10]]},"759":{"position":[[204,9]]},"877":{"position":[[2833,9],[2887,10],[5035,10]]}}}],["namespace='changem",{"_index":2832,"t":{"759":{"position":[[927,21]]}}}],["namespace=::/mnt",{"_index":501,"t":{"45":{"position":[[817,14]]}
}}],["pod_id>:: -- mkdir -p /mnt/workspace/resourcesoc cp workspace/resources/RMLStreamer.jar :/mnt/ Delete the Apache Flink cluster (change the application name): oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=flink-cluster","s":"Apache Flink","u":"/docs/catalog-data-streaming","h":"#apache-flink","p":46},{"i":51,"t":"Deploy applications Data Science catalog Imaging softwares","s":"Imaging softwares","u":"/docs/catalog-imaging","h":"","p":50},{"i":53,"t":"Cell image analysis software. See their website. You can start a container using the CellProfiler template in the Catalog web UI (make sure the Templates checkbox is checked) This template uses the official CellProfiler image hosted on DockerHub Persistent data folder 📂 Use the /usr/local/src/work folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. Once the CellProfiler has been started you can access it through the pod terminal (in the DSRI web UI, or using oc rsh POD_ID) cellprofiler --helpcellprofiler --runcellprofiler --run-headless Getting Started 🐬 For more information using cell profiler from the command line see this post","s":"CellProfiler","u":"/docs/catalog-imaging","h":"#cellprofiler","p":50},{"i":55,"t":"Deploy applications Data Science catalog Genomics","s":"Genomics","u":"/docs/catalog-genomics","h":"","p":54},{"i":57,"t":"Trinity assembles transcript sequences from Illumina RNA-Seq data. It represents a novel method for the efficient and robust de novo reconstruction of transcriptomes from RNA-seq data. See their documentation. You can start a container using the Trinity RNA-Seq template in the Catalog web UI (make sure the Templates checkbox is checked) This template uses the Trinity RNA-Seq image hosted in the UM IDS GitHub Container Registry Persistent data folder 📂 Use the /usr/local/src/work folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. 
We enabled the port 8787 in the container, if you need to deploy applications.","s":"Trinity RNA Seq","u":"/docs/catalog-genomics","h":"#trinity-rna-seq","p":54},{"i":59,"t":"On this page","s":"OpenDataHub","u":"/docs/catalog-opendatahub","h":"","p":58},{"i":61,"t":"Those components have been tested on the DSRI: JupyterHub Spark Operator from radanalytics","s":"Components available on DSRI","u":"/docs/catalog-opendatahub","h":"#components-available-on-dsri","p":58},{"i":63,"t":"Checkout the official documentation to start an instance of OpenDataHub (note that the Operator has already been installed) Then visit the documentation to reach the Spark cluster from a Jupyter notebook.","s":"Start Spark with JupyterHub","u":"/docs/catalog-opendatahub","h":"#start-spark-with-jupyterhub","p":58},{"i":65,"t":"Here are all the components that can be deployed as part of an OpenDataHub: JupyterHub Airflow Argo Grafana & Prometheus for data/logs visualization Spark Operator from radanalytics Kafka/Strimzi for streaming applications Superset for data visualization AI Library (Seldon to publish AI models) Let us know if you need help to deploy one of those components on the DSRI.","s":"All components","u":"/docs/catalog-opendatahub","h":"#all-components","p":58},{"i":67,"t":"Deploy applications Data Science catalog Utilities","s":"Utilities","u":"/docs/catalog-utilities","h":"","p":66},{"i":70,"t":"Start Ubuntu with the root user which has sudo permissions to install anything. You can start the application using the Ubuntu template in the Catalog web UI (make sure the Templates checkbox is checked) Login Credentials Username: root Password: Template creation password This template uses the Ubuntu image hosted on DockerHub, see its documentation at https://hub.docker.com/r/ubuntu Persistent data folder 📂 Use the /root folder (home of the root user) to store your data in the existing persistent storage. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. We enabled the port 8080 in the Ubuntu container if you need to deploy applications. To quickly access it from the terminal you can use the Terminal tab in the pod page, or via your local terminal: Get the Ubuntu pod ID: oc get pods Connect to it: oc rsh POD_ID Enable Bash in the Ubuntu container (if it starts with the Shell) bash","s":"With the terminal","u":"/docs/catalog-utilities","h":"#with-the-terminal","p":66},{"i":72,"t":"Start Ubuntu with a web UI accessible via a URL (using VNC). You will be the root user which has elevated permissions to install anything via apt install . Before you install a package run apt update. This also solves E: unable to locate package and E: no installation candidate errors. You can start the application using the Ubuntu with web UI template in the Catalog web UI (make sure the Templates checkbox is checked) Login Credentials Username: root Password: Template creation password This template uses the Docker image defined at https://github.com/fcwu/docker-ubuntu-vnc-desktop Less stable than the official image This image might be less stable than the original Ubuntu image. Let us know on Slack if you have any problem!","s":"With a web UI","u":"/docs/catalog-utilities","h":"#with-a-web-ui","p":66},{"i":74,"t":"Deploy a file browser on your persistent volume. 
This will provide a web UI to upload and download data to your DSRI persistent volume in case you need it (JupyterLab, RStudio and VisualStudio Code server already include a file browser) You can start a container using the File Browser for existing storage template in the Catalog web UI (make sure the Templates checkbox is checked) You can only deploy the file browser on an existing Persistent Volume Claim; this enables you to add a web UI to access this storage. The following parameters can be provided: Provide a unique Application name. It will be used to generate the application URL. Provide a Password; you will need to hash the password first for extra security, use this quick docker command to do it: docker run filebrowser/filebrowser hash mypassword The Storage name of the Persistent Volume Claim (PVC) that will be exposed by the filebrowser. Storage subpath in the Persistent Volume Claim that will be exposed by the filebrowser. Leave it empty to use the Root folder of the persistent volume. You can find the Storage name if you go to the deployments page > Storage panel.","s":"File browser","u":"/docs/catalog-utilities","h":"#file-browser","p":66},{"i":76,"t":"Find more details about how to create persistent storage info The DSRI uses OpenShift Container Storage (OCS), which is based on Ceph and offers the ReadWriteOnce and ReadWriteMany access modes. ReadWriteOnce (RWO) volumes cannot be mounted on multiple nodes. Use the ReadWriteMany (RWX) access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on a shut down or crashed node. Find more details about how to connect existing persistent storage info You can try the above method if you want to connect more applications to the same storage. This deployment requires the root user to be enabled on your project. Contact the DSRI support team or create a new issue to request root access or a persistent volume for your project if you don't have them. Credentials Default credentials will be username admin and password admin Change password Please change the password in the Filebrowser Web UI once it has been created.","s":"Creating or Connecting an Existing Persistent Storage","u":"/docs/catalog-utilities","h":"#creating-or-connecting-an-existing-persistent-storage","p":66},{"i":78,"t":"Guides Checkpointing Machine Learning Training","s":"Checkpointing Machine Learning Training","u":"/docs/checkpointing-ml-training","h":"","p":77},{"i":80,"t":"Checkpointing is periodically saving the learned model parameters and current hyperparameter values during training. It helps to resume training of a model where you left off, instead of restarting the training from the beginning. On the shared DSRI cluster, you might have access to a GPU node for a limited amount of time in one stretch, for example, maybe for 24 hours. Therefore, whenever the training job fails (due to time limit expiry or otherwise), many hours of training can be lost. This problem is mitigated by frequent checkpoint saving. When the training is resumed it'll continue from the last checkpoint saved. If the failure occurred 12 hours after the last checkpoint was saved, 12 hours of training is lost and needs to be re-done.
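For the multi-attach error mentioned in the storage section above, a minimal sketch of the force delete (the pod name is a placeholder; this assumes the node the pod ran on really is shut down or crashed):

```bash
# Find the pod stuck on the failed node
oc get pods
# Force delete it so the RWO volume can be re-attached on a new node
oc delete pod <pod_name> --force --grace-period=0
```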
This can be very expensive.","s":"What is Checkpointing?","u":"/docs/checkpointing-ml-training","h":"#what-is-checkpointing","p":77},{"i":82,"t":"In theory one could save a checkpoint every 10 minutes and only ever lose 10 minutes of training time, but this too would dramatically delay reaching the finish line, because large models can't be saved quickly, and if the saving time starts to create a bottleneck for the training this approach becomes counterproductive. Depending on your checkpointing methodology and the speed of your IO storage partition, the saving of a large model can take from dozens of seconds to several minutes. Therefore, the optimal saving frequency lies somewhere in the middle. The math is quite simple: measure the amount of time it takes to save the checkpoint, multiply it by how many times you'd want to save it, and see how much of an additional delay the checkpoint saving will contribute to the total training time. For instance, let's suppose: 1) Training Time (TT), i.e. allocated time on the cluster: x days 2) Time needed to save every checkpoint: y seconds 3) Checkpoint frequency: every z hours => Then, Total Number of Checkpoints during the complete training time (NCP) = (x * 24) / z => Total Time Spent on Checkpointing (TTSC) [in hours] = NCP * y / 3600 => % of Training time spent on checkpointing = (TTSC / (TT * 24)) * 100 Example calculations: Training Time (TT or x): 7 days Time needed to save every checkpoint (y): 20 secs Checkpoint frequency (z): every 30 minutes, i.e., 0.5 hours Then, NCP = 7 * 24 / 0.5 = 336 TTSC = 336 * 20 / 3600 = 1.87 hours % of Training time spent on checkpointing = (1.87 / (7 * 24)) * 100 ≈ 1.1 %","s":"Checkpointing frequency?","u":"/docs/checkpointing-ml-training","h":"#checkpointing-fequency","p":77},{"i":84,"t":"Both PyTorch and TensorFlow/Keras support checkpointing. The following sections provide an example of how checkpointing can be done in these libraries.","s":"Support for Checkpointing in Tensorflow/Keras and PyTorch?","u":"/docs/checkpointing-ml-training","h":"#support-for-checkpointing-in-tensorflowkeras-and-pytorch-","p":77},{"i":86,"t":"import tensorflow as tf
# Imports the ModelCheckpoint class
from tensorflow.keras.callbacks import ModelCheckpoint

# Create your model as you normally would and compile it:
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(32,)),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Create a Checkpoint Callback
checkpoint_callback = ModelCheckpoint(
    # filepath should be a path to your persistent volume, e.g. the /home/jovyan path in your JupyterLab pod.
    filepath='model_checkpoint.h5',  # You can use formats like .hdf5 or .ckpt.
    save_best_only=True,
    monitor='val_loss',
    mode='min',
    verbose=1)

# Train the Model with the Checkpoint Callback
history = model.fit(
    x_train, y_train,
    validation_data=(x_val, y_val),
    epochs=10,
    callbacks=[checkpoint_callback])

# Loading a Saved Checkpoint
# Load the model architecture + weights if you saved the full model
model = tf.keras.models.load_model('model_checkpoint.h5')
# If you saved only the weights, you would need to create the model architecture first, then load weights:
model.load_weights('model_checkpoint.h5')

# Optional Parameters for Checkpointing, Example with Custom Save Intervals
checkpoint_callback = ModelCheckpoint(
    filepath='model_checkpoint_epoch_{epoch:02d}.h5',
    save_freq='epoch',
    save_weights_only=True,
    verbose=1)","s":"Example of Tensorflow/Keras based checkpointing:","u":"/docs/checkpointing-ml-training","h":"#example-of-tensorflowkeras-based-checkpointing","p":77},{"i":88,"t":"import torch

# Example model
model = torch.nn.Linear(10, 2)

# Save the entire model
torch.save(model, 'model.pth')
# Loading the Entire Model
model = torch.load('model.pth')

# Saving and Loading Optimizer State, i.e., to continue training exactly
# as before, you may want to save the optimizer state as well.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Save model and optimizer state_dicts
checkpoint = {
    'epoch': 5,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'loss': 0.5,
}
torch.save(checkpoint, 'checkpoint.pth')

# Load checkpoint
checkpoint = torch.load('checkpoint.pth')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
model.train()  # Ensure model is in training mode if needed","s":"Example of PyTorch based checkpointing:","u":"/docs/checkpointing-ml-training","h":"#example-of-pytorch-based-checkpointing","p":77},{"i":90,"t":"PyTorch Documentation: https://pytorch.org/tutorials/beginner/saving_loading_models.html#save-on-gpu-load-on-cpu Tensorflow/Keras Documentation: https://www.digitalocean.com/community/tutorials/checkpointing-in-tensorflow https://keras.io/api/callbacks/model_checkpoint/ Machine Learning Engineering by Stas Bekman: https://stasosphere.com/machine-learning/","s":"External Resources","u":"/docs/checkpointing-ml-training","h":"#external-resources","p":77},{"i":93,"t":"Check if there are issues related to your contribution, or post a new issue to discuss improvements to the documentation. Fork this repository Otherwise you will need to first fork this repository, then send a pull request when your changes have been pushed. Direct change if permission If you are part of the MaastrichtU-IDS organization on GitHub you can directly create a new branch to make your change in the main repository.","s":"Contribute","u":"/docs/contribute","h":"","p":91},{"i":95,"t":"You can easily make quick changes directly on the GitHub website by clicking the Edit this page button at the bottom left of each documentation page. Or browsing to your forked repository. For example to edit the introduction page you can go to https://github.com/MaastrichtU-IDS/dsri-documentation/edit/master/website/docs/introduction.md","s":"⚡ Quick edit on GitHub","u":"/docs/contribute","h":"#-quick-edit-on-github","p":91},{"i":97,"t":"To edit the documentation it is easier to clone the repository on your laptop, and use a user-friendly markdown editor. Use a Markdown editor We strongly recommend using a markdown editor, such as Typora.
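Returning to the checkpointing-frequency arithmetic above, a small sketch that redoes the example calculation in the shell (the numbers are the example values from that section; awk only does the floating-point math):

```bash
tt_days=7        # Training Time (TT)
save_secs=20     # time to save one checkpoint (y)
freq_hours=0.5   # checkpoint frequency (z)
awk -v tt="$tt_days" -v y="$save_secs" -v z="$freq_hours" 'BEGIN {
  ncp = tt * 24 / z         # total number of checkpoints (NCP)
  ttsc = ncp * y / 3600     # hours spent on checkpointing (TTSC)
  printf "NCP=%d  TTSC=%.2f h  overhead=%.1f%%\n", ncp, ttsc, 100 * ttsc / (tt * 24)
}'
# Prints: NCP=336  TTSC=1.87 h  overhead=1.1%
```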
It makes writing documentation much faster, and more enjoyable. Clone the repository on your machine: git clone https://github.com/MaastrichtU-IDS/dsri-documentation.gitcd dsri-documentation Create a new branch from the master branch 🕊️ git checkout -b my-branch Add your changes in this branch ✒️ Start the website on http://localhost:3000 to test it: cd websiteyarn installyarn start Send a pull request Send a pull request to the master branch when your changes are done Development documentation Read more about running the API in development at https://github.com/MaastrichtU-IDS/dsri-documentation#run-for-development","s":"🏗️ Larger changes locally","u":"/docs/contribute","h":"#️-larger-changes-locally","p":91},{"i":99,"t":"The documentation website is automatically updated and redeployed at each change to the main branch using a GitHub Actions workflow.","s":"🔄 Automated deployment","u":"/docs/contribute","h":"#-automated-deployment","p":91},{"i":101,"t":"Most pages of this website are written in Markdown, hence they are really easy to edit, especially when you are using a convenient markdown editor. Only the index.js page is written in React JavaScript.","s":"📝 Help","u":"/docs/contribute","h":"#-help","p":91},{"i":103,"t":"Main DSRI documentation markdown files in website/docs Left docs menu defined in website/sidebars.json Blog articles as markdown files in website/docs Index and contribute pages in website/src/pages Images in website/src/static/img Website configuration file in website/docusaurus.config.js","s":"🔎 Files locations","u":"/docs/contribute","h":"#-files-locations","p":91},{"i":105,"t":"Colorful boxes Use the following tags to create colorful boxes in markdown files: :::note You can specify an optional titleGrey box::::::tip Green boxThe content and title *can* include markdown.::::::info Blue boxUseful information.::::::caution Be careful!Yellow box::::::danger Fire red boxDanger danger, mayday!:::","s":"🦄 Markdown tip","u":"/docs/contribute","h":"#-markdown-tip","p":91},{"i":107,"t":"Before sending a pull request make sure the DSRI documentation website still work as expected with the new changes properly integrated: cd websiteyarn installyarn start Send a pull request to the master branch. 
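The concatenated commands in the local-contribution steps above, spelled out as they would be typed:

```bash
git clone https://github.com/MaastrichtU-IDS/dsri-documentation.git
cd dsri-documentation
git checkout -b my-branch   # create a new branch from master
cd website
yarn install
yarn start                  # serves the site on http://localhost:3000
```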
Project contributors will review your change as soon as they can!","s":"✔️ Pull Request process","u":"/docs/contribute","h":"#️-pull-request-process","p":91},{"i":109,"t":"Deploy applications Data Science catalog Deploy Dask Cluster","s":"Deploy Dask Cluster","u":"/docs/dask-cluster","h":"","p":108},{"i":111,"t":"Go to the +Add page, and select to add Helm Chart Search and Select the Dask chart then click on Create Configure the Yaml file, while under the Jupyter section: Command: [\"jupyter\", \"lab\", \"--allow-root\", \"--ip=0.0.0.0\", \"--port=8888\", \"--no-browser\"] servicePort: 8888 Add Storage to the dask-jupyter pod as shown below Set up a new Persistent Volume Claim for the cluster as shown below","s":"🧊 Installation with Helm","u":"/docs/dask-cluster","h":"#-installation-with-helm","p":108},{"i":113,"t":"Switch to the Administrator view and navigate to Route Create a new route by clicking the button Create Route with the setup as shown below Navigate the provided link to access your local cluster","s":"🪐 Configure a Route for the Cluster","u":"/docs/dask-cluster","h":"#-configure-a-route-for-the-cluster","p":108},{"i":115,"t":"Start up the terminal Run oc get pods to find the full podname of the dask-jupyter Run oc logs and copy the token used to access the jupyter notebook","s":"🪐 Access the Jupyter Password/Token","u":"/docs/dask-cluster","h":"#-access-the-jupyter-passwordtoken","p":108},{"i":117,"t":"Deploy applications Data Science catalog Databases","s":"Databases","u":"/docs/deploy-database","h":"","p":116},{"i":119,"t":"You can easily create a database from the templates available in the DSRI OpenShift web UI catalog: You can connect to a database from another application in the same project by using the database service name as hostname: You can also use the oc CLI to get the services in your project: oc get services","s":"SQL databases","u":"/docs/deploy-database","h":"#sql-databases","p":116},{"i":121,"t":"Use the Postgresql template in the DSRI OpenShift web UI catalog to start a SQL database. Connect to the database When the database has been deployed, you can connect from another pod using your favorite language and connector. Example with the psql Command Line Interface: apt-get update && apt-get install postgresql-client -y Connect to the Postgresql database using the service name (change depending on the username and database name you chose): psql -h postgresql-db -U postgres db Checkout the dsri-demo repository for a quick demo for accessing and using a PostgreSQL database from a Jupyter notebook on the DSRI.","s":"Start PostgreSQL 🐘","u":"/docs/deploy-database","h":"#start-postgresql-","p":116},{"i":123,"t":"Use the MySQL template in the DSRI OpenShift web UI catalog. Connect to the database When the database has been deployed, you can connect from another pod using your favorite language and connector. Example with the mysql Command Line Interface: apt-get update && apt-get install mariadb-client -y Connect to the MySQL database using the service name: mysql -h example-mysql -p Checkout the dsri-demo repository for a quick demo for accessing and using a MySQL database from a Jupyter notebook on the DSRI. Alternatively, MySQL databases can be started using Helm, see the Helm documentation page for more details.","s":"Start MySQL 🐬","u":"/docs/deploy-database","h":"#start-mysql-","p":116},{"i":126,"t":"MongoDB is a general purpose, document-based, distributed database built for modern application developers and for the cloud era. 
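The PostgreSQL connection flow described above as one sequence (the service name postgresql-db and database name db follow the template example in that section; run this from another pod in the same project):

```bash
# The service name doubles as the hostname inside the project
oc get services
# From another (Debian-based) pod: install the client, then connect
apt-get update && apt-get install postgresql-client -y
psql -h postgresql-db -U postgres db
```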
Use the MongoDB template in the DSRI OpenShift web UI catalog. Connect to the database Use the service name as hostname to connect from another pod in the same project.","s":"MongoDB 🌿","u":"/docs/deploy-database","h":"#mongodb-","p":116},{"i":128,"t":"Redis is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. Use the Redis template in the DSRI OpenShift web UI catalog. Connect to the database Use the service name as hostname to connect from another pod in the same project.","s":"Redis 🎲","u":"/docs/deploy-database","h":"#redis-","p":116},{"i":131,"t":"Search for the Virtuoso triplestore template in the DSRI web UI catalog. Instantiate the template to create a Virtuoso triplestore in your project. The deployment is based on the latest open source version of Virtuoso: https://hub.docker.com/r/openlink/virtuoso-opensource-7 Connect to the database Use the service name as hostname to connect from another pod in the same project.","s":"OpenLink Virtuoso triplestore","u":"/docs/deploy-database","h":"#openlink-virtuoso-triplestore","p":116},{"i":133,"t":"Use the official DockerHub image if you have an enterprise license. Or build the GraphDB free edition image from graphdb-docker on GitHub. After copying the .zip file in the graphdb-docker/free-edition folder, go to the graphdb-docker folder in your terminal: cd graphdb-docker Before creating your GraphDB ImageStream, make sure you are in the right project: oc project my-project Create the ImageStream for GraphDB: oc new-build --name graphdb --binary Build the image on the DSRI and save it in the ImageStream: oc start-build graphdb --from-dir=free-edition --follow --wait You can now use the Ontotext GraphDB template to deploy a GraphDB instance on DSRI. Use the name of the ImageStream when instantiating the template; you can check if the image was properly built in Search > Filter Resources for ImageStreams Connect to the database Use the service name as hostname to connect from another pod in the same project.","s":"Ontotext GraphDB triplestore","u":"/docs/deploy-database","h":"#ontotext-graphdb-triplestore","p":116},{"i":135,"t":"AllegroGraph® is a modern, high-performance, persistent graph database. It supports SPARQL, RDFS++, and Prolog reasoning from numerous client applications. AllegroGraph has not been tested on DSRI yet, but it can be deployed on Kubernetes using Helm, cf. https://www.github.com/franzinc/agraph-examples/tree/master/clustering%2Fkubernetes%2Fmmr%2Fkubernetes-mmr.md","s":"AllegroGraph","u":"/docs/deploy-database","h":"#allegrograph","p":116},{"i":137,"t":"Deploy applications Deploy from a Docker image","s":"Deploy from a Docker image","u":"/docs/deploy-from-docker","h":"","p":136},{"i":139,"t":"The easiest way to deploy a service on the DSRI is to use a Docker image from DockerHub 🐳. Search for an image for your service published on DockerHub Google \"dockerhub my_service_name\" Sometimes multiple images can be found for your service. Take the official image when possible, or the one most relevant to your use-case. Deploy from a Dockerfile If no suitable image can be found on DockerHub, it can be deployed from a Dockerfile. See above to do so.","s":"Find an image for your service","u":"/docs/deploy-from-docker","h":"#find-an-image-for-your-service","p":136},{"i":141,"t":"Once you have a Docker image for your application you can deploy it using the DSRI web UI. Go to the Overview page of your project.
Click the Add to Project button in the top right corner > Deploy Image Select to deploy from Image Name Provide your image name, e.g. umdsri/freesurfer Optionally change the Name; it needs to be unique per project. Click Deploy. Fix a common problem Once the application is deployed it will most probably fail because it has not been optimized to work with the OpenShift random user ID. You will need to add an entry to the deployment to enable your image to run using any user ID. Go to Topology, click on your application node, click on the Actions button of your application details, and Edit deployment. In the deployment YAML search for spec: which has a containers: as child, and add the following under spec: spec: serviceAccountName: anyuid containers: ... Access the application You should now see your pod deployed on the Overview page of your project. You can expose routes to this pod in the Overview page: Create route","s":"Deploy the image on DSRI","u":"/docs/deploy-from-docker","h":"#deploy-the-image-on-dsri","p":136},{"i":143,"t":"In case there is no Docker image for your application you can build and push one. To build and push a Docker image you will need to have Docker installed. Install Docker See the official documentation to install Docker.","s":"Build and push a new Docker image","u":"/docs/deploy-from-docker","h":"#build-and-push-a-new-docker-image","p":136},{"i":145,"t":"If no images are available on DockerHub, it is still possible that the developers created the Dockerfile to build the image without pushing it to DockerHub. Go to the GitHub/GitLab source code repository and search for a Dockerfile; it can usually be found in the source code repository root folder, in a docker subfolder, or as instructions in the README.md If no Dockerfile is available we will need to define one. Contact us Feel free to contact us to get help with this, especially if you are unfamiliar with Docker.","s":"Define a Dockerfile","u":"/docs/deploy-from-docker","h":"#define-a-dockerfile","p":136},{"i":147,"t":"Once a Dockerfile has been defined for the service you can build it by running the following command from the source code root folder, where the Dockerfile is: docker build -t username/my-service . Arguments can be provided when starting the build; they need to be defined in the Dockerfile to be used. docker build -t username/my-service --build-arg MY_ARG=my_value .","s":"Build the image","u":"/docs/deploy-from-docker","h":"#build-the-image","p":136},{"i":149,"t":"Before pushing it to DockerHub you will need to create a repository. To do so, click on Create Repository. DockerHub is free for public repositories Images can be published under your DockerHub user or an organization you belong to Login to DockerHub, if not already done: docker login Push the image previously built to DockerHub: docker push username/my-service You can link DockerHub to your source code repository and ask it to build the Docker image automatically (from the Dockerfile in the root folder). It should take between 10 and 30 min for DockerHub to build your image Deploy from a local Dockerfile You can also deploy a service on the DSRI directly from a local Dockerfile, to avoid using DockerHub.
See this page to deploy a service from a local Dockerfile for more instructions","s":"Push to DockerHub","u":"/docs/deploy-from-docker","h":"#push-to-dockerhub","p":136},{"i":151,"t":"deploy-gitlab-runner First, obtain gitlab runner registration token via the gitlab webinterface TODO: add screenshot Add \"GitLab Runner\" operator to your project from the Operators --> OperatorHub page. Make sure you choose the \"certified\" GitLab Runner (v1.4.0) The community runner (v1.10.0) is a bit more up to date, but currently does not work. Install in a specific namespace on the cluster. Choose your namespace in the dropdown. Create registration token secret: ---apiVersion: v1kind: Secretmetadata: name: gitlab-runner-secrettype: OpaquestringData: runner-registration-token: oc create -f gitlab-runner-secret.yaml Although, this should also work: oc create secret generic gitlab-runner-secret --from-literal=runner-registration-token= Add the following to the ConfigMap of the GitLab Runner operator: [[runners]] executor = \"kubernetes\" [runners.kubernetes] [runners.kubernetes.volumes] [[runners.kubernetes.volumes.empty_dir]] name = \"empty-dir\" mount_path = \"/\" medium = \"Memory\" Create the configmap: oc create configmap custom-config-toml --from-file config.toml=/tmp/customconfig Create the gitlab runner Custom Resource Definition: apiVersion: apps.gitlab.com/v1beta2kind: Runnermetadata: name: gitlab-runnerspec: gitlabUrl: https://gitlab.maastrichtuniversity.nl token: gitlab-runner-secret config: custom-config-toml tags: openshift --- other stuff dont use!apiVersion: apps.gitlab.com/v1beta2kind: Runnermetadata: name: gitlab-runnerspec: gitlabUrl: https://gitlab.maastrichtuniversity.nl buildImage: alpine token: gitlab-runner-secret tags: openshift","s":"deploy-gitlab-runner","u":"/docs/deploy-gitlab-runner","h":"","p":150},{"i":153,"t":"Deploy applications Data Science catalog Jupyter Notebooks","s":"Jupyter Notebooks","u":"/docs/deploy-jupyter","h":"","p":152},{"i":155,"t":"Start a JupyterLab container based on the official Jupyter docker stacks (debian), with sudo privileges to install anything you need (e.g. pip or apt packages) You can start a container using the JupyterLab template in the Catalog web UI (make sure the Templates checkbox is checked) When instantiating the template you can provide a few parameters, such as: Password to access the notebook Optionally you can provide a git repository to be automatically cloned in the JupyterLab (if there is a requirements.txt packages will be automatically installed with pip) Docker image to use for the notebook (see below for more details on customizing the docker image) Your git username and email to automatically configure git The DSRI will automatically create a persistent volume to store data you will put in the /home/jovyan/work folder (the folder used by the notebook interface). You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. 
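Referring back to the GitLab Runner setup above, the registration steps as one sequence (the token is a placeholder, and this assumes you saved the Runner resource shown in that section as gitlab-runner.yaml):

```bash
# Create the registration token secret (fill in your own token)
oc create secret generic gitlab-runner-secret --from-literal=runner-registration-token=<your_token>
# Create the ConfigMap holding the custom config.toml
oc create configmap custom-config-toml --from-file config.toml=/tmp/customconfig
# Create the GitLab Runner custom resource from its YAML file
oc create -f gitlab-runner.yaml
```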
With this template you can use any image based on the official Jupyter docker stack: https://github.com/jupyter/docker-stacks ghcr.io/maastrichtu-ids/jupyterlab:latest: custom image for data science on the DSRI, with additional kernels (Java), conda integration, VisualStudio Code, and autocomplete for Python ghcr.io/maastrichtu-ids/jupyterlab:knowledge-graph: custom image for working with knowledge graph on the DSRI, with SPARQL kernel and OpenRefine jupyter/scipy-notebook: some packages for science are preinstalled jupyter/datascience-notebook: with Julia kernel jupyter/tensorflow-notebook: with tensorflow package pre-installed jupyter/r-notebook: to work with R jupyter/pyspark-notebook: if you want to connect to a Spark cluster jupyter/all-spark-notebook: if you want to run Spark locally in the notebook You can also build your own image, we recommend to use this repository as example to extend a JupyterLab image: https://github.com/MaastrichtU-IDS/jupyterlab","s":"🪐 Start JupyterLab","u":"/docs/deploy-jupyter","h":"#-start-jupyterlab","p":152},{"i":157,"t":"With the ghcr.io/maastrichtu-ids/jupyterlab:latest image, you can easily start notebooks from the JupyterLab Launcher page using installed conda environments, at the condition nb_conda_kernels and ipykernel are installed in those environments. You can pass a Git repository URL which contains an environment.yml file in the root folder when starting JupyterLab, the conda environment will automatically be installed at the start of your container, and available in the JupyterLab Launcher page. You can use this repository as example: https://github.com/MaastrichtU-IDS/dsri-demo Or you can install it directly in a running JupyterLab (we use mamba which is like conda but faster): mamba env create -f environment.yml You'll need to wait for 1 or 2 minutes before the new conda environment becomes available on the JupyterLab Launcher page. You can easily install an environment with a different version of Python if you need it. Here is an example of an environment.yml file to create an environment with Python 3.9, install the minimal dependencies required to easily starts notebooks in this environment with conda, and install a pip package: name: custom-envchannels: - defaults - conda-forge - anacondadependencies: - python=3.9 - ipykernel - nb_conda_kernels - pip - pip: - matplotlib ⚠️ You cannot use conda activate in a Docker container, so you will need to either open a notebook using the kernel for your conda env, or use conda run to run scripts in the new environment: conda run -n custom-env python --version","s":"📦️ Manage dependencies with Conda","u":"/docs/deploy-jupyter","h":"#️-manage-dependencies-with-conda","p":152},{"i":159,"t":"You can always use git from the terminal. Configure username Before pushing back to GitHub or GitLab, you will need to configure you username and email in VSCode terminal: git config --global user.name \"Jean Dupont\"git config --global user.email jeandupont@gmail.com Save your password You can run this command to ask git to save your password for 15min: git config credential.helper cache Or store the password in a plain text file: git config --global credential.helper 'store --file ~/.git-credentials' Git tip We recommend to use SSH instead of HTTPS connection when possible, checkout here how to generate SSH keys and use them with your GitHub account. You can also enable and use the JupyterLab Git extension to clone and manage your git repositories. 
It will prompt you for a username and password if the repository is private.","s":"🐙 Use git in JupyterLab","u":"/docs/deploy-jupyter","h":"#-use-git-in-jupyterlab","p":152},{"i":161,"t":"Initialize repository Include git details in DSRI project setup Verify automatic deployment","s":"🐶 Example","u":"/docs/deploy-jupyter","h":"#-example","p":152},{"i":163,"t":"Deploy applications Anatomy of a DSRI application","s":"Anatomy of a DSRI application","u":"/docs/anatomy-of-an-application","h":"","p":162},{"i":165,"t":"First, you need to create your Template objects, this will be the main object we will create here as all other objects defined will be deployed by this template. In this part we mainly just provide the description and information that will be shown to users when deploying the application from the DSRI web UI catalog. ---kind: TemplateapiVersion: template.openshift.io/v1labels: template: jupyterlab-rootmetadata: name: jupyterlab-root annotations: openshift.io/display-name: JupyterLab description: |- Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need. 📂 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks - jupyter/tensorflow-notebook - jupyter/r-notebook - jupyter/all-spark-notebook - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels) Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages. iconClass: icon-python tags: python,jupyter,notebook openshift.io/provider-display-name: Institute of Data Science, UM openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help","s":"Application walkthrough","u":"/docs/anatomy-of-an-application","h":"#application-walkthrough","p":162},{"i":167,"t":"Then define the parameters the user will be able to define in the DSRI catalog web UI when instantiating the application. APPLICATION_NAME is the most important as it will be used everywhere to create the objects and identify the application. parameters:- name: APPLICATION_NAME displayName: Name for the application description: Must be without spaces (use -), and unique in the project. value: jupyterlab required: true- name: PASSWORD displayName: JupyterLab UI Password description: The password/token to access the JupyterLab web UI required: true- name: APPLICATION_IMAGE displayName: Jupyter notebook Docker image value: ghcr.io/maastrichtu-ids/jupyterlab:latest required: true description: You can use any image based on https://github.com/jupyter/docker-stacks- name: STORAGE_SIZE displayName: Storage size description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`. 
value: 5Gi required: true We can then refer to those parameters' values (filled in by the users of the template) in the rest of the template using this syntax: ${APPLICATION_NAME} We will now describe all objects deployed when we instantiate this template (to start an application).","s":"Parameters","u":"/docs/anatomy-of-an-application","h":"#parameters","p":162},{"i":169,"t":"First we define the ImageStream object to import the Docker image(s) of your application(s) on the DSRI cluster Setting importPolicy: scheduled to true will have the DSRI automatically check for new versions of this image, which can be useful if you want to always have the latest published version of an application. Visit the OpenShift ImageStreams documentation for more details. Be careful as enabling this feature without real need will cause the DSRI to query DockerHub more, which might require you to log in to DockerHub to increase your image pull quota. objects:- kind: \"ImageStream\" apiVersion: image.openshift.io/v1 metadata: name: ${APPLICATION_NAME} labels: app: ${APPLICATION_NAME} spec: tags: - name: latest from: kind: DockerImage name: ${APPLICATION_IMAGE} importPolicy: scheduled: true lookupPolicy: local: true","s":"Image","u":"/docs/anatomy-of-an-application","h":"#image","p":162},{"i":171,"t":"Then we define the PersistentVolumeClaim, which is a persistent storage on which we will mount the /home/jovyan folder to avoid losing data if our application is restarted. Any file outside of a persistent volume can be lost at any moment if the pod restarts; usually these are only temporary files if you are properly working in the persistent volume folder. This can also be useful if your application is crashing: stopping and restarting your pod (application) might fix it. - kind: \"PersistentVolumeClaim\" apiVersion: \"v1\" metadata: name: ${APPLICATION_NAME} labels: app: ${APPLICATION_NAME} spec: accessModes: - \"ReadWriteMany\" resources: requests: storage: ${STORAGE_SIZE}","s":"Create storage","u":"/docs/anatomy-of-an-application","h":"#create-storage","p":162},{"i":173,"t":"Then the Secret to store the password - kind: \"Secret\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} stringData: application-password: \"${PASSWORD}\"","s":"Secret","u":"/docs/anatomy-of-an-application","h":"#secret","p":162},{"i":175,"t":"Then the DeploymentConfig (aka. Deployment) defines how to deploy the JupyterLab image; if you want to deploy another application alongside JupyterLab you can do it by adding as many deployments as you want! (and use the same, or different, persistent volume claims for storage). Checkout the OpenShift Deployments documentation for more details. In this first block we will define the strategy to update and recreate our applications if you change the YAML configuration, or when a new latest Docker image is published, allowing your service to always use the latest up-to-date version of the software without any intervention from you. We chose the Recreate release option to make sure the container is properly recreated and avoid unnecessary resource consumption, but you can also use Rolling to have a downtime-free transition between deployments.
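One way to instantiate such a template from the CLI rather than the web UI, as a minimal sketch (assuming you saved the template locally as template.yml; oc process renders the ${...} parameters, and the parameter values are illustrative):

```bash
# Render the template with your parameter values and create the objects
oc process -f template.yml \
  -p APPLICATION_NAME=jupyterlab \
  -p PASSWORD=my-secret-password \
  | oc create -f -
```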
- kind: \"DeploymentConfig\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: \"${APPLICATION_NAME}\" spec: replicas: 1 strategy: type: \"Recreate\" triggers: - type: \"ConfigChange\" - type: \"ImageChange\" imageChangeParams: automatic: true containerNames: - jupyter-notebook from: kind: ImageStreamTag name: ${APPLICATION_NAME}:latest selector: app: \"${APPLICATION_NAME}\" deploymentconfig: \"${APPLICATION_NAME}\"","s":"Deployment","u":"/docs/anatomy-of-an-application","h":"#deployment","p":162},{"i":177,"t":"Then we define the spec of the pod that will be deployed by this DeploymentConfig. Setting the serviceAccountName: anyuid is required for most Docker containers as it allows to run a container using any user ID (e.g. root). Otherwise OpenShift expect to use a random user ID, which is require to build the Docker image especially to work with random user IDs. We then create the containers: array which is where we will define the containers deployed in the pod. It is recommended to deploy 1 container per pod, as it enables a better separation and management of the applications, apart if you know what you are doing. You can also provide the command to run at the start of the container to overwrite the default one, and define the exposed ports (here 8080). template: metadata: labels: app: \"${APPLICATION_NAME}\" deploymentconfig: \"${APPLICATION_NAME}\" spec: serviceAccountName: \"anyuid\" containers: - name: \"jupyter-notebook\" image: \"${APPLICATION_NAME}:latest\" command: - \"start-notebook.sh\" - \"--no-browser\" - \"--ip=0.0.0.0\" ports: - containerPort: 8888 protocol: TCP","s":"Pod spec","u":"/docs/anatomy-of-an-application","h":"#pod-spec","p":162},{"i":179,"t":"Then define the environment variables used in your container, usually the password and most parameters are set here, such as enabling sudo in the container. env: - name: JUPYTER_TOKEN valueFrom: secretKeyRef: key: \"application-password\" name: \"${APPLICATION_NAME}\" - name: JUPYTER_ENABLE_LAB value: \"yes\" - name: GRANT_SUDO value: \"yes\"","s":"Environment variables in the container","u":"/docs/anatomy-of-an-application","h":"#environment-variables-in-the-container","p":162},{"i":181,"t":"Then we need to mount the previously created PersistentVolume on /home/jovyan , the workspace of JupyterLab. Be careful: volumeMounts is in the containers: object, and volumes is defined in the spec: object volumeMounts: - name: data mountPath: \"/home/jovyan\" volumes: - name: data persistentVolumeClaim: claimName: \"${APPLICATION_NAME}\"","s":"Mount storage","u":"/docs/anatomy-of-an-application","h":"#mount-storage","p":162},{"i":183,"t":"Then we define the securityContext to allow JupyterLab to run as root, this is not required for most applications, just a specificity of the official Jupyter images to run with root privileges. securityContext: runAsUser: 0 supplementalGroups: - 100 automountServiceAccountToken: false","s":"Security context","u":"/docs/anatomy-of-an-application","h":"#security-context","p":162},{"i":185,"t":"Then we create the Service to expose the port 8888 of our JupyterLab container on the project network. This means that the JupyterLab web UI will reachable by all other application deployed in your project using its application name as hostname (e.g. 
jupyterlab) - kind: \"Service\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} spec: ports: - name: 8888-tcp protocol: TCP port: 8888 targetPort: 8888 selector: app: ${APPLICATION_NAME} deploymentconfig: \"${APPLICATION_NAME}\" type: ClusterIP","s":"Service","u":"/docs/anatomy-of-an-application","h":"#service","p":162},{"i":187,"t":"Finally, we define the Route which will automatically generate a URL for the service of your application based following this template: APPLICATION_NAME-PROJECT_ID-DSRI_URL - kind: \"Route\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} spec: host: '' to: kind: Service name: \"${APPLICATION_NAME}\" weight: 100 port: targetPort: 8888-tcp tls: termination: edge insecureEdgeTerminationPolicy: Redirect","s":"Route","u":"/docs/anatomy-of-an-application","h":"#route","p":162},{"i":189,"t":"Here is a complete file to describe the JupyterLab deployment template, you can add it to your project catalog by going to +Add in the DSRI web UI, then click on the option to add a YAML file content, and copy paste the template YAML. ---kind: TemplateapiVersion: template.openshift.io/v1labels: template: jupyterlab-rootmetadata: name: jupyterlab-root annotations: openshift.io/display-name: JupyterLab description: |- Start JupyterLab images as the `jovyan` user, with sudo privileges to install anything you need. 📂 Use the `/home/jovyan` folder (workspace of the JupyterLab UI) to store your data in the persistent storage automatically created You can find the persistent storage in the DSRI web UI, go to Administrator view > Storage > Persistent Volume Claims You can use any image based on the official Jupyter docker stack https://github.com/jupyter/docker-stacks - jupyter/tensorflow-notebook - jupyter/r-notebook - jupyter/all-spark-notebook - ghcr.io/maastrichtu-ids/jupyterlab (with Java and SPARQL kernels) Or build your own! Checkout https://github.com/MaastrichtU-IDS/jupyterlab for an example of custom image Once JupyterLab is deployed you can install any pip packages, JupyterLab extensions, and apt packages. iconClass: icon-python tags: python,jupyter,notebook openshift.io/provider-display-name: Institute of Data Science, UM openshift.io/documentation-url: https://maastrichtu-ids.github.io/dsri-documentation/docs/deploy-jupyter openshift.io/support-url: https://maastrichtu-ids.github.io/dsri-documentation/help parameters:- name: APPLICATION_NAME displayName: Name for the application description: Must be without spaces (use -), and unique in the project. value: jupyterlab required: true- name: PASSWORD displayName: JupyterLab UI Password description: The password/token to access the JupyterLab web UI required: true- name: APPLICATION_IMAGE displayName: Jupyter notebook Docker image value: ghcr.io/maastrichtu-ids/jupyterlab:latest required: true description: You can use any image based on https://github.com/jupyter/docker-stacks- name: STORAGE_SIZE displayName: Storage size description: Size of the storage allocated to the notebook persistent storage in `/home/jovyan`. 
value: 5Gi required: true objects:- kind: \"ImageStream\" apiVersion: image.openshift.io/v1 metadata: name: ${APPLICATION_NAME} labels: app: ${APPLICATION_NAME} spec: tags: - name: latest from: kind: DockerImage name: ${APPLICATION_IMAGE} lookupPolicy: local: true- kind: \"PersistentVolumeClaim\" apiVersion: \"v1\" metadata: name: ${APPLICATION_NAME} labels: app: ${APPLICATION_NAME} spec: accessModes: - \"ReadWriteMany\" resources: requests: storage: ${STORAGE_SIZE}- kind: \"Secret\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} stringData: application-password: \"${PASSWORD}\"- kind: \"DeploymentConfig\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: \"${APPLICATION_NAME}\" spec: replicas: 1 strategy: type: Recreate triggers: - type: ConfigChange - type: ImageChange imageChangeParams: automatic: true containerNames: - jupyter-notebook from: kind: ImageStreamTag name: ${APPLICATION_NAME}:latest selector: app: \"${APPLICATION_NAME}\" deploymentconfig: \"${APPLICATION_NAME}\" template: metadata: labels: app: \"${APPLICATION_NAME}\" deploymentconfig: \"${APPLICATION_NAME}\" spec: serviceAccountName: \"anyuid\" containers: - name: jupyter-notebook image: \"${APPLICATION_NAME}:latest\" command: - \"start-notebook.sh\" - \"--no-browser\" - \"--ip=0.0.0.0\" ports: - containerPort: 8888 protocol: TCP env: - name: \"JUPYTER_TOKEN\" valueFrom: secretKeyRef: key: application-password name: \"${APPLICATION_NAME}\" - name: JUPYTER_ENABLE_LAB value: \"yes\" - name: GRANT_SUDO value: \"yes\" volumeMounts: - name: data mountPath: \"/home/jovyan\" volumes: - name: data persistentVolumeClaim: claimName: \"${APPLICATION_NAME}\" securityContext: runAsUser: 0 supplementalGroups: - 100 automountServiceAccountToken: false- kind: \"Service\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} spec: ports: - name: 8888-tcp protocol: TCP port: 8888 targetPort: 8888 selector: app: ${APPLICATION_NAME} deploymentconfig: \"${APPLICATION_NAME}\" type: ClusterIP- kind: \"Route\" apiVersion: v1 metadata: name: \"${APPLICATION_NAME}\" labels: app: ${APPLICATION_NAME} spec: host: '' to: kind: Service name: \"${APPLICATION_NAME}\" weight: 100 port: targetPort: 8888-tcp tls: termination: edge insecureEdgeTerminationPolicy: Redirect","s":"The complete application","u":"/docs/anatomy-of-an-application","h":"#the-complete-application","p":162},{"i":191,"t":"This practice is more advanced, and is not required for most deployments, but you can easily create a ConfigMap object to define any file to be provided at runtime to the application. For example here we are going to define a python script that will be run when starting JupyterLab (jupyter_notebook_config.py). It will clone the git repository URL, provided by the user when creating the template, at the start of JupyterLab in the workspace. 
If this repo contains files with list of packages in the root folder (requirements.txt and packages.txt), they will be installed at start - kind: ConfigMap apiVersion: v1 metadata: name: \"${APPLICATION_NAME}-cfg\" labels: app: \"${APPLICATION_NAME}\" data: # Clone git repo, then install requirements.txt and packages.txt jupyter_notebook_config.py: | import os git_url = os.environ.get('GIT_URL') home_dir = os.environ.get('HOME') os.chdir(home_dir) if git_url: repo_id = git_url.rsplit('/', 1)[-1] os.system('git clone --quiet --recursive ' + git_url) os.chdir(repo_id) if os.path.exists('packages.txt'): os.system('sudo apt-get update') os.system('cat packages.txt | xargs sudo apt-get install -y') if os.path.exists('requirements.txt'): os.system('pip install -r requirements.txt') os.chdir(home_dir) We will then need to mount this config file like a persistent volume in the path we want it to be (here /etc/jupyter/openshift), change the volumes and volumeMounts of your DeploymentConfig: volumeMounts: - name: data mountPath: \"/home/jovyan\" - name: configs mountPath: \"/etc/jupyter/openshift\" automountServiceAccountToken: false volumes: - name: data persistentVolumeClaim: claimName: \"${APPLICATION_NAME}\" - name: configs configMap: name: \"${APPLICATION_NAME}-cfg\" Then change the jupyter-notebook container start command to include this config file: command: - \"start-notebook.sh\" - \"--no-browser\" - \"--ip=0.0.0.0\" - \"--config=/etc/jupyter/openshift/jupyter_notebook_config.py\" Add the optional parameter to get the git URL to clone when the user create the template: parameters:- name: GIT_URL displayName: URL of the git repository to clone (optional) required: false description: Source code will be automatically cloned, then requirements.txt and packages.txt content will be automatically installed if presents Finally, add the git URL parameter provided by the user as environment variable of the container, so that it is picked up by the config script when running at the start of JupyterLab: env: - name: GIT_URL value: \"${GIT_URL}\"","s":"Add a configuration file","u":"/docs/anatomy-of-an-application","h":"#add-a-configuration-file","p":162},{"i":193,"t":"You can add readiness and liveness probes to a container to automatically check if the web application is up and ready. This will allow to wait for the JupyterLab web UI to be accessible before showing the application as ready in the Topology. Useful if you are cloning a repository and installing packages, which will take more time to start JupyterLab. 
containers: - name: jupyter-notebook readinessProbe: tcpSocket: port: 8888 livenessProbe: initialDelaySeconds: 15 tcpSocket: port: 8888 failureThreshold: 40 periodSeconds: 10 timeoutSeconds: 2 Checkout the OpenShift Application health documentation for more details.","s":"Add automated health checks","u":"/docs/anatomy-of-an-application","h":"#add-automated-health-checks","p":162},{"i":195,"t":"You can also define resource requests and limits for each DeploymentConfig, in spec: spec: resources: requests: cpu: \"1\" memory: \"2Gi\" limits: cpu: \"128\" memory: \"300Gi\"","s":"Define resource limits","u":"/docs/anatomy-of-an-application","h":"#define-resource-limits","p":162},{"i":197,"t":"The easiest way to build a template for a new application is to start from this JupyterLab template: Replace jupyterlab-root by your application name Replace 8888 by your application port Change the template and parameters descriptions to match your application Remove the securityContext part, and other objects you do not need If you need to start multiple containers, copy/paste the objects you need to create and edit them","s":"Build your own application template","u":"/docs/anatomy-of-an-application","h":"#build-your-own-application-template","p":162},{"i":199,"t":"Deploy applications Data Science catalog JupyterHub","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":201,"t":"Before you begin download the config.yaml Download the preconfigured config.yaml from our GitHub repository. The default config that is provided by JupyterHub will not work.","s":"Downloading and adjusting the config.yaml","u":"/docs/deploy-jupyterhub","h":"#downloading-and-adjusting-the-configyaml","p":198},{"i":204,"t":"Persistent volumes Persistent volumes are automatically created for each user and instance started in JupyterHub to ensure persistence of the data even if JupyterHub is stopped. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. It is possible to change the default size of a persistent volume claim for a user in the config.yaml. In our config.yaml the default value is 2Gi. However if you think that your users will need more storage space you can change this default size in the config.yaml. singleuser: # ... storage: capacity: 2Gi","s":"Setting users' default persistent volume size","u":"/docs/deploy-jupyterhub","h":"#setting-users-default-persistent-volume-size","p":198},{"i":206,"t":"At the moment we support three different authentication methods. One for testing purposes (dummy authentication), one for people who are working alone in a JupyterHub instance or with one or two collaborators (allowed_users / admin_users authentication), and one for allowing groups of people to collaborate in the same JupyterHub instance (GitHub OAuth). By default the dummy authentication is set in the config.yaml. Note that this is only for testing purposes!!! However, with very few changes to the config.yaml you can set up the other authentication methods. For reference see the zero2jupyterhub documentation about authentication methods Dummy authentication This authentication method is set by default and is only there so that you can easily test your JupyterHub instance without needing to set up proper authentication. The catch with this method is that whatever username/password combination you fill in, you will get access! In other words this is completely not safe to use in use cases other than testing!
In the config.yaml you see (besides the commented-out other authentication methods) the following block of text: hub: # ... config: JupyterHub: admin_access: true authenticator_class: dummy Some parts are intentionally left out here, shown as dots # ... for better representation. If you are first setting up your JupyterHub instance you can leave this as is. Upon going to your instance via the URL you will get prompted with a login screen:","s":"Configuring an authentication method","u":"/docs/deploy-jupyterhub","h":"#configuring-an-authentication-method","p":198},{"i":208,"t":"Fill in any username and password combination you would like and the user account will be made. Note that this user account really is made and has its own user pod in the deployment. It has a persistent volume as well and all other properties like any other user account that will be made. However you can use whatever password you fill in to access this account. In other words do not use this user actively and definitely do not store any (sensitive) data in this user account! allowed_users / admin_users authentication If you will be working on your own in your JupyterHub instance it will be easiest to use the allowed_users / admin_users authentication method. This method will let you specify a user and admin account with a shared password. It is important that you keep this password a secret and safe! If people get their hands on it they can access your JupyterHub instance and log in as an admin, which can lead to hefty consequences. If you want to make use of this config uncomment the following block of text and comment out the previous block of text seen at the Dummy authentication section above: hub: # ... config: Authenticator: admin_users: - admin allowed_users: - user1 DummyAuthenticator: password: a-shared-secret-password JupyterHub: authenticator_class: dummy Note that this password is in plaintext in your config.yaml. Do not use a password you use for other accounts; this is never a good idea and is surely not a good idea in this case! Unfortunately it is not possible to set passwords in JupyterHub using secrets in the DSRI at the moment. If you need to share your JupyterHub instance with others we recommend using the GitHub OAuth authentication method described below. GitHub OAuth authentication This authentication method is the most secure option we provide at the moment. The major caveat is that you and the people you want to collaborate with need a GitHub account. Moreover, you will need to create an organization and team within that organization, or have access to an organization and team. You grant the people authorization to log in to the JupyterHub instance with their GitHub account by adding them to a team in an organization in GitHub. hub: # ... config: GitHubOAuthenticator: client_id: your-client-id client_secret: your-client-secret oauth_callback_url: https://-.apps.dsri2.unimaas.nl/hub/oauth_callback JupyterHub: authenticator_class: github For creating an OAuth app in GitHub please refer to GitHub's documentation. The GitHub OAuth app will provide your client ID and client secret. The and you provided yourself in the previous steps, fill those in accordingly. To set up an organization and team, please refer to GitHub's documentation as well.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":211,"t":"Before you begin download the config.yaml Download the preconfigured config.yaml from our GitHub repository.
The default config that is provided by JupyterHub will not work.","s":"Deploying JupyterHub using the DSRI website 🪐","u":"/docs/deploy-jupyterhub","h":"#deploying-jupyterhub-using-the-dsri-website-","p":198},{"i":213,"t":"After you have created a project you can start with installing the JupyterHub Helm Chart. If you do not have access to DSRI or created a project yet, and you need to find out how, please refer to our documentation. Helm Chart already available The Helm Chart should be already made available for everyone to use on the DSRI platform. There will be no need to install the repository yourself.","s":"Installing the JupyterHub Helm Chart repository","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart-repository","p":198},{"i":215,"t":"In Developer mode in your project, go to Helm in the sidepanel (1). Next, click on Create and choose Repository (2).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":217,"t":"Then fill in the Name, Display Name, give it a Description and fill in the URL: https://hub.jupyter.org/helm-chart/.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":219,"t":"Next, click Create.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":222,"t":"info At the moment the latest -and only- Helm Chart version which is supported by DSRI is version 3.3.8. Newer versions will not work, and older versions are not tested and/or configured!","s":"Installing the JupyterHub Helm Chart","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart","p":198},{"i":224,"t":"In Developer mode in your project, go to Helm in the sidepanel (1). Next, click on Create and choose Helm Release (2)","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":226,"t":"Search for jupyterhub (or the name you gave the repository if you added the repository yourself), and choose the JupyterHub Helm Chart (1).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":228,"t":"Click Create.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":230,"t":"Click the Chart version drop down menu (1).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":232,"t":"And choose the right Chart version: 3.3.8 (1). Note that this is an important step, as we only support version 3.3.8 at the moment. Newer versions do not work yet and older versions we did not configure and/or test!","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":234,"t":"Now, change the config with the content of the config.yaml you have downloaded from our GitHub repository. Copy the content of the config.yaml and paste it in the highlighted box to replace the old with the new config. Click Create to install the JupyterHub Helm Chart.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":237,"t":"Create a secured route, with TLS edge termination. In Developer mode in your project, go to Project in the sidepanel (1). Next, click on Route (2).","s":"Creating a secured route","u":"/docs/deploy-jupyterhub","h":"#creating-a-secured-route","p":198},{"i":239,"t":"Next, click Create.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":241,"t":"Fill in the Name (1), choose the Service: proxy-public (2), choose the Target Port: 80 -> http (TCP) (3), tick the box Secure Route (4), and finally choose TLS Termination: Edge (5). 
Next, click Create to create the route.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":243,"t":"You can easily upgrade your config.yaml in the DSRI web UI if you would like to change certain settings, such as users' default persistent volume claims, authentication methods, and many more things. Note that in some cases users who created an account with an old authentication method will still have access via that method; make sure you set up your preferred authentication method before allowing users to authenticate and use the JupyterHub instance.","s":"Upgrading the config.yaml","u":"/docs/deploy-jupyterhub","h":"#upgrading-the-configyaml","p":198},{"i":245,"t":"In Developer mode in your project, go to Helm in the sidepanel (1). Next, click on your Helm Chart Release (2).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":247,"t":"Now, click the Actions drop down menu, and choose Upgrade (1).","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":249,"t":"In the box highlighted in the picture below you can make changes to the config.yaml. After you have made your changes, click Upgrade and your upgraded JupyterHub Helm Chart Release will automatically be deployed.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":251,"t":"Configure JupyterHub Feel free to submit a ticket to ask for help configuring your JupyterHub.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":254,"t":"Before you begin, download the config.yaml: download the preconfigured config.yaml from our GitHub repository. The default config that is provided by JupyterHub will not work.","s":"Deploying JupyterHub using the Command Line Interface (CLI) 🪐","u":"/docs/deploy-jupyterhub","h":"#deploying-jupyterhub-using-the-command-line-interface-cli-","p":198},{"i":256,"t":"After you have created a project you can start with installing the JupyterHub Helm Chart. If you do not have access to the DSRI or have not created a project yet, and you need to find out how, please refer to our documentation. Helm Chart already available The Helm Chart should already be available for everyone to use on the DSRI platform. There will be no need to install the repository yourself.","s":"Installing the JupyterHub Helm Chart repository","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart-repository-1","p":198},{"i":258,"t":"Add the JupyterHub Helm Chart repository:
helm repo add jupyterhub https://hub.jupyter.org/helm-chart/
helm repo update","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":260,"t":"info At the moment the latest (and only) Helm Chart version which is supported by DSRI is version 3.3.8. Newer versions will not work, and older versions are not tested and/or configured!","s":"Installing the JupyterHub Helm Chart","u":"/docs/deploy-jupyterhub","h":"#installing-the-jupyterhub-helm-chart-1","p":198},{"i":262,"t":"Make sure you use the right config.yaml downloaded from our GitHub repository. Install the Helm Chart using the following command:
helm upgrade --cleanup-on-fail \\
  --install jupyterhub jupyterhub/jupyterhub \\
  --version=3.3.8 \\
  --namespace=<namespace> \\
  --values config.yaml
<namespace> is the name of the namespace your project is in.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":264,"t":"Create a secured route, with TLS edge termination:
oc create route edge <route-name> --namespace <namespace> --service=proxy-public --port=http
<namespace> is the name of the namespace your project is in. <route-name> is the name of the route.","s":"Creating a secured route","u":"/docs/deploy-jupyterhub","h":"#creating-a-secured-route-1","p":198},
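If you would rather script your changes to the config.yaml than edit it by hand before running the upgrade command below, here is a minimal sketch (assuming a Python environment with PyYAML installed; the 5Gi capacity is just an example value, and note that rewriting the file this way drops YAML comments):

import yaml

# Load the preconfigured config.yaml downloaded from the GitHub repository
with open("config.yaml") as f:
    config = yaml.safe_load(f) or {}

# Example change: bump the default per-user volume size
# (see the "Setting user's default persistent volume size" section above)
config.setdefault("singleuser", {}).setdefault("storage", {})["capacity"] = "5Gi"

with open("config.yaml", "w") as f:
    yaml.safe_dump(config, f, default_flow_style=False, sort_keys=False)

The helm upgrade command in the next section will then pick up the new values from the modified file.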
{"i":266,"t":"Run the following command with your new config.yaml:
helm upgrade --cleanup-on-fail \\
  --install jupyterhub jupyterhub/jupyterhub \\
  --version=3.3.8 \\
  --namespace=<namespace> \\
  --values config.yaml
<namespace> is the name of the namespace your project is in. Note that the namespace should be the same namespace as the one where your original deployment was initiated!","s":"Upgrading the config.yaml","u":"/docs/deploy-jupyterhub","h":"#upgrading-the-configyaml-1","p":198},{"i":268,"t":"Configure JupyterHub Feel free to submit a ticket to ask for help configuring your JupyterHub.","s":"JupyterHub","u":"/docs/deploy-jupyterhub","h":"","p":198},{"i":270,"t":"Guides Parallelization using Dask","s":"Parallelization using Dask","u":"/docs/dask-tutorial","h":"","p":269},{"i":272,"t":"!pip install \"dask[complete]\"
import dask
dask.__version__
'2023.5.0'
import dask.array as da
import dask.bag as db
import dask.dataframe as dd
import numpy as np
import pandas as pd","s":"🧊 Installation","u":"/docs/dask-tutorial","h":"#-installation","p":269},{"i":274,"t":"At a high level, you can think of Dask as a wrapper that extends the capabilities of traditional tools like pandas, NumPy, and Spark to handle larger-than-memory datasets. When faced with large objects like larger-than-memory arrays (vectors) or matrices (dataframes), Dask breaks them up into chunks, also called partitions. For example, consider the array of 12 random numbers in both NumPy and Dask:
narr = np.random.rand(12)
narr
array([0.44236558, 0.00504448, 0.87087911, 0.468925 , 0.37513511, 0.22607761, 0.83035297, 0.07772372, 0.61587933, 0.82861156, 0.66214299, 0.90979423])
darr = da.from_array(narr, chunks=3)
darr
The image above shows that the Dask array contains four chunks as we set chunks to 3. Under the hood, each chunk is a NumPy array in itself. To fully appreciate the benefits of Dask, we need a large dataset, preferably over 1 GB in size. Consider the autogenerated data from the script below:
import string

# Set the desired number of rows and columns
num_rows = 5_000_000
num_cols = 10
chunk_size = 100_000

# Define an empty DataFrame to store the chunks
df_chunks = pd.DataFrame()

# Generate and write the dataset in chunks
for i in range(0, num_rows, chunk_size):
    # Generate random numeric data
    numeric_data = np.random.rand(chunk_size, num_cols)
    # Generate random categorical data
    letters = list(string.ascii_uppercase)
    categorical_data = np.random.choice(letters, (chunk_size, num_cols))
    # Combine numeric and categorical data into a Pandas DataFrame
    df_chunk = pd.DataFrame(np.concatenate([numeric_data, categorical_data], axis=1))
    # Set column names for better understanding
    column_names = [f'Numeric_{i}' for i in range(num_cols)] + [f'Categorical_{i}' for i in range(num_cols)]
    df_chunk.columns = column_names
    # Append the current chunk to the DataFrame holding all chunks
    df_chunks = pd.concat([df_chunks, df_chunk], ignore_index=True)
    # Write the DataFrame chunk to a CSV file incrementally
    if (i + chunk_size) >= num_rows or (i // chunk_size) % 10 == 0:
        df_chunks.to_csv('large_dataset.csv', index=False, mode='a', header=(i == 0))
        df_chunks = pd.DataFrame()
dask_df = dd.read_csv(\"large_dataset.csv\")
dask_df.head()
Even though the file is large, you will notice that the result is fetched almost instantaneously. For even larger files, you can specify the blocksize parameter, which determines the number of bytes to break up the file into. 
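To make the blocksize parameter concrete, here is a minimal sketch (using the large_dataset.csv generated by the script above; the 25 MB block size is just an example):

import dask.dataframe as dd

# Read the CSV in ~25 MB blocks; each block becomes one partition
dask_df = dd.read_csv("large_dataset.csv", blocksize=25e6)

# Each partition is a small pandas DataFrame; nothing is loaded yet
print(dask_df.npartitions)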
Similar to how a Dask array contains chunks of small NumPy arrays, a Dask DataFrame is composed of multiple small pandas DataFrames arranged along the row index.","s":"🪐 Basic Concepts of Dask","u":"/docs/dask-tutorial","h":"#-basic-concepts-of-dask","p":269},{"i":276,"t":"In this example, we're doing some pretty straightforward column operations on our Dask DataFrame, called dask_df. We're adding the values from the column Numeric_0 to the result of multiplying the values from Numeric_9 and Numeric_3. We store the outcome in a variable named result.
result = (
    dask_df[\"Numeric_0\"]
    + dask_df[\"Numeric_9\"] * dask_df[\"Numeric_3\"]
)
result.compute().head()
As we’ve mentioned, Dask is a bit different from traditional computing tools in that it doesn't immediately execute these operations. Instead, it creates a kind of 'plan' called a task graph to carry out these operations later on. This approach allows Dask to optimize the computations and parallelize them when needed. The compute() function triggers Dask to finally perform these computations, and head() just shows us the first few rows of the result.","s":"✨ Selecting columns and element-wise operations","u":"/docs/dask-tutorial","h":"#-selecting-columns-and-element-wise-operations","p":269},{"i":278,"t":"Now, let's look at how Dask can filter data. We're selecting rows from our DataFrame where the value in the \"Categorical_5\" column is \"A\". This filtering process is similar to how you'd do it in pandas, but with a twist: Dask does this operation lazily. It prepares the task graph for this operation but waits to execute it until we call compute(). When we run head(), we get to see the first few rows of our filtered DataFrame.
dask_df[dask_df[\"Categorical_5\"] == \"A\"].compute().head()","s":"⚡️ Conditional filtering","u":"/docs/dask-tutorial","h":"#️-conditional-filtering","p":269},{"i":280,"t":"Next, we're going to generate some common summary statistics using Dask's describe() function. It gives us a handful of descriptive statistics for our DataFrame, including the mean, standard deviation, minimum, maximum, and so on. As with our previous examples, Dask prepares the task graph for this operation when we call describe(), but it waits to execute it until we call compute().
dask_df.describe().compute()
dask_df[\"Categorical_3\"].value_counts().compute().head()
We also use value_counts() to count the number of occurrences of each unique value in the \"Categorical_3\" column. We trigger the operation with compute(), and head() shows us the most common values.","s":"✨ Common summary statistics","u":"/docs/dask-tutorial","h":"#-common-summary-statistics","p":269},{"i":282,"t":"Finally, let's use the groupby() function to group our data based on values in the \"Categorical_8\" column. Then we select the \"Numeric_7\" column and calculate the mean for each group. This is similar to how you might use groupby() in pandas, but as you might have guessed, Dask does this lazily. We trigger the operation with compute(), and head() displays the average of the \"Numeric_7\" column for the first few groups.
dask_df.groupby(\"Categorical_8\")[\"Numeric_7\"].mean().compute().head()","s":"✨ Groupby","u":"/docs/dask-tutorial","h":"#-groupby","p":269},{"i":284,"t":"Now, let’s explore the use of the compute function at the end of each code block. Dask evaluates code blocks in lazy mode, compared to pandas’ eager mode, which returns results immediately. 
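A tiny illustration of the difference, reusing the imports from the installation section (a sketch, not part of the original tutorial):

import numpy as np
import dask.array as da

arr = da.from_array(np.arange(1_000_000), chunks=100_000)
lazy_mean = (arr ** 2).mean()  # returns instantly: only the task graph is built
print(lazy_mean)               # a lazy dask.array object, no numbers yet
print(lazy_mean.compute())     # now the chunks are actually processed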
To draw a parallel in cooking, lazy evaluation is like preparing ingredients and chopping vegetables in advance but only combining them to cook when needed. The compute function serves that purpose. In contrast, eager evaluation is like throwing ingredients into the fire to cook as soon as they are ready. This approach ensures everything is ready to serve at once. Lazy evaluation is key to Dask’s excellent performance as it provides:
Reduced computation. Expressions are evaluated only when needed (when compute is called), avoiding unnecessary intermediate results that may not be used in the final result.
Optimal resource allocation. Lazy evaluation avoids allocating memory or processing power to intermediate results that may not be required.
Support for large datasets. This method processes data elements on-the-fly or in smaller chunks, enabling efficient utilization of memory resources.
When the results of compute are returned, they are given as Pandas Series/DataFrames or NumPy arrays instead of native Dask DataFrames.
type(dask_df)
dask.dataframe.core.DataFrame
type(
    dask_df[[\"Numeric_5\", \"Numeric_6\", \"Numeric_7\"]].mean().compute()
)
pandas.core.series.Series
The reason for this is that most data manipulation operations return only a subset of the original dataframe, taking up much smaller space. So, there won’t be any need to use the parallelism of Dask, and you continue the rest of your workflow either in pandas or NumPy. 🪐 Dask Bags and Dask Delayed for Unstructured Data Dask Bags and Dask Delayed are two components of the Dask library that provide powerful tools for working with unstructured or semi-structured data and enabling lazy evaluation. While in the past, tabular data was the most common, today’s datasets often involve unstructured files such as images, text files, videos, and audio. Dask Bags provides the functionality and API to handle such unstructured files in a parallel and scalable manner. For example, let’s consider a simple illustration:
# Create a Dask Bag from a list of strings
b = db.from_sequence([\"apple\", \"banana\", \"orange\", \"grape\", \"kiwi\"])
# Filter the strings that start with the letter 'a'
filtered_strings = b.filter(lambda x: x.startswith(\"a\"))
# Map a function to convert each string to uppercase
uppercase_strings = filtered_strings.map(lambda x: x.upper())
# Compute the result as a list
result = uppercase_strings.compute()
print(result)
['APPLE']
In this example, we create a Dask Bag b from a list of strings. We then apply operations on the Bag to filter the strings that start with the letter 'a' and convert them to uppercase using the filter() and map() functions, respectively. Finally, we compute the result as a list using the compute() method and print the output. Now imagine that you can perform even more complex operations on billions of similar strings stored in a text file. Without the lazy evaluation and parallelism offered by Dask Bags, you would face significant challenges. As for Dask Delayed, it provides even more flexibility and introduces lazy evaluation and parallelism to various other scenarios. With Dask Delayed, you can convert any native Python function into a lazy object using the @dask.delayed decorator. 
Here is a simple example:
%%time
import time

@dask.delayed
def process_data(x):
    # Simulate some computation
    time.sleep(1)
    return x**2

# Generate a list of inputs
inputs = range(1000)
# Apply the delayed function to each input
results = [process_data(x) for x in inputs]
# Compute the results in parallel
computed_results = dask.compute(*results)
CPU times: user 260 ms, sys: 68.1 ms, total: 328 ms
Wall time: 32.2 s
In this example, we define a function process_data decorated with @dask.delayed. The function simulates some computational work by sleeping for 1 second and then returning the square of the input value. Without parallelism, performing this computation on 1000 inputs would have taken more than 1000 seconds. However, with Dask Delayed and parallel execution, the computation only took about 32 seconds. This example demonstrates the power of parallelism in reducing computation time by efficiently distributing the workload across multiple cores or workers. That’s what parallelism is all about. For more information see https://docs.dask.org/en/stable/","s":"⚡️ Lazy evaluation","u":"/docs/dask-tutorial","h":"#️-lazy-evaluation","p":269},{"i":286,"t":"Deploy applications Data Science catalog Matlab","s":"Matlab","u":"/docs/deploy-matlab","h":"","p":285},{"i":288,"t":"Start Matlab with a desktop UI accessible directly in your web browser at an automatically generated URL. Go to the Catalog, make sure Templates are displayed (box checked), search for Matlab, and provide the right parameters: You will need to provide the password you will use to access the Matlab UI when filling the template. You can also change the Matlab image version, see the latest version released in the official Matlab Docker image documentation. Once Matlab starts you can access it through 2 routes (URLs), which can be accessed when clicking on the Matlab node in the Topology: The main matlab route to access the Matlab desktop UI directly in your web browser. It is recommended to use this route. The matlab-vnc route can be used to access Matlab using a VNC client (you will need to use the full URL to your Matlab VNC route). Only use it if you know what you're doing.","s":"Use the official Matlab image","u":"/docs/deploy-matlab","h":"#use-the-official-matlab-image","p":285},{"i":290,"t":"The official Matlab image is infamous for showing a black screen after a few hours of use, making it cumbersome to use reliably. 
We have a solution if you need a more stable Matlab image, but it requires a few more manual steps:
Use the Ubuntu with GUI template to set up an Ubuntu pod on the DSRI with the image ghcr.io/vemonet/docker-ubuntu-vnc-desktop:latest
Start firefox and browse to https://nl.mathworks.com
Login with your personal Matlab account, create one if you don’t have it
Choose get matlab and download the linux matlab version
Open a terminal window and run the following commands:
sudo apt-get update
sudo apt-get install unzip
# Unzip the previously downloaded Matlab installation file
# Start the Matlab installation with:
sudo ./install
You will then be taken through the Matlab installation process:
Fill in your personal Matlab account credentials
⚠️ Fill in the username as used in the Ubuntu environment, in your case it will most probably be root (Matlab gives a license error if this is not correct, check with whoami in the terminal when in doubt)
Select the Matlab modules you want to be installed
Check \"symbolic link\" and \"Improve……\"","s":"Use a stable Matlab image","u":"/docs/deploy-matlab","h":"#use-a-stable-matlab-image","p":285},{"i":292,"t":"You can also use mathworks/jupyter-matlab-proxy. You can easily install it in a JupyterLab image with pip:
pip install jupyter-matlab-proxy
Follow the instructions on the mathworks/jupyter-matlab-proxy repository to access it.","s":"Use Matlab in Jupyter","u":"/docs/deploy-matlab","h":"#use-matlab-in-jupyter","p":285},{"i":294,"t":"We use the Matlab template in the DSRI catalog to deploy a pre-built Nvidia Matlab Deep Learning Container on CPU or GPU nodes. See the official documentation from MathWorks for more details about this image. Request access to Matlab To be able to access the Matlab on GPU template you will need to ask the DSRI admins to enable it in your project. 2 options are available to connect to your running Matlab pod terminal: Go to the matlab pod page on the DSRI web UI. Or connect from your terminal with oc rsh MATLAB_POD_ID. Type bash when first accessing the terminal to have a better experience. Type cd /ContainerDeepLearningData to go to the persistent volume, and use this volume to store all data that should be preserved. Type matlab to access Matlab from the terminal. It is possible to access the Matlab desktop UI through VNC and a web UI, but the script to start it in /bin/run.sh seems to face some errors, let us know if you have any luck with this. By default the image runs with the matlab user, which does not have sudo privileges; you can run the container as root if you need to install packages which require admin privileges.","s":"Deploy Matlab on GPU","u":"/docs/deploy-matlab","h":"#deploy-matlab-on-gpu","p":285},{"i":296,"t":"Follow the instructions at: https://github.com/mathworks-ref-arch/matlab-dockerfile This will require you to retrieve the Matlab installation files to build your own container. Once all the files have been properly placed in the folder and the license server URL has been set, you can start the build on DSRI by following the documentation to deploy from a Dockerfile. License server not available on your laptop If you try to build Matlab directly on your laptop it will most probably fail, as your machine might not have access to the license server. You will need to build the Matlab container directly on DSRI with oc start-build. Once Matlab is deployed, you will need to edit the matlab deployment YAML before it works. 
Go to Topology, click on the Matlab node, click on the Actions button of the matlab details, and Edit deployment. In the deployment YAML search for the spec: which has a containers: as child, and add the following under spec:
spec:
  serviceAccountName: anyuid
  containers: ...
Your Matlab container should now be running! 2 options are available to connect to your running Matlab pod terminal: Go to the matlab pod page on the DSRI web UI. Or connect from your terminal with oc rsh MATLAB_POD_ID. You can access Matlab from the terminal by running matlab. Unfortunately Matlab did not expect their users to need the graphical UI when using Matlab in containers, so only the command line is available by default. You can find more information to enable the Matlab UI in this issue.","s":"Build your own Matlab image","u":"/docs/deploy-matlab","h":"#build-your-own-matlab-image","p":285},{"i":298,"t":"Deploy applications Data Science catalog Spark cluster","s":"Spark cluster","u":"/docs/deploy-spark","h":"","p":297},{"i":300,"t":"Once the DSRI admins have enabled the Spark Operator in your project, you should find a Spark Cluster entry in the Catalog (in the Operator Backed category)","s":"Deploy a Spark cluster","u":"/docs/deploy-spark","h":"#deploy-a-spark-cluster","p":297},{"i":302,"t":"Click on the Spark Cluster entry to deploy a Spark cluster. You will be presented with a form where you can provide the number of Spark workers in your cluster. Additionally you can provide a label, which can be helpful later to manage or delete the cluster; use the name of your application and the label app, e.g.: app=my-spark-cluster Change The number of Spark workers can be easily updated later in the Spark deployment YAML file.","s":"Deploy the cluster from the catalog","u":"/docs/deploy-spark","h":"#deploy-the-cluster-from-the-catalog","p":297},{"i":304,"t":"Once the cluster has been started you can create a route to access the Spark web UI: Go to Search > Click on Resources and search for Route > Click on Route. You should now see the routes deployed in your project. Click on the button Create Route. Give a short meaningful name to your route, e.g. my-spark-ui. Keep Hostname and Path as they are. Select the Service corresponding to your Spark cluster suffixed with -ui, e.g. my-spark-cluster-ui. 
Select the Target Port of the route, it should be 8080. You can now access the Spark web UI at the generated URL to see which jobs are running and the nodes in your cluster.","s":"Create a route to the Spark dashboard","u":"/docs/deploy-spark","h":"#create-a-route-to-the-spark-dashboard","p":297},{"i":306,"t":"You can now start a spark-enabled JupyterLab, or any other spark-enabled applications, to use the Spark cluster deployed.","s":"Run on Spark","u":"/docs/deploy-spark","h":"#run-on-spark","p":297},{"i":308,"t":"The easiest is to use a Spark-enabled JupyterLab image, such as jupyter/pyspark-notebook. But you can also use any image as long as you download the jar file, install all requirements, such as pyspark, and set the right environment variable, such as SPARK_HOME. Connect to a Spark cluster deployed in the same project, replace spark-cluster by your Spark cluster name:
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession

# Stop existing Spark Context
spark = SparkSession.builder.master(\"spark://spark-cluster:7077\").getOrCreate()
spark.sparkContext.stop()

# Connect to the Spark cluster
conf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077')
sc = SparkContext(conf=conf)

# Run basic Spark test
x = ['spark', 'rdd', 'example', 'sample', 'example']
y = sc.parallelize(x)
y.collect()","s":"Using PySpark","u":"/docs/deploy-spark","h":"#using-pyspark","p":297},{"i":310,"t":"SANSA is a big data engine for scalable processing of large-scale RDF data. SANSA uses Spark, or Flink, which offer fault-tolerant, highly available and scalable approaches to efficiently process massive sized datasets. SANSA provides the facilities for Semantic data representation, Querying, Inference, and Analytics. Use the Zeppelin notebook for Spark template in the catalog to start a Spark-enabled Zeppelin notebook. You can find more information on the Zeppelin image at https://github.com/rimolive/zeppelin-openshift Connect and test Spark in a Zeppelin notebook, replace spark-cluster by your Spark cluster name:
%pyspark
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession

# Stop existing Spark Context
spark = SparkSession.builder.master(\"spark://spark-cluster:7077\").getOrCreate()
spark.sparkContext.stop()

# Connect to the Spark cluster
conf = SparkConf().setAppName('sansa').setMaster('spark://spark-cluster:7077')
sc = SparkContext(conf=conf)

# Run basic Spark test
x = [1, 2, 3, 4, 5]
y = sc.parallelize(x)
y.collect()
You should see the job running in the Spark web UI; kill the job with the kill button in the Spark dashboard. 
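Once the basic test above runs, here is a slightly more realistic sketch of submitting work through the DataFrame API (spark-cluster is again your Spark cluster name; the sample data is made up):

from pyspark.sql import SparkSession

spark = (SparkSession.builder
    .master("spark://spark-cluster:7077")
    .appName("dataframe-test")
    .getOrCreate())

# The aggregation below runs as jobs on the Spark workers
df = spark.createDataFrame([(1, "a"), (2, "b"), (3, "a")], ["id", "label"])
print(df.groupBy("label").count().collect())

spark.stop()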
You can now start to run your workload on the Spark cluster. Reset a Zeppelin notebook Click on the cogwheel in the top right of the note: Interpreter binding, and reset the interpreter. Use the official SANSA notebooks examples. See more examples: https://github.com/rimolive/zeppelin-openshift","s":"RDF analytics with SANSA and Zeppelin notebooks","u":"/docs/deploy-spark","h":"#rdf-analytics-with-sansa-and-zeppelin-notebooks","p":297},{"i":312,"t":"Instructions available at https://github.com/rimolive/ceph-spark-integration Requirements:
pip install boto
Check the example notebook for Ceph storage","s":"Connect Spark to the persistent storage","u":"/docs/deploy-spark","h":"#connect-spark-to-the-persistent-storage","p":297},{"i":314,"t":"Get all objects that are part of the Spark cluster, change app=spark-cluster to match your Spark cluster name:
oc get all,secret,configmaps --selector app=spark-cluster
Then delete the Operator deployment from the OpenShift web UI overview.","s":"Delete a running Spark cluster","u":"/docs/deploy-spark","h":"#delete-a-running-spark-cluster","p":297},{"i":316,"t":"Deploy applications Data Science catalog RStudio","s":"RStudio","u":"/docs/deploy-rstudio","h":"","p":315},{"i":318,"t":"Start an RStudio container based on the Rocker RStudio tidyverse images (Debian), with sudo privileges to install anything you need (e.g. pip or apt packages). You can start a container using the RStudio template in the Catalog web UI (make sure the Templates checkbox is checked). Provide a few parameters, and Instantiate the template. The username will be rstudio and the password will be what you configure yourself; the DSRI will automatically create a persistent volume to store the data you will put in the /home/rstudio folder. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. Official image documentation See the official Docker image documentation for more details about the container deployed.","s":"Start RStudio","u":"/docs/deploy-rstudio","h":"#start-rstudio","p":315},{"i":320,"t":"Start an RStudio application, with a complementary Shiny server, using a regular rstudio user without sudo privileges. Create the template in your project: In the DSRI web UI, go to + Add, then click on YAML, add the content of the template-rstudio-shiny-restricted.yml file, and validate. You can also do it using the terminal:
oc apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/templates/restricted/template-rstudio-shiny-restricted.yml
Once the template has been created in your project, use the RStudio with Shiny server template in the OpenShift web UI catalog. It will automatically create a persistent storage for the data. 
No sudo privileges You will not have sudo privileges in the application.","s":"Restricted RStudio with Shiny server","u":"/docs/deploy-rstudio","h":"#restricted-rstudio-with-shiny-server","p":315},{"i":322,"t":"The fastest way to get started is to use git from the terminal, for example to clone a git repository use git clone. You can also check how to enable Git integration in RStudio at https://support.rstudio.com/hc/en-us/articles/200532077 You can run this command to ask git to save your password for 15 minutes:
git config credential.helper cache
Or store the password/token in a plain text file:
git config --global credential.helper 'store --file ~/.git-credentials'
Before pushing back to GitHub or GitLab, you will need to configure your username and email in the terminal:
git config --global user.name \"Jean Dupont\"
git config --global user.email jeandupont@gmail.com
Git tip We recommend using SSH instead of HTTPS connections when possible; check out how to generate SSH keys and use them with your GitHub account.","s":"Use Git in RStudio","u":"/docs/deploy-rstudio","h":"#use-git-in-rstudio","p":315},{"i":324,"t":"You can visit this folder, which gives all resources and instructions to explain how to run a standalone R job on the DSRI: https://github.com/MaastrichtU-IDS/dsri-demo/tree/main/r-job If you want to run jobs directly from RStudio, check out this package to run chunks of R code as jobs directly through RStudio: https://github.com/lindeloev/job","s":"Run R jobs","u":"/docs/deploy-rstudio","h":"#run-r-jobs","p":315},{"i":326,"t":"Deploy applications Data Science catalog VisualStudio Code","s":"VisualStudio Code","u":"/docs/deploy-vscode","h":"","p":325},{"i":328,"t":"Start a VisualStudio Code server with the coder user, which has sudo privileges. You can deploy it using the VisualStudio Code server solution in the Catalog web UI (make sure the Templates checkbox is checked). Provide a few parameters, and instantiate the template. The DSRI will automatically create a persistent volume to store the data you will put in the /home/coder/project folder. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims.","s":"Start VisualStudio Code server","u":"/docs/deploy-vscode","h":"#start-visualstudio-code-server","p":325},{"i":330,"t":"The easiest way to log in and clone a repository from GitHub is to use the built-in authentication system of VisualStudio Code: to do so click on clone repository... in the Welcome page, and follow the instructions at the top of the VisualStudio window. If this solution does not work for you, you can use git from the terminal to clone the git repository with git clone. VisualStudio might ask you to log in in the dialog box at the top of the page; enter your username and password when requested. For GitHub you might need to generate a token at https://github.com/settings/tokens to use as password. Once the repository is cloned, you can use git from the VSCode web UI to manage your git repositories (add, commit, push changes), or in the terminal. 
Before committing to GitHub or GitLab, you might need to configure your username and email in the VSCode terminal:
git config --global user.name \"Jean Dupont\"
git config --global user.email jeandupont@gmail.com
Save your git password You can run this command to ask git to save your password for 15 minutes:
git config credential.helper cache
Or store the password in a plain text file:
git config --global credential.helper 'store --file ~/.git-credentials'
Git tip We recommend using SSH instead of HTTPS connections when possible; check out how to generate SSH keys and use them with your GitHub account.","s":"Use Git in VSCode","u":"/docs/deploy-vscode","h":"#use-git-in-vscode","p":325},{"i":332,"t":"See the Deploy on GPU page to deploy a VisualStudio Code server on GPU.","s":"VSCode for GPU","u":"/docs/deploy-vscode","h":"#vscode-for-gpu","p":325},{"i":334,"t":"Miscellaneous Enabling VPN access in WSL2","s":"Enabling VPN access in WSL2","u":"/docs/enabling-vpn-wsl","h":"","p":333},{"i":336,"t":"Create a file in /etc/wsl.conf:
[network]
generateResolvConf = false
This makes sure that WSL2 does not generate its own resolv.conf anymore. Edit the file /etc/resolv.conf and add the appropriate nameservers:
nameserver 137.120.1.1
nameserver 137.120.1.5
nameserver 8.8.8.8 # OR OF YOUR CHOOSING
search unimaas.nl
These are all the steps you should take in WSL2. Now you should do the following step after you have connected to the VPN. You can run this command in PowerShell:
Get-NetAdapter | Where-Object {$_.InterfaceDescription -Match \"Cisco AnyConnect\"} | Set-NetIPInterface -InterfaceMetric 6000
You should now be able to verify that WSL2 has connectivity:
ping google.com -c 4","s":"Follow these steps in the WSL2 environment:","u":"/docs/enabling-vpn-wsl","h":"#follow-these-steps-in-the-wsl2-environment","p":333},{"i":338,"t":"Guides Glossary","s":"Glossary","u":"/docs/glossary","h":"","p":337},{"i":341,"t":"Kubernetes is a portable, extensible, open-source platform for managing containerized workloads and services, that facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem. Kubernetes services, support, and tools are widely available. Kubernetes, also known as K8s, is an open-source system for automating deployment, scaling, and management of containerized applications. More information: https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/","s":"Kubernetes","u":"/docs/glossary","h":"#kubernetes","p":337},{"i":343,"t":"Red Hat OpenShift is a hybrid cloud, enterprise Kubernetes application platform, trusted by 2,000+ organizations. It includes: a container host and runtime, enterprise Kubernetes, validated integrations, an integrated container registry, developer workflows, and easy access to services.","s":"OpenShift","u":"/docs/glossary","h":"#openshift","p":337},{"i":345,"t":"OKD is a distribution of Kubernetes optimized for continuous application development and multi-tenant deployment. OKD adds developer and operations-centric tools on top of Kubernetes to enable rapid application development, easy deployment and scaling, and long-term lifecycle maintenance for small and large teams. 
OKD is a sibling Kubernetes distribution to Red Hat OpenShift. See the OKD 4 Documentation.","s":"OKD","u":"/docs/glossary","h":"#okd","p":337},{"i":347,"t":"Deploy applications GPU applications","s":"GPU applications","u":"/docs/deploy-on-gpu","h":"","p":346},{"i":349,"t":"You will first need to start your workspace without the GPU enabled; you can then prepare your experiments: clone the code, download the data, prepare scripts to install all requirements (the workspace will be restarted when you enable the GPU).","s":"Prepare your GPU workspace","u":"/docs/deploy-on-gpu","h":"#prepare-your-gpu-workspace","p":346},{"i":351,"t":"We are mainly using images provided by Nvidia, with all required drivers and optimizations for GPU pre-installed. You can access the workspace with JupyterLab and VisualStudio Code in your browser, and install dependencies with apt-get, conda or pip in the workspace. We currently mainly use Tensorflow, PyTorch and CUDA, but any image available in the Nvidia catalog should be easy to deploy. Check out this documentation for more details on how we build the optimized docker images for the DSRI GPUs. And feel free to extend the images to install any software you need.","s":"About the docker images","u":"/docs/deploy-on-gpu","h":"#about-the-docker-images","p":346},{"i":353,"t":"You can easily deploy your GPU workspace from the DSRI catalog: Go to the DSRI Catalog web UI: Click on Add to Project, then Browse Catalog. Search the catalog for \"GPU\", and make sure the Template checkbox is enabled. Choose the template: JupyterLab on GPU. Follow the instructions to create the template in the DSRI web UI, all information about the images you can use is provided there. The most notable is the base image you want to use for your workspace (cuda, tensorflow or pytorch). Access the workspace from the route created (the small arrow at the top right of your application bubble in the Topology page).","s":"Deploy the workspace","u":"/docs/deploy-on-gpu","h":"#deploy-the-workspace","p":346},{"i":355,"t":"You can now add your code and data in the persistent folder to be fully prepared when you get access to the GPUs. You can install dependencies with apt-get, conda or pip. We recommend you to use scripts stored in the persistent folder to easily install all your requirements, so you can reinstall them when we enable the GPU, as it restarts the workspace. For more information on how to use conda/mamba to install new dependencies or a complete environment (useful if you need to use a different version of python than the one installed by default) check out this page. ⚠️ We recommend you to also try and debug your code on a small sample using the CPU before getting the GPU; this way you will be able to directly start long-running tasks when you get the GPU, instead of losing time debugging your code (it's probably not going to work on the first try, you know it). You can find more details on the images we use and how to extend them in this repository. Storage Use the /workspace/persistent folder, which is the JupyterLab workspace, to store your code and data persistently. Note that loading data from the persistent storage will be slower than you might expect; this is due to the nature of the distributed storage. 
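One way to soften this is to pay the slow read only once per session: copy the data from the distributed volume to the node-local filesystem, then work on the local copy. A minimal sketch of this pattern (the dataset path is hypothetical):

import shutil
from pathlib import Path

persistent = Path("/workspace/persistent/data/dataset.csv")  # hypothetical dataset in the persistent volume
local = Path("/tmp/dataset.csv")                             # node-local scratch space

if not local.exists():
    shutil.copy(persistent, local)  # slow distributed read, paid only once

# All subsequent epochs/iterations read the fast local copy instead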
So try to optimize this part and avoid reloading your data multiple times, and let us know if it is too much of a problem, we have some solutions to improve this.","s":"Prepare the workspace","u":"/docs/deploy-on-gpu","h":"#prepare-the-workspace","p":346},{"i":357,"t":"You will receive an email when the GPU has been enabled in your project. You can then update your deployment to use the GPUs using either the oc command-line tool, or by editing the deployment configuration from the web UI. With the Command Line Interface, run the following command from the terminal of your laptop after having installed the oc command-line tool. We use jupyterlab-gpu as the deployment name in this example; change it to yours if it is different.
oc patch dc/jupyterlab-gpu --type=json -p='[{\"op\": \"replace\", \"path\": \"/spec/template/spec/containers/0/resources\", \"value\": {\"requests\": {\"nvidia.com/gpu\": 1}, \"limits\": {\"nvidia.com/gpu\": 1}}}]'
Or through the web UI: In the Topology view click on the circle representing your GPU application, then click on the Actions button in the top right of the screen, and click on Edit Deployment Config at the bottom of the list. In the Deployment Config text editor, hit ctrl + f to search for \"resources\". You should see a line - resources: {} under containers:. You need to change this line to the following to enable GPU in your application (and make sure the indentation matches the rest of the file):
- resources:
    requests:
      nvidia.com/gpu: 1
    limits:
      nvidia.com/gpu: 1
Then wait for the pod to restart, or start it if it was stopped. You can use the following command in the terminal of your container on the DSRI to see the current GPU usage:
nvidia-smi
Windows When using the above command with the oc client on Windows you might receive an error like: error: unable to parse \"'[{op:\": yaml: found unexpected end of stream This is because the single quotation mark is handled differently on Windows. Try replacing the single quotation marks in the command with double quotation marks and the command should work.","s":"Enable the GPU","u":"/docs/deploy-on-gpu","h":"#enable-the-gpu","p":346},
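Once the pod has restarted with the GPU attached, it is worth confirming that your framework actually sees the device before launching a long run. A quick check from Python, assuming one of the PyTorch-based images mentioned above (use your framework's equivalent call otherwise):

import torch

# True if the container sees a CUDA device
print(torch.cuda.is_available())
if torch.cuda.is_available():
    # e.g. the model of the allocated Nvidia card
    print(torch.cuda.get_device_name(0))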
{"i":359,"t":"The GPU allocated to your workspace will be automatically disabled after your booking ends, at 9:00. You can also manually disable the GPU from your app; the pod will be restarted automatically on a CPU node:
oc patch dc/jupyterlab-gpu --type=json -p='[{\"op\": \"replace\", \"path\": \"/spec/template/spec/containers/0/resources\", \"value\": {}}]'","s":"Disable the GPU","u":"/docs/deploy-on-gpu","h":"#disable-the-gpu","p":346},{"i":361,"t":"If you have been granted a 2nd GPU to speed up your experiment you can easily upgrade the number of GPUs used by your workspace: From the Topology view click on your application: Stop the application, by decreasing the number of pods to 0 (in the Details tab). Click on Options > Edit Deployment > in the YAML of the deployment search for limits and change the number of GPUs assigned to your deployment to 2:
resources:
  limits:
    nvidia.com/gpu: '2'
  requests:
    nvidia.com/gpu: '2'
You can also do it using the command line, make sure to stop the pod first, and replace jupyterlab-gpu by your app name in this command:
oc patch dc/jupyterlab-gpu --type=json -p='[{\"op\": \"replace\", \"path\": \"/spec/template/spec/containers/0/resources\", \"value\": {\"requests\": {\"nvidia.com/gpu\": 2}, \"limits\": {\"nvidia.com/gpu\": 2}}}]'
Restart the pod for your application (the same way you stopped it)","s":"Increase the number of GPUs","u":"/docs/deploy-on-gpu","h":"#increase-the-number-of-gpus","p":346},{"i":363,"t":"You can also install the GPU drivers in any image and use this image directly. See the latest official Nvidia docs to install the nvidia-container-runtime, which should contain all packages and drivers required to access the GPU from your application. Here is an example of commands to add to a debian based Dockerfile to install the GPU drivers (note that this is not complete, you will need to check the latest instructions and do some research & development to get it to work):
RUN curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | \\
    apt-key add - \\
    && distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \\
    && curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \\
    tee /etc/apt/sources.list.d/nvidia-container-runtime.list
RUN apt-get update \\
    && apt-get install -y nvidia-container-runtime
Then, build your image in your DSRI project using oc from the folder where you put the Dockerfile (replace custom-app-gpu by your app name):
oc new-build --name custom-app-gpu --binary
oc start-build custom-app-gpu --from-dir=. --follow --wait
oc new-app custom-app-gpu
You will then need to edit the deployment to set the serviceAccountName: anyuid and add a persistent storage:
oc edit custom-app-gpu
Finally, when your reservation starts, check out the section above about how to enable the GPU in your workspace. See also: the official Nvidia docs for CUDA","s":"Install GPU drivers in any image","u":"/docs/deploy-on-gpu","h":"#install-gpu-drivers-in-any-image","p":346},{"i":365,"t":"Guides Known Issues","s":"Known Issues","u":"/docs/guide-known-issues","h":"","p":364},{"i":367,"t":"Sometimes you can no longer access the data you put in the persistent folder of your container. This can be due to a node going down: if the persistent volume your pod is connected to is on this node, then the pod cannot access it anymore. You can easily fix this issue by restarting the pod of your application; it will make it properly connect to resources on nodes that are up. 
To restart the pod, go to Topology, click on your application, go to the Details tab, and decrease the pod count to 0, then put it back up to 1.","s":"Cannot access your data in the persistent folder","u":"/docs/guide-known-issues","h":"#cannot-access-your-data-in-the-persistent-folder","p":364},{"i":369,"t":"Pod or Deployment will not start You could run into the following message in the Events tab:
Error: kubelet may be retrying requests that are timing out in CRI-O due to system load. Currently at stage container volume configuration: context deadline exceeded: error reserving ctr name
The issue above will occur if you are using a large persistent volume. It can be resolved by adding the following to your Deployment(Config):
spec:
  template:
    metadata:
      annotations:
        io.kubernetes.cri-o.TrySkipVolumeSELinuxLabel: 'true'
    spec:
      runtimeClassName: selinux
Take note of the indentation and the place in the file! An example of this can be found here:","s":"Large volumes","u":"/docs/guide-known-issues","h":"#large-volumes","p":364},{"i":371,"t":"Spot the issue If the Events tab shows this error:
--> Scaling filebrowser-case-1 to 1
error: update acceptor rejected my-app-1: pods for rc 'my-project/my-app-1' took longer than 600 seconds to become available
Then check for the application ImageStream in Build > Images, and you might see this for your application image:
Internal error occurred: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit.
You can solve this by creating a secret to log in to DockerHub in your project:
oc create secret docker-registry dockerhub-login --docker-server=docker.io --docker-username=dockerhub_username --docker-password=dockerhub_password --docker-email=example@mail.com
Linking the login secret to the default service account:
oc secrets link default dockerhub-login --for=pull
tip Logging in to DockerHub should raise the limitations. To definitely solve this issue you can publish the DockerHub image to the GitHub Container Registry. Follow these instructions on your laptop: Login to the GitHub Container Registry with docker login. Pull the docker image: docker pull myorg/myimage:latest Change its tag: docker tag myorg/myimage:latest ghcr.io/maastrichtu-ids/myimage:latest Push it back to the GitHub Container Registry: docker push ghcr.io/maastrichtu-ids/myimage:latest Image created automatically If the image does not exist, GitHub will create it automatically when you push it for the first time! You can then head to your organization Packages tab to see the package. Make it public By default new images are set as Private, go to your Package Settings, and click Change Visibility to set it as Public; this avoids the need to log in to pull the image. You can update the image if you want access to the latest version, you can set a GitHub Actions workflow to do so. 
Finally you will need to update your DSRI deployment, or template, to use the newly created image on ghcr.io, and redeploy the application with the new template.","s":"DockerHub pull limitations","u":"/docs/guide-known-issues","h":"#dockerhub-pull-limitations","p":364},{"i":373,"t":"Spot the issue If the terminal shows something like this:
cd /usr/local/src/work2/aerius-sample-sequencing/CD4K4ANXX
Trinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221
The function starts but at some point just exits without warnings or errors to the Windows folder; the DSRI container's terminal keeps running fine but never finishes. At some point a red label ''disconnected'' appears, the terminal stops, and the analysis never continues. Those two issues are due to the process running attached to the terminal. You should be able to easily run it using the \"Bash way\": add nohup at the beginning and & at the end. It will run in the background and all output that would have gone to the terminal will go to a file nohup.out in the repo:
nohup Trinity --seqType fq --max_memory 100G --CPU 64 --samples_file samples.txt --output /usr/local/src/work2/Trinity_output_zip_090221 &
To check if it is still running:
ps aux | grep Trinity
Be careful: make sure the terminal uses bash and not shell (\"sh\"). To use bash just type bash in the terminal: bash","s":"How to run function within a container ''in the background'","u":"/docs/guide-known-issues","h":"#how-to-run-function-within-a-container-in-the-background","p":364},{"i":375,"t":"danger ⚠️ remote: HTTP Basic: Access denied fatal: Authentication failed for This happens every time we are forced to change the Windows password. Run this command from PowerShell (run as administrator):
git config --system --unset credential.helper
And then remove the gitconfig file from the C:\\Program Files\\Git\\mingw64/etc/ location (note: this path will be different on Mac, e.g. \"/Users/username\"). After that, use a git command like git pull or git push; it will ask for your username and password. After providing a valid username and password the git commands work again. Windows: Go to the Windows Credential Manager. This is done in an EN-US Windows by pressing the Windows Key and typing 'credential'. In other localized Windows variants you need to use the localized term. Alternatively you can use the shortcut control /name Microsoft.CredentialManager in the run dialog (WIN+R). Edit the git entry under Windows Credentials, replacing the old password with the new one. Mac: Press cmd+space and type \"Keychain Access\". You should find a key with a name like \"gitlab.*.com Access Key for user\". You can order by date modified to find it more easily. Right click and delete it.","s":"Git authentication issue","u":"/docs/guide-known-issues","h":"#git-authentication-issue","p":364},{"i":377,"t":"Spot the issue If you get a 403 forbidden issue while trying to upload folders/files or create a new folder/file: 403 forbidden The above issue will occur if you are not using persistent storage. Persistent storage can be created by the DSRI team to store your data persistently. Contact the DSRI team to request a persistent storage. 
You can find the persistent storage name as below","s":"Filebrowser 403 forbidden","u":"/docs/guide-known-issues","h":"#filebrowser-403-forbidden","p":364},{"i":379,"t":"On this page","s":"Install local OpenShift","u":"/docs/guide-local-install","h":"","p":378},{"i":381,"t":"You will need to set up the virtualization environment before installing MiniShift. Download MiniShift and unzip it.
# For Ubuntu 18.04 and older
sudo apt install -y libvirt-bin qemu-kvm
# For Ubuntu 18.10 and newer (replace libvirtd by libvirt in next commands)
sudo apt install -y qemu-kvm libvirt-daemon libvirt-daemon-system
# Create group if it does not exist
sudo addgroup libvirtd
sudo adduser $(whoami) libvirtd
sudo usermod -a -G libvirtd $(whoami)
newgrp libvirtd
curl -L https://github.com/dhiltgen/docker-machine-kvm/releases/download/v0.10.0/docker-machine-driver-kvm-ubuntu16.04 -o /usr/local/bin/docker-machine-driver-kvm
sudo chmod +x /usr/local/bin/docker-machine-driver-kvm
# Check if libvirtd is running
systemctl is-active libvirtd
# Start it if inactive
sudo systemctl start libvirtd
# Copy MiniShift in your path
cp minishift-1.34.1-linux-amd64/minishift /usr/local/bin","s":"Install MiniShift","u":"/docs/guide-local-install","h":"#install-minishift","p":378},{"i":383,"t":"minishift start Get your local OpenShift cluster URL after the command completes.","s":"Start MiniShift","u":"/docs/guide-local-install","h":"#start-minishift","p":378},{"i":385,"t":"Go to your local cluster URL. E.g. https://192.168.42.58:8443/console/catalog. Username: admin or developer Password: anything will work
# As admin
oc login -u system:admin","s":"Login","u":"/docs/guide-local-install","h":"#login","p":378},{"i":387,"t":"minishift stop","s":"Stop","u":"/docs/guide-local-install","h":"#stop","p":378},{"i":389,"t":"minishift delete -f","s":"Reset","u":"/docs/guide-local-install","h":"#reset","p":378},{"i":392,"t":"For more details: read the official install Kubernetes on Ubuntu tutorial or see the official Ubuntu Kubernetes install documentation.
sudo snap install microk8s --classic
sudo usermod -a -G microk8s $USER
# Restart your machine
mkdir -p ~/.kube
microk8s.kubectl config view --raw > $HOME/.kube/config
# Make sure this works for the dashboard on Ubuntu
microk8s.enable dashboard dns
Only do this if kubectl is not already installed on your machine:
sudo snap alias microk8s.kubectl kubectl","s":"kubectl on Ubuntu","u":"/docs/guide-local-install","h":"#kubectl-on-ubuntu","p":378},{"i":394,"t":"Included in the Docker installation. Use the installer provided by DockerHub. Activate it in Docker Preferences > Kubernetes. For Windows you will need to download kubectl.exe and place it in your PATH. 
https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/windows/amd64/kubectl.exe We recommend creating a kubectl directory in C:/ and adding this C:/kubectl to the Path environment variable in System properties > Advanced > Environment Variables > Path","s":"kubectl on MacOS & Windows","u":"/docs/guide-local-install","h":"#kubectl-on-macos--windows","p":378},{"i":396,"t":"# Install the Kubernetes UI
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml
kubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-dashboard-adminuser-sa.yml
kubectl apply -f https://raw.githubusercontent.com/MaastrichtU-IDS/d2s-core/master/argo/roles/kube-adminuser-rolebinding.yml
# Get the Token to access the dashboard
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
# Windows users will need to execute the 2 commands manually:
kubectl -n kube-system get secret
# And get the token containing 'admin-user'
kubectl -n kube-system describe secret
# For Windows: give the anonymous user global access
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=admin --user=system:anonymous
# Note: this could be improved. I think only the Dashboard UI didn't have the required permissions.
# Finally, start the web UI, and choose the Token connection
kubectl proxy
Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ And provide the previously obtained token. Warning: you will need to save the token to log in again next time (use the password save feature of your browser if possible).","s":"Install the Dashboard UI","u":"/docs/guide-local-install","h":"#install-the-dashboard-ui","p":378},{"i":398,"t":"kubectl should be running at start. Just restart the web UI:
kubectl proxy
Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/","s":"Run kubectl","u":"/docs/guide-local-install","h":"#run-kubectl","p":378},{"i":400,"t":"Debug DNS on Ubuntu:
microk8s.enable dns
Restart your machine. You might need to change your firewall configuration. On Ubuntu:
sudo ufw allow in on cni0
sudo ufw allow out on cni0
sudo ufw default allow routed
Try to connect to the internet from Kubernetes with the test-busybox pod:
kubectl exec -ti busybox -- /bin/sh
ping google.com","s":"Enable internet","u":"/docs/guide-local-install","h":"#enable-internet","p":378},{"i":402,"t":"# Create volume
kubectl apply -n argo -f d2s-core/argo/storage/storage-mac.yaml
Not working at the moment.","s":"Create persistent volume","u":"/docs/guide-local-install","h":"#create-persistent-volume","p":378},{"i":404,"t":"Clean uninstall before 2.2.
kubectl get cm workflow-controller-configmap -o yaml -n kube-system --export | kubectl apply -n argo -f -
kubectl delete -n kube-system cm workflow-controller-configmap
kubectl delete -n kube-system deploy workflow-controller argo-ui
kubectl delete -n kube-system sa argo argo-ui
kubectl delete -n kube-system svc argo-ui","s":"Uninstall","u":"/docs/guide-local-install","h":"#uninstall","p":378},{"i":407,"t":"Argo workflows will be installed in the argo namespace. See the official Argo documentation for more details. 
kubectl create ns argokubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.4.2/manifests/install.yaml# Configure service account to run workflowkubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=default:default# Test runargo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml See custom configuration for namespace install. kubectl apply -n argo -f https://raw.githubusercontent.com/vemonet/argo/master/manifests/namespace-install.yaml","s":"Install on your local Kubernetes","u":"/docs/guide-local-install","h":"#install-on-your-local-kubernetes","p":378},{"i":409,"t":"See the Argo workflows documentation.","s":"Install the client","u":"/docs/guide-local-install","h":"#install-the-client","p":378},{"i":411,"t":"kubectl -n argo port-forward deployment/argo-ui 8002:8001 Access on http://localhost:8002.","s":"Expose the UI","u":"/docs/guide-local-install","h":"#expose-the-ui","p":378},{"i":413,"t":"Get started Monitor your applications","s":"Monitor your applications","u":"/docs/guide-monitoring","h":"","p":412},{"i":415,"t":"You can have an overview of the different resources consumed by the applications running in your project by going to the Monitoring tab (in the developer view) You can also check the CPU and memory usage directly from the terminal inside a specific container Go to your application terminal, and run: top Check the number of Cpu(s) used at the top: %Cpu(s): 3,3 us, Check the memory usage with the used column: MiB Mem : 515543.2 total, 403486.8 free, 98612.0 used, 13444.5 buff/cache","s":"Monitor your application resources use","u":"/docs/guide-monitoring","h":"#monitor-your-application-resources-use","p":412},{"i":417,"t":"If your application is facing issues when deployed: If the pod is not building, or not deploying properly, take a look at the Events tab of the deployment. It shows a log of all events faced by the deployment (assign to node, pull image, build, etc). Additionally, all Events in your project can be accessed in Monitoring. Various ways to check the events You can also check the Monitoring page in the left side menu to see all events in a project. Or use the terminal: oc get events When a pod is running you can check its logs in the Logs tab (after going to the pod page). It will show the logs output of the container, equivalent to doing docker logs. Get help If you cannot figure out the issue by yourself: Gather relevant information to help the DSRI team to solve your issue: URL to the faulty application, which error was shown in the Events tab? Or in the Logs tab? Seek help on the #helpdesk DSRI Slack channel Checkout if an issue have already been created for this problem, or create a new one: https://github.com/MaastrichtU-IDS/dsri-documentation/issues","s":"Debug an application deployment","u":"/docs/guide-monitoring","h":"#debug-an-application-deployment","p":412},{"i":419,"t":"Deploy applications Deploy from a Dockerfile","s":"Deploy from a Dockerfile","u":"/docs/guide-dockerfile-to-openshift","h":"","p":418},{"i":421,"t":"This manual shows you an example of how to convert a dockerfile from your local machine to a running container on DSRI (openshift / okd). Start by cloning the example repository to your local machine. git clone git@gitlab.maastrichtuniversity.nl:dsri-examples/dockerfile-to-okd.git After cloning you now have a local folder containing a Dockerfile and index.html file. Inspect both files. 
Login with the openshift client: Authenticate to the OpenShift cluster using oc login . oc login --token= Create a new project if you don't have a project yet you can work with (change myproject to a project name of your choice: oc new-project myproject","s":"Build from local Dockerfile","u":"/docs/guide-dockerfile-to-openshift","h":"#build-from-local-dockerfile","p":418},{"i":423,"t":"oc new-build --name dockerfile-to-okd --binary","s":"Create new build configuration.","u":"/docs/guide-dockerfile-to-openshift","h":"#create-new-build-configuration","p":418},{"i":425,"t":"Start a new build on the DSRI with the files provided: cd dockerfile-to-okdoc start-build dockerfile-to-okd --from-dir=. --follow --wait","s":"Build the image","u":"/docs/guide-dockerfile-to-openshift","h":"#build-the-image","p":418},{"i":427,"t":"Create a new app using the build we just created: oc new-app dockerfile-to-okd To properly deploy your app on OpenShift you will need to define a few more parameters: Enable root user access (with serviceAccountName) by running this command: oc patch deployment/dockerfile-to-okd --patch '{\"spec\":{\"template\": {\"spec\":{\"serviceAccountName\": \"anyuid\"}}}}' You can also add persistent storage (with volumes and containers: volumeMounts ) ${STORAGE_NAME}: Name of your persistent volume claim in the Storage page of your project in the web UI ${STORAGE_FOLDER} : Name of the folder inside the persistent volume claim to store the application data (so you can store multiple applications on the same persistent volume claim) Open the configuration of the started app to fix its configuration: oc edit deployment/dockerfile-to-okd You can mount existing persistent volume this way (replace the variables, such as ${STORAGE_NAME} by your values): template: spec: serviceAccountName: anyuid volumes: - name: data persistentVolumeClaim: claimName: \"${STORAGE_NAME}\" containers: - image: rstudio-root:latest volumeMounts: - name: data mountPath: \"/home/rstudio\" subPath: \"${STORAGE_FOLDER}\" Generate deployment file in YAML You can also generate the app deployment in a YAML file to edit it before start: oc new-app dockerfile-to-okd -o yaml > myapp.yml# Edit myapp.ymloc create -f myapp.yml","s":"Create your app","u":"/docs/guide-dockerfile-to-openshift","h":"#create-your-app","p":418},{"i":429,"t":"Expose the application so you can reach it from your browser and check the route that was created oc expose svc/dockerfile-to-okdoc get route You can now visit the route shown in the HOST/PORT output of the oc get route command and see if you have successfully converted the docker file. You can edit the created route to enable HTTPS with this command: oc patch route/dockerfile-to-okd --patch '{\"spec\":{\"tls\": {\"termination\": \"edge\", \"insecureEdgeTerminationPolicy\": \"Redirect\"}}}'","s":"Expose app","u":"/docs/guide-dockerfile-to-openshift","h":"#expose-app","p":418},{"i":431,"t":"oc delete build dockerfile-to-okd See oc delete documentation.","s":"Delete the created build","u":"/docs/guide-dockerfile-to-openshift","h":"#delete-the-created-build","p":418},{"i":433,"t":"You can also deploy a local docker image from your machine. First build the docker image: docker build -t my-docker-image:latest . 
Check that you have the image locally on your system: docker images You should have a docker image for your application: REPOSITORY TAG my-docker-image latest You can then deploy providing the docker image name and the name of the application to be deployed: oc new-app my-docker-image --name app-name-on-openshift","s":"Deploy from a local docker image","u":"/docs/guide-dockerfile-to-openshift","h":"#deploy-from-a-local-docker-image","p":418},{"i":435,"t":"Go to +Add > From Git: https://console-openshift-console.apps.dsri2.unimaas.nl/import Follow the instructions given by the web UI: provide the URL to your git repository, the port on which the web interface will be deployed, you can also create a secret for git login if the repository is private. Once the container has started you will need to make a small change to enable it to run with any user ID (due to OpenShift security policies). You can do it with the command line (just replace your-app-name with your application name) oc patch deployment/your-app-name --patch '{\"spec\":{\"template\": {\"spec\":{\"serviceAccountName\": \"anyuid\"}}}}' Or through the web UI: click on your deployment, then Actions > Edit Deployment. And edit the YAML of your deployment to add serviceAccountName: anyuid under template.spec: template: spec: serviceAccountName: anyuid containers: - [...]","s":"Deploy from a Git repository","u":"/docs/guide-dockerfile-to-openshift","h":"#deploy-from-a-git-repository","p":418},{"i":437,"t":"Guides Prepare a workshop","s":"Prepare a workshop","u":"/docs/guide-workshop","h":"","p":436},{"i":439,"t":"If the users are students from Maastricht University, or external users (without an email @maastrichtuniversity.nl or @maastro.nl), you will need to contact the ICT support of your department to request the creation of accounts so that your users can connect to the UM VPN. At FSE, you will need to send an email to lo-fse@maastrichtuniversity.nl and DSRI-SUPPORT-L@maastrichtuniversity.nl with the following information: Emails of the users Why they need access to the DSRI (provide the ID of the course at Maastricht University if it is for a course) Until which date the users will need those VPN accounts","s":"Request VPN accounts for users","u":"/docs/guide-workshop","h":"#request-vpn-accounts-for-users","p":436},{"i":441,"t":"Fill this form 📬 to give us more details on your project (you don't need to do it if you have already filled it in the past).","s":"Fill a form","u":"/docs/guide-workshop","h":"#fill-a-form","p":436},{"i":443,"t":"Use the DSRI documentation to explain to your users how to access the DSRI.","s":"Prepare your workshop","u":"/docs/guide-workshop","h":"#prepare-you-workshop","p":436},{"i":445,"t":"Feel free to use the existing templates for JupyterLab, RStudio, or Visual Studio Code in the DSRI catalog. You can easily reuse our images, adapt them to your training needs, and install all required dependencies: https://github.com/MaastrichtU-IDS/jupyterlab https://github.com/MaastrichtU-IDS/rstudio https://github.com/MaastrichtU-IDS/code-server Then you will just need to instruct your users to start an existing template with your newly published image. With the JupyterLab template you can also prepare a git repository to be cloned in the workspace as soon as they start it. 
You can find some examples of Python scripts with a database to run on the DSRI in this repository: https://github.com/MaastrichtU-IDS/dsri-demo","s":"Publish an image for your training","u":"/docs/guide-workshop","h":"#publish-an-image-for-your-training","p":436},{"i":447,"t":"You can use this video showing how to start an RStudio workspace; the process is similar for JupyterLab and VisualStudio Code: https://www.youtube.com/watch?v=Y0BjotH1LiE Otherwise just do it directly with them.","s":"Show your users how to start a workspace","u":"/docs/guide-workshop","h":"#show-your-users-how-to-start-a-workspace","p":436},{"i":449,"t":"Guides Install UM VPN","s":"Install UM VPN","u":"/docs/guide-vpn","h":"","p":448},{"i":451,"t":"You will need to have an account at Maastricht University with an email ending with @maastrichtuniversity.nl or @student.maastrichtuniversity.nl. Request access to the DSRI for your account Please fill this form 📬 to provide us with some information on what you plan to do with the DSRI.","s":"Request an account","u":"/docs/guide-vpn","h":"#request-an-account","p":448},{"i":453,"t":"You need to be connected to the UM network to access the DSRI. Connect to UMnet or eduroam WiFi at Maastricht University Use the Maastricht University VPN at vpn.maastrichtuniversity.nl Log in to that using your UM username and password. Students By default the UM VPN is only available to employees. As a student you can access UM resources from any location via Student Desktop Anywhere. However, if VPN access is absolutely necessary you can request access via your course coordinator. The prefix of your UM email address with the first letter capitalized, e.g. Firstname.Lastname or F.Lastname, or your employee number at Maastricht University (a.k.a. P number), e.g. P7000000 Then you will see the page below to download the AnyConnect Secure Mobility Client Install the VPN (AnyConnect Secure Mobility Client) on Windows Double click on the .exe file to install the VPN. You can follow the steps below as shown in the pictures. Log in to the VPN (AnyConnect Secure Mobility Client) Once you finish installing you can run the Cisco AnyConnect Secure Mobility Client. Afterwards you will get the wizard below; click Connect Provide your UM username and password. (employee number at Maastricht University (a.k.a. P number), e.g. P7000000) Install the VPN (AnyConnect Secure Mobility Client) on Linux Connect to UMnet or eduroam WiFi at Maastricht University For Linux, use openconnect to connect to the UM VPN. You can easily install it on Ubuntu and Debian distributions with apt: sudo apt install openconnectsudo openconnect -u YOUR.USER --authgroup 01-Employees --useragent=AnyConnect vpn.maastrichtuniversity.nl Provide your UM password when prompted. For students: By default the UM VPN is only available to employees. As a student you can access UM resources from any location via Student Desktop Anywhere. However, if VPN access is absolutely necessary you can request access via your course coordinator.","s":"Connect to the UM network","u":"/docs/guide-vpn","h":"#connect-to-the-um-network","p":448},{"i":455,"t":"Deploy applications Install from Helm charts","s":"Install from Helm charts","u":"/docs/helm","h":"","p":454},{"i":458,"t":"Golang is required to run Helm. 
Install go 1.14.4 on Linux, you can find instructions for MacOS, Windows and newer versions at https://golang.org/dl wget https://dl.google.com/go/go1.14.4.linux-amd64.tar.gz# Extract to /usr/localtar -C /usr/local -xzf go1.14.4.linux-amd64.tar.gz# Add Go to path in .profileecho \"export PATH=$PATH:/usr/local/go/bin\" >> ~/.profile# Or in .zshrc if you use ZSHecho \"export PATH=$PATH:/usr/local/go/bin\" >> ~/.zshrc Restart your laptop for the changes to take effects or execute source ~/.profile","s":"Install Golang","u":"/docs/helm","h":"#install-golang","p":454},{"i":460,"t":"You can also use the official documentation to install Helm on your machine. Install on Linux​ curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash See Helm documentation for Linux. Install on MacOS​ brew install helm See Helm documentation for MacOS. Install on Windows​ Install using Chocolatey. choco install kubernetes-helm See Helm documentation for Windows.","s":"Install Helm","u":"/docs/helm","h":"#install-helm","p":454},{"i":462,"t":"helm version","s":"Check Helm installation","u":"/docs/helm","h":"#check-helm-installation","p":454},{"i":464,"t":"Explore published Helm charts at https://hub.helm.sh ⛵","s":"Install a Helm chart","u":"/docs/helm","h":"#install-a-helm-chart","p":454},{"i":466,"t":"Example from the OpenShift 4.3 documentation. See also the official Helm documentation. Add the repository of official Helm charts to your local Helm client: helm repo add stable https://kubernetes-charts.storage.googleapis.com/ Update the repository: helm repo update Install an example MySQL chart, and start the application named example-mysql: helm install example-mysql stable/mysql Password The instructions to retrieve the admin password and connect to the database will be displayed in the terminal. Retrieve the database password with this command (N.B.: kubectl can also be used in place of oc): oc get secret example-mysql -o jsonpath=\"{.data.mysql-root-password}\" | base64 --decode; echo Verify that the chart has installed successfully: helm list Expose the MySQL service as a route: oc expose service example-mysqloc get routes Or port-forward to http://localhost:3306 oc port-forward svc/example-mysql 3306","s":"Start a MySQL database with Helm","u":"/docs/helm","h":"#start-a-mysql-database-with-helm","p":454},{"i":468,"t":"helm uninstall example-mysql","s":"Uninstall the application","u":"/docs/helm","h":"#uninstall-the-application","p":454},{"i":470,"t":"You can also define deployment parameters when installing a Helm chart, such as the service account and node selector. 
For example, here we make sure the application will run on DSRI CPU nodes and use the anyuid service account: Add Bitnami repository: helm repo add bitnami https://charts.bitnami.com/bitnami Install and start Postgresql: helm install postgresql-db bitnami/postgresql --set nodeSelector.dsri.unimaas.nl/cpu=true --set serviceAccount.name=anyuid","s":"Set deployment parameters","u":"/docs/helm","h":"#set-deployment-parameters","p":454},{"i":472,"t":"Guides Publish a Docker image","s":"Publish a Docker image","u":"/docs/guide-publish-image","h":"","p":471},{"i":475,"t":"Use your existing GitHub account if you have one: Create a Personal Access Token for GitHub packages at https://github.com/settings/tokens/new Provide a meaningful description for the token, and enable the following scopes when creating the token: write:packages: publish container images to GitHub Container Registry delete:packages: delete specified versions of private or public container images from GitHub Container Registry You might want to store this token in a safe place, as you will not be able to retrieve it later on github.com (you can still delete it, and create a new token easily if you lose your token) 👨‍💻 Log in to the GitHub Container Registry in your terminal (change USERNAME and ACCESS_TOKEN to yours): echo \"ACCESS_TOKEN\" | docker login ghcr.io -u USERNAME --password-stdin On Windows use this command: docker login ghcr.io -u USERNAME -p \"ACCESS_TOKEN\" See the official GitHub documentation.","s":"Login to GitHub Container Registry","u":"/docs/guide-publish-image","h":"#login-to-github-container-registry","p":471},{"i":477,"t":"Create an account at https://quay.io Login in your terminal (you will be asked for username and password) docker login quay.io","s":"Login to quay.io","u":"/docs/guide-publish-image","h":"#login-to-quayio","p":471},{"i":479,"t":"Get a DockerHub account at https://hub.docker.com (you most probably already have one if you installed Docker Desktop) 👩‍💻 Run in your terminal: docker login Provide your DockerHub username and password.","s":"Login to DockerHub","u":"/docs/guide-publish-image","h":"#login-to-dockerhub","p":471},{"i":481,"t":"Once you built a Docker image, and you logged in to a Container Registry, you might want to publish the image to pull and re-use it easily later.","s":"Publish your image 📢","u":"/docs/guide-publish-image","h":"#publish-your-image-","p":471},{"i":483,"t":"Free for public images The GitHub Container Registry is still in beta but will be free for public images when fully released. It enables you to store your Docker images at the same place you keep your code! 📦 Publish to your user Container Registry on GitHub: docker build -t ghcr.io/github-username/my-image:latest .docker push ghcr.io/github-username/my-image:latest For example, to the MaastrichtU-IDS organization Container Registry on GitHub: docker build -t ghcr.io/maastrichtu-ids/jupyterlab:latest .docker push ghcr.io/maastrichtu-ids/jupyterlab:latest Created automatically If the image does not exist, GitHub Container Registry will create it automatically and set it as Private by default. You can easily change it to Public in the image settings on github.com.","s":"Publish to GitHub Container Registry","u":"/docs/guide-publish-image","h":"#publish-to-github-container-registry","p":471},{"i":485,"t":"Free for public images Quay.io is free for public images and does not restrict images pulls. 
Create the image on quay.io Build and push to quay.io docker build -t quay.io/quay-username/my-image:latest .docker push quay.io/quay-username/my-image:latest","s":"Publish to Quay.io","u":"/docs/guide-publish-image","h":"#publish-to-quayio","p":471},{"i":487,"t":"DockerHub pull rates limitations ⚠️ DockerHub imposes strict pull limitations for clusters like the DSRI (using DockerHub might result in failing to pull your images on the DSRI). We highly recommend using the GitHub Container Registry or RedHat quay.io Container Registry to publish public Docker images. Logged in If you are logged in with your DockerHub user on the DSRI, it should allow you to pull DockerHub images in your project (see above). Create the repository on DockerHub (attached to your user or an organization) Build and push the image: docker build -t dockerhub-username/jupyterlab:latest .docker push dockerhub-username/jupyterlab:latest You can also change the name (aka. tag) of an existing image: docker build -t my-jupyterlab .docker tag my-jupyterlab ghcr.io/github-username/jupyterlab:latest","s":"Publish to DockerHub","u":"/docs/guide-publish-image","h":"#publish-to-dockerhub","p":471},{"i":489,"t":"You can automate the building and publication of Docker images using GitHub Actions workflows 🔄 Use a working workflow as an example 👀 Check the .github/workflows/publish-docker.yml file to see an example of a workflow to publish an image to the GitHub Container Registry. 👩‍💻 You only need to change the IMAGE_NAME, and use it in your GitHub repository to publish a Docker image for your application automatically! It will build from a Dockerfile at the root of the repository. Workflow triggers The workflow can be easily configured to: publish a new image to the latest tag at each push to the main branch publish an image to a new tag if a release is pushed on GitHub (using the git tag) e.g. 
v0.0.1 published as image 0.0.1","s":"Use automated workflows","u":"/docs/guide-publish-image","h":"#use-automated-workflows","p":471},{"i":491,"t":"On this page","s":"JupyterHub with Spark","u":"/docs/jupyterhub-spark","h":"","p":490},{"i":493,"t":"You will need to have the usual oc tool installed, and to install kfctl, a tool to deploy Kubeflow applications, on your machine; download the latest version for your OS 📥️ You can then install it by downloading the binary and putting it in your path, for example on Linux: wget https://github.com/kubeflow/kfctl/releases/download/v1.2.0/kfctl_v1.2.0-0-gbc038f9_linux.tar.gztar -xzf kfctl_v1.2.0-0-gbc038f9_linux.tar.gzsudo mv kfctl /usr/local/bin/ Clone the repository with the DSRI custom images and deployments for the OpenDataHub platform, and go to the kfdef folder: git clone https://github.com/MaastrichtU-IDS/odh-manifestscd odh-manifests/kfdef","s":"🧊 Install kfctl","u":"/docs/jupyterhub-spark","h":"#-install-kfctl","p":490},{"i":495,"t":"Go to the kfdef folder All scripts need to be run from the kfdef folder 📂 You can deploy JupyterHub with 2 different authentication systems, use the file corresponding to your choice: For the default DSRI authentication use kfctl_openshift_dsri.yaml For GitHub authentication use kfctl_openshift_github.yaml You need to create a new GitHub OAuth app: https://github.com/settings/developers And provide the GitHub client ID and secret through environment variables before running the start script: export GITHUB_CLIENT_ID=YOUR_CLIENT_IDexport GITHUB_CLIENT_SECRET=YOUR_CLIENT_SECRET First you will need to change the namespace: in the file you want to deploy, to provide the project where you want to start JupyterHub (currently opendatahub-ids), then you can deploy JupyterHub and Spark with kfctl: ./start_odh.sh kfctl_openshift_dsri.yaml 🗄️ Persistent volumes are automatically created for each instance started in JupyterHub to ensure persistence of the data even when JupyterHub is stopped. You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims. 
⚡️ A Spark cluster with 3 workers is automatically created with the service name spark-cluster, you can use the URL of the master node to access it from your workspace: spark://spark-cluster:7077","s":"🪐 Deploy JupyterHub and Spark","u":"/docs/jupyterhub-spark","h":"#-deploy-jupyterhub-and-spark","p":490},{"i":497,"t":"Matching Spark versions Make sure all the Spark versions are matching, the current default version is 3.0.1 You can test the Spark cluster connection with PySpark: from pyspark.sql import SparkSession, SQLContextimport osimport socket# Create a Spark sessionspark_cluster_url = \"spark://spark-cluster:7077\"spark = SparkSession.builder.master(spark_cluster_url).getOrCreate()sc = spark.sparkContext# Test your Spark connectionspark.range(5, numPartitions=5).rdd.map(lambda x: socket.gethostname()).distinct().collect()# Or try:#x = ['spark', 'rdd', 'example', 'sample', 'example']x = [1, 2, 3, 4, 5]y = sc.parallelize(x)y.collect()# Or try:data = [1, 2, 3, 4, 5]distData = sc.parallelize(data)distData.reduce(lambda a, b: a + b)","s":"✨ Use the Spark cluster","u":"/docs/jupyterhub-spark","h":"#-use-the-spark-cluster","p":490},{"i":499,"t":"Make sure all the Spark versions are matching, the current default version is 3.0.1: Go to the Spark UI to verify the version of the Spark cluster Run spark-shell --version to verify the version of the Spark binary installed in the workspace Run pip list | grep pyspark to verify the version of the PySpark library Check the JupyterLab workspace Dockerfile to change the version of Spark installed in the workspace, and see how you can download and install a new version of the Spark binary. If you need to change the Python, Java or PySpark version in the workspace you can create a environment.yml file, for example for 2.4.5: name: sparkchannels: - defaults - conda-forge - anacondadependencies: - python=3.7 - openjdk=8 - ipykernel - nb_conda_kernels - pip - pip: - pyspark==2.4.5 Create the environment with conda: mamba env create -f environment.yml","s":"Match the version","u":"/docs/jupyterhub-spark","h":"#match-the-version","p":490},{"i":501,"t":"You can also create a route to access the Spark UI and monitor the activity on the Spark cluster: oc expose svc/spark-cluster-ui Get the Spark UI URL: oc get route --selector radanalytics.io/service=ui --no-headers -o=custom-columns=HOST:.spec.host","s":"Spark UI","u":"/docs/jupyterhub-spark","h":"#spark-ui","p":490},{"i":503,"t":"You can create a new Spark cluster, for example here using Spark 3.0.1 with the installed Spark Operator: cat < --docker-password= Link the login secret to the default service account: oc secrets link default um-harbor-secret --for=pull","s":"Using your own user","u":"/docs/login-docker-registry","h":"#using-your-own-user","p":506},{"i":519,"t":"Go to UM Container registry, click on your project if you already created one. 
Click on the tab Robot Accounts Click on New Robot Account Create the Robot account to your liking Copy the secret or export it Create a secret to log in to UM Harbor Container Registry in your project: oc create secret docker-registry um-harbor-secret --docker-server=cr.icts.unimaas.nl --docker-username= --docker-password= Link the login secret to the default service account: oc secrets link default um-harbor-secret --for=pull","s":"Using a robot account","u":"/docs/login-docker-registry","h":"#using-a-robot-account","p":506},{"i":521,"t":"Go to GitHub Settings, and create a Personal Access Token (PAT) which will be used as a password to connect to the GitHub Container Registry Create a secret to log in to GitHub Container Registry in your project: oc create secret docker-registry github-ghcr-secret --docker-server=ghcr.io --docker-username= --docker-password= --docker-email= Link the login secret to the default service account: oc secrets link default github-ghcr-secret --for=pull","s":"GitHub Container Registry","u":"/docs/login-docker-registry","h":"#github-container-registry","p":506},{"i":523,"t":"Increase DockerHub limitations Logging in with DockerHub also increases the DockerHub limits to pull images in your project Create a secret to log in to DockerHub in your project: oc create secret docker-registry dockerhub-secret --docker-server=docker.io --docker-username= --docker-password= --docker-email= Link the login secret to the default service account: oc secrets link default dockerhub-secret --for=pull","s":"DockerHub","u":"/docs/login-docker-registry","h":"#dockerhub","p":506},{"i":525,"t":"Miscellaneous Increase your processes speed","s":"Increase your processes speed","u":"/docs/increase-process-speed","h":"","p":524},{"i":527,"t":"With the DSRI you get access to a workspace with more memory and cores than your laptop (around 200G memory, and 64 cores on the DSRI, against around 16G memory and 8 cores on your laptop) Those additional resources might help make your workload run faster, but not automatically! It will run faster If your code can make use of the really large amount of RAM to load more of the data to process in memory. But if your workload does not require dozens of GB of memory, and your laptop does not face out of memory issues, or crash, when you run your workload, then you probably already have enough memory on your laptop, and will not gain a significant boost from the increased memory. If you can run your workload in parallel, or enable the libraries you use to use the available cores. This will highly depend on the libraries you use. Do they support running their processes in parallel? Do you need to explicitly enable parallelism on a specific number of cores? Proper parallelism is not achieved easily; it needs to be manually implemented within the library processes. For example, Python has a \"Global Interpreter Lock\" (aka. GIL) that limits thread parallelism by design, so when you are doing some work on a spreadsheet with pandas, you are only going to use 1 thread (which is nice, because it makes the conceptualization and understanding of algorithms easier, but it also makes it harder to write truly efficient libraries) You will need to use complementary libraries if you want to use more threads while processing data with pandas. There are multiple ways and libraries to achieve this, but the easiest, if you want to check it yourself with pandas, is to use pandarallel. 
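To make this concrete, here is a minimal sketch of using pandarallel to spread a pandas apply() over all available cores (assuming pandarallel is installed with pip; the DataFrame and the compute_score function are hypothetical examples, not part of the original docs):

```python
import pandas as pd
from pandarallel import pandarallel

# Spawns one worker process per available core; on a DSRI workspace this
# can be dozens of workers instead of the handful you get on a laptop.
pandarallel.initialize()

df = pd.DataFrame({"x": range(1_000_000)})

def compute_score(x):
    # Stand-in for a CPU-heavy per-row computation
    return x ** 2

# parallel_apply is pandarallel's drop-in parallel replacement for apply
df["score"] = df["x"].parallel_apply(compute_score)
```

Note that the speedup only shows up when the per-row work is heavy enough to amortize the cost of shipping the data to the worker processes.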
You could also implement the parallelism yourself with concurrent.futures","s":"The good","u":"/docs/increase-process-speed","h":"#the-good","p":524},{"i":529,"t":"Until now everything seems good, more memory, more cores... So, what's the catch? It can only get better, no? Applications and workspaces running on the DSRI use a persistent volume to avoid losing data when the application is restarted. On most workspaces this persistent volume is mounted on the workspace working directory. This persistent volume is not hosted directly on the same node as your application, it's hosted on the cluster in a distributed fashion (remember you can attach this persistent volume to different applications, which might be hosted on different nodes themselves) And distributed storage means: slower read and write times! In your laptop the data is on a hard drive sitting 2 cm from the CPU and memory. In the DSRI your workspace might be on node 4, while the persistent volume is on node 8. In this case the data will need to go through the network. So if you write a script to just load data, do no computing, and write back the data to the persistent volume, it will probably be much faster on your laptop than on the DSRI!","s":"The bad","u":"/docs/increase-process-speed","h":"#the-bad","p":524},{"i":531,"t":"Only 1 folder (and its subfolders) is usually mounted on the persistent volume. The rest is \"ephemeral storage\", which is the data bound to the application you started; this means the data will be stored on the same node as your workspace. This might result in faster read/write speeds! But it also means the data will be lost if the workspace is restarted (which does not happen every day, but can happen without notice) A solution could be to: Keep your code and important data as backup in the persistent volume (the workspace working dir usually) Copy the data your process needs to load in a folder outside of the persistent volume (on the ephemeral storage) Read/write data mostly from this folder on the ephemeral storage, avoid using the data in the persistent volume folder as much as possible Copy the important result files or temporary files you don't want to lose from the folder on the ephemeral storage to the folder on the persistent storage Let us know how it works for you on the Slack #general channel, and if you have suggestions to improve the workspaces.","s":"The solution","u":"/docs/increase-process-speed","h":"#the-solution","p":524},{"i":533,"t":"On this page","s":"JupyterHub workspace","u":"/docs/jupyterhub-workspace","h":"","p":532},{"i":535,"t":"You can easily start a data science workspace with JupyterLab, VisualStudio Code and Conda pre-installed on the DSRI with JupyterHub: Connect to the UM VPN Go to https://jupyterhub-github.apps.dsri2.unimaas.nl Log in with your GitHub account Choose the type of workspace, and the resource limitations Optionally you can provide additional parameters as environment variables: GIT_NAME and GIT_EMAIL: your name and email that will be used when committing with git GIT_URL: the URL of a git repository to be automatically cloned in the workspace, if there is a requirements.txt it will be automatically installed with pip Once your workspace has started you can: Use the persistent folder to put data that will be kept even when the server is stopped, or if you use a different type of workspace Clone your code repository with git Install packages with mamba/conda or pip Go to the workspace overview: https://jupyterhub-github.apps.dsri2.unimaas.nl/hub/home to see your 
workspace, and stop it. tip Put all the commands you use to install the packages required to run your code in a file in the persistent folder (ideally in the git repository with your code), so you can easily reinstall your environment if your workspace is stopped.","s":"🪐 Start your workspace","u":"/docs/jupyterhub-workspace","h":"#-start-your-workspace","p":532},{"i":537,"t":"In your workspace you can install new conda environments, if they include the packages nb_conda_kernels and ipykernel, then you will be able to easily start notebooks in those environments from the JupyterLab Launcher page. Install a conda environment from a file with mamba (it is like conda but faster): mamba env create -f environment.yml You'll need to wait for 1 minute before the new conda environment becomes available on the JupyterLab Launcher page. You can easily install an environment with a different version of Python if you need it. Here is an example of an environment.yml file to create an environment with Python 3.9, install the minimal dependencies required to easily starts notebooks in this environment with conda, and install a pip package: name: py39channels: - defaults - conda-forge - anacondadependencies: - python=3.9 - ipykernel - nb_conda_kernels - pip - pip: - matplotlib","s":"📦️ Manage dependencies with Conda","u":"/docs/jupyterhub-workspace","h":"#️-manage-dependencies-with-conda","p":532},{"i":539,"t":"You can use git from the terminal. You can also use the JupyterLab Git extension or the VisualStudio Code git integration to clone and manage your git repositories. They will ask you for a username and personal access token if the repository is private, or the first time you want to push changes.","s":"🐙 Use git in JupyterLab","u":"/docs/jupyterhub-workspace","h":"#-use-git-in-jupyterlab","p":532},{"i":541,"t":"Deploy applications Data Science catalog Run MPI jobs","s":"Run MPI jobs","u":"/docs/mpi-jobs","h":"","p":540},{"i":543,"t":"Checkout the repository of the CPU benchmark for a complete example of an MPI job: python script, Dockerfile, and the job deployment YAML. 
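To give an idea of what the Python side of such a job looks like, here is a minimal hypothetical sketch in the style of the Horovod example (this is not the actual tensorflow_mnist.py from the repository; it assumes an image with horovod installed, as in the example Dockerfile.cpu):

```python
# Each process started by mpirun runs this same script; Horovod tells it
# which member of the MPI worker group it is.
import horovod.tensorflow as hvd

hvd.init()
print(f"MPI worker {hvd.rank()} of {hvd.size()} (local rank {hvd.local_rank()})")

# A real training script would shard the dataset by rank and wrap the
# optimizer with hvd.DistributedOptimizer so gradients are averaged
# across all workers after each step.
```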
Clone the repository, and go to the example folder: git clone https://github.com/kubeflow/mpi-operator.gitcd mpi-operator/examples/horovod Open the tensorflow-mnist.yaml file, and fix the apiVersion on the first line: # FromapiVersion: kubeflow.org/v1# ToapiVersion: kubeflow.org/v1alpha2 You will also need to specify those containers can run with the root user by adding the serviceAccountName between spec: and container: for the launcher and the worker templates: template: spec: serviceAccountName: anyuid containers: - image: docker.io/kubeflow/mpi-horovod-mnist Your tensorflow-mnist.yaml file should look like this: apiVersion: kubeflow.org/v1alpha2kind: MPIJobmetadata: name: tensorflow-mnistspec: slotsPerWorker: 1 cleanPodPolicy: Running mpiReplicaSpecs: Launcher: replicas: 1 template: spec: serviceAccountName: anyuid containers: - image: docker.io/kubeflow/mpi-horovod-mnist name: mpi-launcher command: - mpirun args: - -np - \"2\" - --allow-run-as-root - -bind-to - none - -map-by - slot - -x - LD_LIBRARY_PATH - -x - PATH - -mca - pml - ob1 - -mca - btl - ^openib - python - /examples/tensorflow_mnist.py resources: limits: cpu: 1 memory: 2Gi Worker: replicas: 2 template: spec: serviceAccountName: anyuid containers: - image: docker.io/kubeflow/mpi-horovod-mnist name: mpi-worker resources: limits: cpu: 2 memory: 4Gi Once this has been set, create the job in your current project on the DSRI (change with oc project my-project): oc create -f tensorflow-mnist.yaml You should see the 2 workers and the main job running in your project Topology page in the DSRI web UI. You can then easily check the logs of the launcher and workers. To run your own MPI job on the DSRI, you can take a look at, and edit, the different files provided by the MPI Operator example: 🐍 tensorflow_mnist.py: the python script with the actual job to run 🐳 Dockerfile.cpu: the Dockerfile to define the image of the containers in which your job will run (install dependencies) ⛵️ tensorflow-mnist.yaml: the YAML file to define the MPI deployment on Kubernetes (number and limits of workers, mpirun command, etc) Visit the Kubeflow documentation to create a MPI job for more details. Contact us Feel free to contact us on the DSRI Slack #helpdesk channel to discuss the use of MPI on the DSRI.","s":"Run MPI jobs on CPU","u":"/docs/mpi-jobs","h":"#run-mpi-jobs-on-cpu","p":540},{"i":545,"t":"Guides Delete objects (advanced)","s":"Delete objects (advanced)","u":"/docs/openshift-delete-objects","h":"","p":544},{"i":547,"t":"The best way to make sure all objects related to your application have been deleted is to use the command line providing your application name. Different selectors can be used to easily delete all objects generated by an application deployment. 2 selectors can easily be found in the template configuration: app : the name you gave when creating your application template : the name of the template you used to create the application. Use it only if you want to delete all applications created by a specific template. oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application Delete storage if necessary from the OpenShift web UI. 
Force deletion You can force the deletion if the objects are not deleting properly: oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application","s":"Delete an application","u":"/docs/openshift-delete-objects","h":"#delete-an-application","p":544},{"i":549,"t":"Get the ID of the specific pod you want to delete: oc get pod Use the pod ID retrieved to delete the pod: oc delete pod Force deletion If the pod is not properly deleted, you can force its deletion: oc delete pod --force --grace-period=0 ","s":"Delete pod","u":"/docs/openshift-delete-objects","h":"#delete-pod","p":544},{"i":551,"t":"Be careful All objects and persistent storages in this project will be deleted and cannot be retrieved. To properly delete a project you need to first delete all objects in this project: oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --all -n Then delete the project: oc delete project ","s":"Delete a project","u":"/docs/openshift-delete-objects","h":"#delete-a-project","p":544},{"i":553,"t":"Be careful All data stored in this persistent storage will be lost and cannot be retrieved. oc delete pvc storage-name","s":"Delete persistent storage","u":"/docs/openshift-delete-objects","h":"#delete-persistent-storage","p":544},{"i":556,"t":"If a provisioned service is stuck on Marked for deletion you might need to set finalizers to null in the YAML. This can be done using the OpenShift web UI: Go to the Provisioned Service in the OpenShift UI overview Click on Edit YAML Remove the finalizers: finalizers: - kubernetes-incubator/service-catalog You can also do it using the oc CLI: oc get serviceinstance # Delete problematic line from serviceinstance to delete themoc get serviceinstance -o yaml | grep Terminating | sed \"/kubernetes-incubator/d\"| oc apply -f - No global catalog The OpenShift Catalog does not handle deploying templates globally properly (on all projects). If a template is deployed globally, OpenShift will try to create unnecessary objects such as provisioned services (aka. ServiceInstance) or ClusterClasses. Those services are not used, and some of them cannot be deleted easily. Catalog per project At the moment it is more reliable to create the template directly in your project if you need to use it multiple times.","s":"Stuck provisioned service","u":"/docs/openshift-delete-objects","h":"#stuck-provisioned-service","p":544},{"i":558,"t":"Projects can get stuck as marked for deletion, usually due to objects still present in the project that are not terminated, or finalizers left in some objects' YAML files. The following commands will allow you to clean up all the projects stuck in a terminating state that you have access to. 
Force deletion of terminating projects: for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc delete project --force --grace-period=0 $i ; done Delete all objects in terminating projects: for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc delete all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinstance --force --grace-period=0 --all -n $i ; done Remove Kubernetes finalizers from terminating projects: for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc get project $i -o yaml | sed \"/kubernetes/d\" | sed \"/finalizers:/d\" | oc apply -f - ; done Fix deletion If ServiceInstances refuse to get deleted, try to remove kubernetes finalizers: for i in $(oc get projects | grep Terminating| awk '{print $1}'); do echo $i; oc get serviceinstance -n $i -o yaml | sed \"/kubernetes-incubator/d\"| oc apply -f - ; done Check deletion Check if there are still objects in a project: oc get all,configmap,pvc,serviceaccount,secret,rolebinding,serviceinstance","s":"Delete stuck project","u":"/docs/openshift-delete-objects","h":"#delete-stuck-project","p":544},{"i":560,"t":"Get started Delete an application","s":"Delete an application","u":"/docs/openshift-delete-services","h":"","p":559},{"i":562,"t":"The best way to make sure all objects related to your application have been deleted is to use the command line providing your application name: oc delete all,secret,configmaps,serviceaccount,rolebinding --selector app=my-application Force deletion You can force the deletion if the objects are not deleting properly: oc delete all,secret,configmaps,serviceaccount,rolebinding --force --grace-period=0 --selector app=my-application","s":"From the terminal","u":"/docs/openshift-delete-services","h":"#from-the-terminal","p":559},{"i":564,"t":"We recommend using the oc CLI to easily delete an application. But in case you cannot install oc on your computer you can delete the different objects created by the application (easy to find in the Topology page): Delete the Route Delete the Service Delete the Deployment Config","s":"From the web UI","u":"/docs/openshift-delete-services","h":"#from-the-web-ui","p":559},{"i":566,"t":"Guides Command Line Interface","s":"Command Line Interface","u":"/docs/openshift-commands","h":"","p":565},{"i":568,"t":"Here is an overview of common oc commands: Command Description oc login --token= Log in to the DSRI OpenShift cluster in your terminal oc get projects List all available projects oc project Switch to project oc get pods Get running pods (a pod can run one or multiple containers for your application) oc rsh Remote terminal connection to a pod (Shell/Bash) oc cp Copy files from host to container or vice versa, e.g. from host: oc cp : or from pod to host: oc cp : oc rsync Similar to the rsync command on Linux to synchronize directories between container and host or the other way around oc exec Execute command in pods oc delete pod Delete pod","s":"Overview","u":"/docs/openshift-commands","h":"#overview","p":565},{"i":571,"t":"oc projects","s":"List projects","u":"/docs/openshift-commands","h":"#list-projects","p":565},{"i":573,"t":"oc project my-project","s":"Connect to project","u":"/docs/openshift-commands","h":"#connect-to-project","p":565},{"i":575,"t":"To update an ImageStream in your project to pull the latest update from the external repository (e.g. 
from ghcr.io or DockerHub): oc import-image ","s":"ImageStreams","u":"/docs/openshift-commands","h":"#imagestreams","p":565},{"i":578,"t":"oc create -f my-pod.yaml E.g. d2s-pod-virtuoso.yaml.","s":"Create pod from YAML","u":"/docs/openshift-commands","h":"#create-pod-from-yaml","p":565},{"i":580,"t":"oc get pod List running pods: oc get pods --field-selector=status.phase=Running","s":"List pods","u":"/docs/openshift-commands","h":"#list-pods","p":565},{"i":582,"t":"oc get pod | grep Using selector with Apache Flink as example, and showing only the pod id without header: oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name","s":"Get specific pod","u":"/docs/openshift-commands","h":"#get-specific-pod","p":565},{"i":584,"t":"Connect to a pod with Bash. oc rsh ","s":"Remote Shell connection","u":"/docs/openshift-commands","h":"#remote-shell-connection","p":565},{"i":586,"t":"Example creating a folder: oc exec -- mkdir -p /mnt/workspace/resources","s":"Execute command in pod","u":"/docs/openshift-commands","h":"#execute-command-in-pod","p":565},{"i":588,"t":"oc delete pod Force pod deletion If the pod is not properly deleted, you can force its deletion: oc delete pod --force --grace-period=0 ","s":"Delete pod","u":"/docs/openshift-commands","h":"#delete-pod","p":565},{"i":590,"t":"oc logs -f Debug a pod Get more details on how to debug a pod.","s":"Get pod logs","u":"/docs/openshift-commands","h":"#get-pod-logs","p":565},{"i":592,"t":"Create app from template using the CLI and providing parameters as arguments: oc new-app my-template -p APPLICATION_NAME=my-app -p ADMIN_PASSWORD=mypassword Example for the Semantic Web course notebooks: oc new-app template-jupyterstack-notebook -p APPLICATION_NAME=swcourseName -p NOTEBOOK_PASSWORD=PASSWORDoc delete all --selector template=template-jupyterstack-notebook","s":"Create app from template","u":"/docs/openshift-commands","h":"#create-app-from-template","p":565},{"i":594,"t":"See the Load data page.","s":"Copy files","u":"/docs/openshift-commands","h":"#copy-files","p":565},{"i":596,"t":"Deploy applications Data Science catalog Neuroscience research","s":"Neuroscience research","u":"/docs/neuroscience","h":"","p":595},{"i":598,"t":"Start a JupyterLab container with Freesurfer pre-installed providing admin (sudo) privileges to install anything you need from the terminal (e.g. pip or apt packages) When instantiating the template you can provide a few parameters similar to the standard JupyterLab, such as: Password to access the notebook Optionally you can provide a git repository to be automatically cloned in the JupyterLab (if there is a requirements.txt packages will be automatically installed with pip) Docker image to use for the notebook (see below for more details on customizing the docker image) Your git username and email to automatically configure git The DSRI will automatically create a persistent volume to store data you will put in the /home/jovyan/work folder (the folder used by the notebook interface). 
You can find the persistent volumes in the DSRI web UI, go to the Administrator view > Storage > Persistent Volume Claims You can also link your git repository to the project for automatic deployment, see using git in JupyterLab This can also be deployed using Helm from the terminal; the steps are: helm repo add dsri https://maastrichtu-ids.github.io/dsri-helm-charts/helm repo updatehelm install freesurfer dsri/jupyterlab \\ --set serviceAccount.name=anyuid \\ --set openshiftRoute.enabled=true \\ --set image.repository=ghcr.io/maastrichtu-ids/jupyterlab \\ --set image.tag=freesurfer \\ --set storage.mountPath=/root \\ --set password=changemeoc get route --selector app.kubernetes.io/instance=freesurfer --no-headers -o=custom-columns=HOST:.spec.host Log in to the corresponding Jupyter notebook and start the terminal, then enter freesurfer as a command","s":"JupyterLab with FreeSurfer","u":"/docs/neuroscience","h":"#jupyterlab-with-freesurfer","p":595},{"i":600,"t":"Generate a Dockerfile with: FreeSurfer 6.0.1 FSL 6.0.3 docker run --rm repronim/neurodocker:0.7.0 generate docker \\ --base debian:stretch --pkg-manager apt \\ --freesurfer version=6.0.1 --fsl version=6.0.3 > Dockerfile","s":"FreeSurfer and FSL","u":"/docs/neuroscience","h":"#freesurfer-and-fsl","p":595},{"i":602,"t":"Generate a Dockerfile with: FreeSurfer 6.0.1 AFNI, R and Python3 docker run --rm repronim/neurodocker:0.7.0 generate docker \\ --base debian:stretch --pkg-manager apt \\ --afni version=latest install_r=true install_r_pkgs=true install_python3=true \\ --freesurfer version=6.0.1 > Dockerfile","s":"FreeSurfer and AFNI","u":"/docs/neuroscience","h":"#freesurfer-and-afni","p":595},{"i":604,"t":"Before deploying the Dockerfile to the DSRI you can open it, and add commands to install additional packages you are interested in, such as nighres or nipype. Check out the documentation to deploy the Dockerfile on the DSRI. UI with VNC Running a UI with VNC (e.g. FSLeyes) is still a work in progress. See this issue for more details.","s":"Deploy the generated Dockerfile","u":"/docs/neuroscience","h":"#deploy-the-generated-dockerfile","p":595},{"i":606,"t":"More details about using GPU with FSL: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/GPU","s":"Use the GPUs","u":"/docs/neuroscience","h":"#use-the-gpus","p":595},{"i":608,"t":"Get started Upload data","s":"Upload data","u":"/docs/openshift-load-data","h":"","p":607},{"i":610,"t":"If you are using JupyterLab or VSCode you should be able to load data to the container by simply dragging and dropping the files to upload in the JupyterLab/VSCode web UI. For RStudio, use the Upload file button in the RStudio web UI to upload files from your computer to the RStudio workspace. File too big If those solutions don't work due to the file size, try one of the solutions below.","s":"In RStudio, JupyterLab and VSCode","u":"/docs/openshift-load-data","h":"#in-rstudio-jupyterlab-and-vscode","p":607},{"i":612,"t":"The quickest way to upload large files or folders from a laptop or server to the DSRI is to use the oc command line interface. Install the client To install the oc client on your laptop/server, visit the Install the client page oc cp directly copies, and overwrites, existing files from a laptop or server to an Application pod on the DSRI. 
First get the using your application name: oc get pod --selector app=","s":"Copy large files with the terminal","u":"/docs/openshift-load-data","h":"#copy-large-files-with-the-terminal","p":607},{"i":614,"t":"Folders are uploaded recursively by default: oc cp : Use absolute path in the pod You need to provide the absolute (full) path where you want to copy it in the pod. Use your application workspace path, e.g. /home/jovyan for JupyterLab or /home/rstudio for RStudio) For example: oc cp my-folder jupyterlab-000:/home/jovyan You can also use this one-liner to automatically get the pod ID based on your app label: oc get pod --selector app= | xargs -I{} oc cp {}:","s":"Copy from local to pod","u":"/docs/openshift-load-data","h":"#copy-from-local-to-pod","p":607},{"i":616,"t":"Just do the inverse: oc cp : ","s":"Copy from pod to local","u":"/docs/openshift-load-data","h":"#copy-from-pod-to-local","p":607},{"i":618,"t":"You can download data from your SURFdrive to your pod by creating a public link to the file: Go to the file in SURFdrive you'd like to share Click share and the create public link Fill in a name for the public link (like DSRI). The name does not matter much, but it can help you keep track of the goal of the public link. Click copy to clipboard Visit link in browser and copy the direct URL displayed on that page. Use the direct URL you just copied to download the file using either wget or curl (e.g. \"wget https://surfdrive.surf.nl/files/index.php/s/5mFwyAKj4UexlJb/download\") Revoke link in the SURFdrive portal","s":"Download data from SURFdrive","u":"/docs/openshift-load-data","h":"#download-data-from-surfdrive","p":607},{"i":620,"t":"If you have a lot of large files and/or they are updated regularly, you can use rsync as it synchronizes the files if they already exist, preventing duplication and making synchronization faster. You can also see the progress with rsync which you cannot with cp. 
And if the upload is stopped for any reason, rsync should pick it up from where it stopped (instead of restarting from scratch like oc cp does) caution Rsync does not work with symlinks (created with ln -s)","s":"Synchronizes files with oc rsync","u":"/docs/openshift-load-data","h":"#synchronizes-files-with-oc-rsync","p":607},{"i":622,"t":"oc rsync --progress : You can also use this one-liner to automatically get the pod ID based on your app label: oc get pod --selector app= | xargs -I{} oc rsync --progress {}:","s":"Sync local to pod","u":"/docs/openshift-load-data","h":"#sync-local-to-pod","p":607},{"i":624,"t":"Again, do the inverse: oc rsync --progress : ","s":"Sync pod to local","u":"/docs/openshift-load-data","h":"#sync-pod-to-local","p":607},{"i":626,"t":"You can use more options to improve the upload of large files: --compress compress file data during the transfer --delete delete files not present in source --watch Watch directory for changes and resync automatically","s":"More options","u":"/docs/openshift-load-data","h":"#more-options","p":607},{"i":629,"t":"Guides Data storage","s":"Data storage","u":"/docs/openshift-storage","h":"","p":628},{"i":631,"t":"Switch to the Administrator view Go to the Project panel Select your project Expand the Storage panel then go to the Persistent Volume Claim panel Click the button called Create Persistent Volume Claim, then you will be redirected to the Create Persistent Volume Claim wizard Provide a unique Persistent Volume Claim Name starting with pvc- example: pvc-filebrowser Select the Access Mode RWX and Storage Size Access Mode CLI abbreviation Description ReadWriteOnce RWO The volume can be mounted as read-write by a single node. ReadOnlyMany ROX The volume can be mounted as read-only by many nodes. ReadWriteMany RWX The volume can be mounted as read-write by many nodes. Click Create info The DSRI uses OpenShift Container Storage (OCS), which is based on Ceph and offers the ReadWriteOnce access mode. ReadWriteOnce (RWO) volumes cannot be mounted on multiple nodes. Use the ReadWriteMany (RWX) access mode when possible. If a node fails, the system does not allow the attached RWO volume to be mounted on a new node because it is already assigned to the failed node. If you encounter a multi-attach error message as a result, force delete the pod on a shut down or crashed node. Static persistent volumes provide sustainable persistent storage over time for applications that need to run regular Docker images (which usually use the root user). info Some applications, such as the Jupyter template, automatically create a persistent storage","s":"Create the Persistent Storage","u":"/docs/openshift-storage","h":"#create-the-persistent-storage","p":628},{"i":633,"t":"On the Topology page select your application, click Action on your application Select the Add Storage option from the dropdown list. Select the Use Existing Claim option from the Add Storage wizard and Select the Claim Add the Mount Path Save info You can try the above method if you want to connect more applications to the same storage","s":"Connect the Existing Persistent Storage","u":"/docs/openshift-storage","h":"#connect-the-existing-persistent-storage","p":628},{"i":635,"t":"Switch to the Administrator view Go to the Project panel Select your project Expand the Storage panel then go to the Persistent Volume Claim panel Click on the three dots (⋮) next to the Persistent Volume Claim you want to expand. Click on Expand PVC in the menu. Enter the size you want to expand your PVC with. Hit Expand. 
It can take up to 2 minutes before your PVC is expanded.","s":"Expand existing Persistent Storage","u":"/docs/openshift-storage","h":"#expand-existing-persistent-storage","p":628},{"i":637,"t":"Dynamic persistent volumes can be created automatically by an application template. Dynamic storage can also be created manually: go to Storage on the left sidebar in a project: Click Create Storage at the top right of the Storage page. Storage class: ceph-fs Access Mode: Single User (RWO): only the user who created this volume can read/write to this volume. Shared Access (RWX): all users with access to the projects can read/write this volume. Read Only (ROX): all users with access to the projects can read this volume.","s":"Use the dynamic storage","u":"/docs/openshift-storage","h":"#use-the-dynamic-storage","p":628},{"i":639,"t":"Disabled We currently disabled this solution by default, as it was confusing for users and would lead to data loss. When creating a pod, OpenShift will by default use ephemeral storage. It creates a volume bound to the pod, so the volume will be deleted with the pod. It is recommended to use dynamic provisioning for a more sustainable storage solution. But ephemeral storage can be sufficient for testing.","s":"Use the ephemeral storage","u":"/docs/openshift-storage","h":"#use-the-ephemeral-storage","p":628},{"i":641,"t":"Deploy applications Install from Operators","s":"Install from Operators","u":"/docs/operators","h":"","p":640},{"i":643,"t":"Contact us Contact us on the DSRI Slack #helpdesk channel, if you want to install a new Operator on the DSRI.","s":"Install existing Operators","u":"/docs/operators","h":"#install-existing-operators","p":640},{"i":645,"t":"Install the operator-sdk tool. See the official documentation. Operators can be built using 3 different approaches: Helm: a framework to define the deployment logic based on regular Kubernetes YAML, but fewer capabilities for complete auto-update and insights. Ansible: define the deployment logic with Ansible, provides maximum capabilities. Golang: define the deployment logic in Golang, provides maximum capabilities, but requires more code.","s":"Build Operators","u":"/docs/operators","h":"#build-operators","p":640},{"i":647,"t":"Documentation: Official docs to build Operators Official docs to build Operator from Helm charts: https://sdk.operatorframework.io/docs/building-operators/helm/tutorial Official docs to build Operator with Ansible: https://sdk.operatorframework.io/docs/building-operators/ansible/quickstart RedHat Certified Operator guide Make an operator use anyuid: https://redhat-connect.gitbook.io/certified-operator-guide/what-if-ive-already-published-a-community-operator/applying-security-context-constraints Submit community Operators: https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/submitting-a-community-operator-to-operatorhub.io Examples: Deployment example: https://github.com/microcks/microcks-ansible-operator/blob/master/roles/microcks/tasks/main.yml Older OpenShift guide: https://docs.openshift.com/container-platform/4.1/applications/operator_sdk/osdk-ansible.html Simple older example with route: https://github.com/djzager/ansible-role-hello-world-k8s","s":"External resources","u":"/docs/operators","h":"#external-resources","p":640},{"i":649,"t":"Get started Prepare your project","s":"Prepare your project","u":"/docs/prepare-project-for-dsri","h":"","p":648},{"i":651,"t":"Using git is mandatory to deploy your code on the DSRI. 
Store your code in a git repository to keep track of changes, and make it easier to share and reuse your code outside of your computer. Platform recommendations We recommend these platforms depending on your use case: GitHub for public repositories GitLab hosted at Maastricht University for private repositories Any other git platform, such as BitBucket or gitlab.com, is fine too.","s":"Code in a git repository","u":"/docs/prepare-project-for-dsri","h":"#code-in-a-git-repository","p":648},{"i":653,"t":"If your project is using a large amount of data that cannot be pushed to a git repository, you will need to use persistent storage to store your data on the DSRI. See the Storage on the DSRI documentation for more details about creating a persistent storage. Here are the options to upload your data to the DSRI storage:","s":"Get your data ready","u":"/docs/prepare-project-for-dsri","h":"#get-your-data-ready","p":648},{"i":655,"t":"If the data is stored on a local machine, such as your computer: Drag and drop files from your computer to the VisualStudio Code or JupyterLab web UI, if applicable. Otherwise, use the oc cp command to copy data to your application pod. See the Load data documentation page for more information. Upload to persistent storage Make sure you upload the data to a folder mounted on persistent storage in the pod to avoid losing your data if the pod restarts.","s":"Data is on your local machine","u":"/docs/prepare-project-for-dsri","h":"#data-is-on-your-local-machine","p":648},{"i":657,"t":"As for your local machine, you will need to install the oc client and use the oc cp command to copy data to your application pod. See the Load data documentation page for more information.","s":"Data is on a server","u":"/docs/prepare-project-for-dsri","h":"#data-is-on-a-server","p":648},{"i":659,"t":"In certain cases, UM servers are not accessible by default from the DSRI. This is even the case for servers that are normally publicly accessible. To be able to access these UM servers from the DSRI, we need to put in a request to open the connection. Please let us know either the server name and port you would like to access, or the URL (e.g. um-vm0057.unimaas.nl on port 443 or https://gitlab.maastrichtuniversity.nl). You can reach out to us either by mail or by Slack. The procedure is described in the diagram below:","s":"Request access to internal UM servers","u":"/docs/prepare-project-for-dsri","h":"#request-access-to-internal-um-servers","p":648},{"i":661,"t":"Get started Install the client","s":"Install the client","u":"/docs/openshift-install","h":"","p":660},{"i":664,"t":"Download the oc and kubectl Command Line Interface clients: wget https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz && tar xvf oc.tar.gz && sudo mv oc kubectl /usr/local/bin/","s":"On Linux","u":"/docs/openshift-install","h":"#on-linux","p":660},{"i":666,"t":"Use brew: brew install openshift-cli Or manually download the program and add it to your path: Download https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/macosx/oc.tar.gz Unzip the archive Move the oc binary to a directory on your PATH. To check your PATH, open a terminal and execute the following command: echo $PATH","s":"On Mac","u":"/docs/openshift-install","h":"#on-mac","p":660},{"i":668,"t":"Create a folder for OpenShift in Program Files: C:\\Program Files (x86)\\OpenShift Click here to download the oc tool .zip file, and move it to C:\\Program Files (x86)\\OpenShift. Extract the .zip file. 
Next, set the system PATH environment variable for the directory containing the oc.exe file, which now resides in your newly created OpenShift folder inside of C:\\Program Files (x86)\\OpenShift. Open the Control Panel, and click on System Click on Advanced system settings on the left or open the Advanced tab of System Properties. Click the button labeled Environment Variables... at the bottom. Look for the option Path in either the User variables section (for the current user) or the System variables section (for all users on the system). This makes it easy to access the oc command line interface by simply opening up the PowerShell and typing in the oc command, e.g.: oc version Official documentation See the official documentation to install the client if needed.","s":"On Windows","u":"/docs/openshift-install","h":"#on-windows","p":660},{"i":670,"t":"To use the oc Command Line Interface, you will need to authenticate to the DSRI in your terminal: PASSWORD NOT SUPPORTED Authentication to the oc Command Line Interface using your password is not supported. oc login --token= The token is provided by the Web UI: Go to the DSRI web UI. Click on the Copy Login Command button (in the top right of the page). Paste the copied command in your terminal, and execute it to log in with oc 🔑 Login command The command should look like this: oc login https://api.dsri2.unimaas.nl:6443 --token=$GENERATED_TOKEN","s":"Login in the terminal with oc","u":"/docs/openshift-install","h":"#login-in-the-terminal-with-oc","p":660},{"i":672,"t":"Miscellaneous PyTorch Profiling","s":"PyTorch Profiling","u":"/docs/profile-pytorch-code","h":"","p":671},{"i":674,"t":"According to wikipedia: \"Profiling is a form of dynamic program analysis that measures, for example, the space (memory) or time complexity of a program, the usage of particular instructions, or the frequency and duration of function calls. Most commonly, profiling information serves to aid program optimization, and more specifically, performance engineering.\"","s":"What is profiling?","u":"/docs/profile-pytorch-code","h":"#what-is-profiling","p":671},{"i":676,"t":"You may know that training large models like GPT-3 takes several million dollars source and a few hundred MWh source. If the engineers that trained these models did not spend time on optimization, it might have been several million dollars and hundreds of MWh more. Sure, the model you'd like to train is probably not quite as big. But maybe you want to train it 10000 times, because you want to do hyperparameter optimization. And even if you only train it once, it may take quite a bit of compute resources, i.e. money and energy.","s":"Why should I care about profiling?","u":"/docs/profile-pytorch-code","h":"#why-should-i-care-about-profiling","p":671},{"i":678,"t":"Well, you should always care if your code runs efficiently, but there are different levels of caring. From personal experience: if I know I'm going to run a piece of code only once, for a few days, on a single GPU, I'll probably not create a full profile. What I would do is inspect my GPU and CPU utilization during my runs (a quick check is sketched below), just to see if it is somewhat efficient, and if I didn't make any obvious mistakes (e.g. accidentally not using the GPU, even if I have one available). If I know that I'll run my code on multiple GPUs, for multiple days, (potentially) on multiple nodes, and/or I need to run it multiple times, I know that my resource footprint is going to be large, and it's worth spending some time and effort to optimize the code. 
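As a quick first pass at the utilization check mentioned above, a minimal sketch assuming your job runs in a GPU-enabled pod (the pod name is a placeholder):

# Open a shell in the pod running your training job
oc rsh my-gpu-pod
# Inside the pod: watch GPU utilization and memory, refreshed every second
watch -n 1 nvidia-smi
# Inside the pod: watch CPU and memory usage of your processes
top
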
That's when I'll create a profile. The good part is: the more often you do it, the quicker and more adept you become at it.","s":"When should I care about profiling?","u":"/docs/profile-pytorch-code","h":"#when-should-i-care-about-profiling","p":671},{"i":680,"t":"We can assist you with analyzing the bottleneck(s) in your deep learning pipeline and recommend improvements to speed up your pipeline.","s":"How DSRI team can help you?","u":"/docs/profile-pytorch-code","h":"#how-dsri-team-can-help-you","p":671},{"i":682,"t":"This documentation is taken from SURF's PyTorch profiling wiki (https://servicedesk.surf.nl/wiki/display/WIKI/PyTorch+Profiling) A tutorial on PyTorch profiling can be found here: (https://github.com/sara-nl/PraceHPML2022/blob/master/notebooks/PyTorch_profiling/PyTorch_profiling.ipynb)","s":"External Resources and references","u":"/docs/profile-pytorch-code","h":"#external-resources-and-references","p":671},{"i":684,"t":"Guides Create a new Project","s":"Create a new Project","u":"/docs/project-management","h":"","p":683},{"i":686,"t":"Avoid creating multiple projects Please avoid creating multiple projects unnecessarily. Be responsible and delete applications you are no longer using in your project to free resources, instead of creating a new project with a different number at the end. It is also easier to connect your different application containers and storage volumes when you create them in the same project. You can create a project using the Developer perspective, as follows: Click the Project drop-down menu to see a list of all available projects. Select Create Project. In the Create Project dialog box, enter a unique name in the Name field. Use a short and meaningful name for your project as the project identifier is unique across all projects, such as workspace-yourname or ml-covid-pathways Add the Display Name (e.g. DSRI Workshop) and Description (e.g. DSRI Community Workshop Project) details for the project. Click Create. Use the left navigation panel to navigate to the Project view and see the dashboard for your project. Optional: Use the Project drop-down menu at the top of the screen and select all projects to list all of the projects in your cluster. Use the Details tab to see the project details. If you have adequate permissions for a project, you can use the Project Access tab to provide or revoke administrator, edit, and view privileges for the project.","s":"Create a project using the web UI","u":"/docs/project-management","h":"#create-a-project-using-the-web-ui","p":683},{"i":688,"t":"You need to be logged in to the DSRI and copy the login command. Run oc new-project --description=\"\" --display-name=\"\" Example oc new-project dsri-workshop --description=\"DSRI Workshop\" \\ --display-name=\"DSRI Community Workshop Projects\" Reuse your project Only create new projects when it is necessary (i.e. for a genuinely new project). You can easily clean up your current project instead of creating a new one every time you want to try something.","s":"Create a project using the CLI","u":"/docs/project-management","h":"#create-a-project-using-the-cli","p":683},{"i":690,"t":"You can use the Project view in the Developer perspective to grant or revoke access permissions to your project. To add users to your project and provide Admin, Edit, or View access to them: In the Developer perspective, navigate to the Project view. In the Project page, select the Project Access tab. Click Add Access to add a new row of permissions to the default ones. 
Enter the user name, click the Select a role drop-down list, and select an appropriate role. Click Save to add the new permissions. You can also use: The Select a role drop-down list, to modify the access permissions of an existing user. The Remove Access icon, to completely remove the access permissions of an existing user to the project. info Advanced role-based access control is managed in the Roles and Role Bindings views in the Administrator perspective","s":"Access permissions for developers to your project","u":"/docs/project-management","h":"#access-permissions-for-developers-to-your-project","p":683},{"i":692,"t":"Navigate to Home → Projects. Locate the project that you want to delete from the list of projects. On the far right side of the project listing, select Delete Project from the Options menu . When the Delete Project pane opens, enter the name of the project that you want to delete in the field. Click Delete.","s":"Delete a project using the web UI","u":"/docs/project-management","h":"#delete-a-project-using-the-web-ui","p":683},{"i":694,"t":"Delete Project When you delete a project, the server updates the project status from Active to Terminating. Then, the server clears all content from a project that is in the Terminating state before finally removing the project. While a project is in Terminating status, you cannot add new content to the project. Projects can be deleted from the CLI or the web console. You need to be logged in to the DSRI and copy the login command. Run oc delete project Example oc delete project dsri-workshop","s":"Delete a project using the CLI","u":"/docs/project-management","h":"#delete-a-project-using-the-cli","p":683},{"i":696,"t":"On this page","s":"Working with sensible data","u":"/docs/sensible-data","h":"","p":695},{"i":698,"t":"Since the DSRI can only be accessed when on the physical UM network or using the UM VPN, deployed services will not be available on the public Internet 🔒 All activities must have a legal basis. You must closely examine and abide by the terms and conditions of any data, software, or web service that you use as part of your work 📜","s":"Reminder: DSRI restrictions","u":"/docs/sensible-data","h":"#reminder-dsri-restrictions","p":695},{"i":700,"t":"The DSRI administration disclaims all responsibility for the misuse of sensitive data processed on the DSRI. We can guarantee that only you and 4 administrators are able to access the data (you might need to check with the data owner that this is not a problem). Feel free to ask us for more details","s":"Disclaimer","u":"/docs/sensible-data","h":"#disclaimer","p":695},{"i":702,"t":"Get started Start your workspace","s":"Start your workspace","u":"/docs/start-workspace","h":"","p":701},{"i":704,"t":"Anything running in DSRI needs to be running in a docker container. Docker containers are namespaces that share the kernel on a Linux system; you can see them as clean, minimalist Linux computers with only what you need to run your programs installed. This allows you to completely control the environment where your code runs, and avoid conflicts. When running experiments we can start from existing images that have already been published for popular data science applications with a web interface. You can use, for example, JupyterLab when running Python, RStudio when running R, or VisualStudio Code if you prefer. 
Once you access a running container, you can install anything you need as if it were a Linux/Ubuntu computer (most of them run with admin privileges), and run anything via the notebook/RStudio/VSCode interface, or the terminal.","s":"Introduction to containers","u":"/docs/start-workspace","h":"#introduction-to-containers","p":701},{"i":706,"t":"The first step to get your code running on the DSRI is to pick the base interface you want to use to access your workspace on the DSRI. We prepared generic Docker images for data science workspaces with your favorite web UI pre-installed to easily deploy your workspace. So you just need to choose your favorite workspace, start the container, access it, add your code, and install your dependencies. Login to the DSRI dashboard Select your project, or create one with a meaningful short name representing your project, e.g. workspace-yourname Go to the +Add page, and select to add From Developer Catalog => All services Search for templates corresponding to the application you want to deploy among the ones described below (make sure the filter for templates is properly checked). JupyterLab: Access and run your code using the popular Jupyter notebooks, with kernels for Python, Java, R, and Julia. It also provides a good web interface to access the terminal, upload and browse the files. VisualStudio Code: Your daily IDE, but in your browser, running on the DSRI. RStudio: R users' favorite. The terminal: For people who are used to the terminal and just want to run scripts, it provides smaller and more stable images, which makes installation and deployment easier. You can use the Ubuntu template to start a basic ubuntu image and access it from the terminal. Any web interface: You can easily run and access most programs with a web interface on the DSRI. You can use the template Custom workspace if your application is exposed on port 8888. Otherwise visit the page Anatomy of a DSRI application for more details. Desktop interface: there is the possibility to start a container as a Linux operating system with a graphical desktop interface. It can be useful to deploy software like Matlab, but the setup can be a bit more complex. You will get an Ubuntu computer with a basic Desktop interface, running on the DSRI, that you can access directly in your web browser. The desktop interface is accessed through a web application by using noVNC, which exposes the VNC connection without needing a VNC client. More applications You can also find more documentation on the different applications that can be deployed from the DSRI under Deploy applications in the menu on the left.","s":"Choose your interface","u":"/docs/start-workspace","h":"#choose-your-interface","p":701},{"i":707,"t":"Once you have chosen your favorite way to run your experiments, you can click on the application you want to use for your workspace. Check out the description to learn more details about the application that will be deployed. Then click on Instantiate Template, and fill in the parameters, such as the password to access the web UI. Note that the application name needs to be unique in the project. Finally click on the Create button. You should see your application in your project dashboard; it can take a few seconds to a few minutes to pull the docker image and start the application. Once the application has started you will be able to access it by clicking on its circle, then click the Route that has been automatically generated for the web interface, in the Resources tab. 
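If you prefer to follow the startup from a terminal rather than the Topology page, a minimal sketch, assuming the oc client is installed, you are logged in, and your application is labeled my-workspace (a placeholder name):

# Watch the pods of your application until they reach the Running state
oc get pods --selector app=my-workspace --watch
# Retrieve the route (URL) generated for the web interface
oc get route my-workspace
# Open a shell inside the running container if needed
oc rsh $(oc get pod --selector app=my-workspace -o name | head -n 1)
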
Check the workshop For a more detailed tutorial, you can follow the workshop to start Data Science applications on the DSRI","s":"Start your workspace","u":"/docs/start-workspace","h":"#start-your-workspace","p":701},{"i":709,"t":"We recommend you use git to clone your project code in your workspace, as it helps with sharing your code and managing the evolution of your project. It will be preinstalled in most images, otherwise you can install it easily with apt-get install git With web interfaces like JupyterLab, VisualStudio Code and RStudio you can easily upload small and medium-sized files directly through the UI with a drag and drop. Otherwise you can use the terminal, install the oc client, and use the oc cp or oc rsync commands to upload large files to your workspace on the DSRI. See the Upload data page for more details.","s":"Upload your code and data","u":"/docs/start-workspace","h":"#upload-your-code-and-data","p":701},{"i":711,"t":"Once the workspace is started, you can install the different dependencies you need to run your experiments. It is recommended to save all the commands you used to install the different requirements in a script (e.g. install.sh). This will ensure you can reinstall the environment easily and faithfully if the container is restarted. You can also use them to create a Docker image with everything prepared for your application. Most containers for science are based on Debian/Ubuntu, so you can install new packages with apt-get: apt-get update && apt-get install -y build-essential wget curl","s":"Install your dependencies","u":"/docs/start-workspace","h":"#install-your-dependencies","p":701},{"i":713,"t":"You can use your web interface to run your code as you like: notebooks, RStudio, or execution via VSCode. Note that for long-running jobs the web UI is not always the best solution, e.g. Jupyter notebooks can be quite unstable when running a 30 min code block. A quick solution for that is to run your code in scripts, using the bash terminal. You can use the nohup prefix, and & suffix, to run your script in the background, so that you can even disconnect, and come back later to check the results and logs. For example with a python script, you would do: nohup python my_script.py & The script will run in the background, and all terminal output will be stored in the file nohup.out You can also check if the process is currently running by typing ps aux or top You can kill the process by getting the process ID (PID) using the previous commands, and then: kill -9 PID","s":"Run your code","u":"/docs/start-workspace","h":"#run-your-code","p":701},{"i":715,"t":"When you are not using your application anymore you can stop the pod. If you are using a Dynamic or Persistent storage you can restart the pod and continue working with all your data in the same state as you left it. Do not waste resources Please think of stopping applications you are not using to avoid consuming unnecessary resources. On the Topology page click on the down arrow ⬇️ next to the number of pods deployed. You can then restart the pod by clicking the up arrow ⬆️ Note that starting more than 1 pod will not increase the amount of resources you have access to, most of the time it will only waste resources and might end up in weird behavior on your side. The web UI will randomly assign you to 1 of the started pods when you access it. This only works for clusters with multiple workers, such as Apache Flink and Spark. 
Or if you connect directly to each pod with the terminal to run different processes.","s":"Stop your application","u":"/docs/start-workspace","h":"#stop-your-application","p":701},{"i":717,"t":"When you try to access your workspace and you encounter the page below, usually this indicates that your pod is not running. For example, this will be the case if you stopped your pod, or if there was maintenance on the cluster. To start the pod, go to the Topology page, and click on the up arrow ⬆️ next to the number of pods deployed. Make sure you scale it to 1. Scaling it to more than 1 will not increase the amount of resources you have access to, most of the time it will only waste resources and cause weird behavior on your side. Do not waste resources Please only scale up resources you're using, and scale down when you're not using them anymore. Leaving resources running consumes unnecessary power and might prevent other users from using the DSRI.","s":"Start your application","u":"/docs/start-workspace","h":"#start-your-application","p":701},{"i":719,"t":"Once you have tested your workspace and you know how to set it up, it can be helpful to define a Dockerfile to build and publish a Docker image with everything directly installed (instead of installing your requirements after starting a generic workspace). Start from an existing generic Docker image, depending on the base technologies you need, such as Debian, Ubuntu, Python, JupyterLab, VisualStudio Code, RStudio... Add your source code in the Docker image using ADD . . or COPY . . Install dependencies (e.g. RUN apt-get install gfortran) Define which command to run when starting the container (e.g. ENTRYPOINT [\"jupyter\", \"lab\"]) Here is a simple example Dockerfile for a python application: # The base image to start from, choose the one with everything you need installed FROM python:3.8 # Change the user and working directory to make sure we are using root USER root WORKDIR /root # Install additional packages RUN apt-get update && apt-get install build-essential # This line will copy all files and folders that are in the same folder as the Dockerfile (usually the code you want to run in the container) ADD . . # This line will install all the python packages described in the requirements.txt of your source code RUN pip install -r requirements.txt && pip install notebook jupyterlab # Command to run when the container is started, here it starts JupyterLab as a service ENTRYPOINT [ \"jupyter\", \"lab\" ] Here are some examples of Dockerfile for various types of web applications: Custom JupyterLab based on the official jupyter/docker-stacks Custom RStudio VisualStudio Code server Python web app See the guide to Publish a Docker image for more details on this topic. A build-and-push sketch follows below.","s":"Optional: define a docker image","u":"/docs/start-workspace","h":"#optional-define-a-docker-image","p":701},{"i":721,"t":"Miscellaneous Tensorflow Optimization","s":"Tensorflow Optimization","u":"/docs/speeding-tensorflow-dl","h":"","p":720},{"i":723,"t":"The amount of resources that you have is not nearly as important as using them to their maximum potential. 
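To build and publish a Dockerfile like the one above, a minimal sketch (the registry path is a hypothetical placeholder; see the Publish a Docker image guide for the recommended registries):

# Build the image from the Dockerfile in the current folder
docker build -t ghcr.io/your-username/my-workspace:latest .
# Log in to the registry, then push the image
docker login ghcr.io
docker push ghcr.io/your-username/my-workspace:latest
# The published image can then be deployed on the DSRI, e.g. with
oc new-app ghcr.io/your-username/my-workspace:latest
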
It’s all about doing more with less. In this write-up, we discuss optimizations related to data preparation, data reading, data augmentation, training, and inference.","s":"🔶 Speeding up Tensorflow based deep learning pipelines","u":"/docs/speeding-tensorflow-dl","h":"#-speeding-up-tensorflow-based-deep-learning-pipelines","p":720},{"i":725,"t":"Let’s look at each area of the deep learning pipeline step by step, including data preparation, data reading, data augmentation, training, and, finally, inference.","s":"A possible checklist for speeding up your deep learning pipeline in Tensorflow?","u":"/docs/speeding-tensorflow-dl","h":"#a-possible-checklist-for-speeding-up-your-deep-learning-pipeline-in-tensorflow","p":720},{"i":727,"t":"1) Store as TFRecords 2) Reduce Size of Input Data 3) Use TensorFlow Datasets","s":"Data Preparation","u":"/docs/speeding-tensorflow-dl","h":"#data-preparation","p":720},{"i":729,"t":"1) Use tf.data 2) Prefetch Data 3) Parallelize CPU Processing 4) Parallelize I/O and Processing 5) Enable Nondeterministic Ordering 6) Cache Data 7) Turn on Experimental Optimizations 8) Autotune Parameter Values (see the tf.data sketch after these checklists)","s":"Data Reading","u":"/docs/speeding-tensorflow-dl","h":"#data-reading","p":720},{"i":731,"t":"1) Use GPU for Augmentation","s":"Data Augmentation","u":"/docs/speeding-tensorflow-dl","h":"#data-augmentation","p":720},{"i":733,"t":"1) Use Automatic Mixed Precision 2) Use Larger Batch Size 3) Use Multiples of Eight 4) Find the Optimal Learning Rate 5) Use tf.function 6) Overtrain, and Then Generalize 6a) Use progressive sampling 6b) Use progressive augmentation 6c) Use progressive resizing 7) Install an Optimized Stack for the Hardware 8) Optimize the Number of Parallel CPU Threads 9) Use Better Hardware 10) Distribute Training 11) Examine Industry Benchmarks","s":"Training","u":"/docs/speeding-tensorflow-dl","h":"#training","p":720},{"i":735,"t":"1) Use an Efficient Model 2) Quantize the Model 3) Prune the Model 4) Use Fused Operations 5) Enable GPU Persistence","s":"Inference","u":"/docs/speeding-tensorflow-dl","h":"#inference","p":720},{"i":737,"t":"We can assist you with analyzing the bottleneck(s) in your deep learning pipeline and recommend improvements to speed up your pipeline.","s":"How DSRI team can help you?","u":"/docs/speeding-tensorflow-dl","h":"#how-dsri-team-can-help-you","p":720},{"i":739,"t":"This documentation is adapted from \"Practical Deep Learning for Cloud, Mobile, and Edge\" by Koul et al. (published by O’Reilly)","s":"External Resources and references","u":"/docs/speeding-tensorflow-dl","h":"#external-resources-and-references","p":720},{"i":741,"t":"Guides Libraries for Machine Learning","s":"Libraries for Machine Learning","u":"/docs/tools-machine-learning","h":"","p":740},{"i":743,"t":"See this introductory article explaining the different principles of Machine Learning. The Azure Machine Learning Algorithm Cheat Sheet helps you choose the right algorithm for a predictive analytics model. 
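To illustrate a few items from the Data Reading checklist above (tf.data, prefetching, parallel I/O and processing, caching), a minimal sketch, assuming TFRecord files under data/ and a user-supplied parse_fn:

python - <<'EOF'
import tensorflow as tf

def parse_fn(example):
    # Placeholder parser: decode your serialized examples here
    return example

files = tf.data.Dataset.list_files("data/*.tfrecord")
dataset = (
    tf.data.TFRecordDataset(files, num_parallel_reads=tf.data.AUTOTUNE)  # parallelize I/O
    .map(parse_fn, num_parallel_calls=tf.data.AUTOTUNE)  # parallelize CPU processing
    .cache()  # cache data after the first epoch
    .batch(64)
    .prefetch(tf.data.AUTOTUNE)  # overlap preprocessing and training
)
EOF
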
This repository provides tutorials and examples for a vast number of Machine/Deep Learning libraries.","s":"Machine Learning libraries","u":"/docs/tools-machine-learning","h":"#machine-learning-libraries","p":740},{"i":745,"t":"https://scikit-learn.org/stable/","s":"SciKit Learn","u":"/docs/tools-machine-learning","h":"#scikit-learn","p":740},{"i":747,"t":"See this article for more details about modern Deep Learning libraries.","s":"Deep Learning libraries","u":"/docs/tools-machine-learning","h":"#deep-learning-libraries","p":740},{"i":749,"t":"Python library developed by Google. https://www.tensorflow.org/","s":"Tensorflow","u":"/docs/tools-machine-learning","h":"#tensorflow","p":740},{"i":751,"t":"Python library developed by Facebook. https://pytorch.org/","s":"PyTorch","u":"/docs/tools-machine-learning","h":"#pytorch","p":740},{"i":753,"t":"Java library developed by Amazon. See the introduction article. https://djl.ai/","s":"Deep Java Library","u":"/docs/tools-machine-learning","h":"#deep-java-library","p":740},{"i":755,"t":"Layer on top of Tensorflow. https://sonnet.readthedocs.io/en/latest/","s":"Sonnet","u":"/docs/tools-machine-learning","h":"#sonnet","p":740},{"i":757,"t":"Python library. Layer on top of Tensorflow, CNTK, Theano. https://keras.io/","s":"Keras","u":"/docs/tools-machine-learning","h":"#keras","p":740},{"i":759,"t":"Layer on top of Tensorflow, PyTorch, SciKit Learn developed by Netflix. https://metaflow.org/","s":"Metaflow","u":"/docs/tools-machine-learning","h":"#metaflow","p":740},{"i":761,"t":"Miscellaneous SURF Offerings","s":"SURF Offerings","u":"/docs/surf-offerings","h":"","p":760},{"i":764,"t":"SURF is the ICT cooperative for Dutch education and research institutions. As a collaborative organization, SURF’s members—its owners—work together to deliver top-tier digital services, address complex innovation challenges, and exchange valuable knowledge. Computing and storage infrastructure are essential for cutting-edge research. SURF supports researchers with a diverse range of computing and storage services. But before diving into these services, let’s briefly explore what a cluster computer is.","s":"What is SURF?","u":"/docs/surf-offerings","h":"#what-is-surf","p":760},{"i":766,"t":"A cluster computer is essentially a group of interconnected computers, called nodes, working together as a unified system. Each node has its own CPU, memory, and disk space, along with access to a shared file system. Imagine these nodes connected by network cables, like those in your home or office. Cluster computers are designed for high-performance workloads, allowing users to run hundreds of computational tasks simultaneously.","s":"What is a cluster computer?","u":"/docs/surf-offerings","h":"#what-is-a-cluster-computer","p":760},{"i":768,"t":"Some of the computing and storage solutions provided by SURF are: 1) Spider Cluster - High-performance Data Processing (DP) platform: Spider is a versatile DP platform aimed at processing large structured data sets. Spider is an in-house compute cluster built on top of SURF’s in-house elastic Cloud. This allows for scalable processing of many terabytes or even petabytes of data, utilizing many hundreds of cores simultaneously, in exceedingly short timespans. Superb network throughput ensures connectivity to external data storage systems. Spider is used for large scale multi-year data intensive projects, for users to actively process their data, such as large static data sets or continuously growing data sets. 
Examples include genomics data, astronomical telescope data, physics detector data and satellite earth observations. 2) Snellius Cluster - the Dutch National supercomputer: Snellius is the Dutch National supercomputer hosted at SURF. The system facilitates scientific research carried out by many universities, independent research institutes, governmental organizations, and private companies in the Netherlands. Snellius is a cluster of heterogeneous nodes built by Lenovo, containing predominantly AMD technology, with capabilities for high performance computing (parallel, symmetric multiprocessing). The system also has several system-specific storage resources that are geared towards supporting the various types of computing. 3) SURF Research Cloud (SRC): SURF Research Cloud is a service to facilitate scientists’ collaborative work. The central idea in SRC is the collaborative workspace. A workspace translates directly to a \"Virtual Machine\". These hosted workspaces, aka virtual machines, can be used for conducting research and development individually or together with your team/project members. 4) Research Data Storage Services: 4.1) Data Archive : The SURF Data Archive allows users to safely archive up to petabytes of valuable research data to ensure the long-term accessibility and reproducibility of their work. The Data Archive is also connected to SURF’s compute infrastructure, via a fast network connection, allowing for the seamless depositing and retrieval of data. 4.2) Data Repository : The Data Repository service is a web-based data publication and archiving platform that allows researchers to store, annotate and publish research data to ensure long-term preservation and availability of their datasets. All published datasets get their own DOI and Handle, while every file gets its own independent Handle to allow persistent reference on all levels. 4.3) dCache : dCache is a scalable storage system. It contains more than 50 petabytes of scientific data, accessible through several authentication methods and protocols. It consists of magnetic tape storage and hard disk storage, both addressed by a common file system. 4.4) Object Store : Object storage is ideal for storing unstructured data that can grow without bound. Object storage does not have a directory-type structure like a normal file system has; it organises its data in so-called containers that contain objects. There is no tree-like structure with files and directories. There are only containers with objects in them. The SURF Object Store service is based on Ceph RGW and provides access using the S3 protocol, which is the de facto standard for addressing object storage.","s":"Different types of Services provided by SURF:","u":"/docs/surf-offerings","h":"#different-types-of-services-provided-by-surf","p":760},{"i":770,"t":"The DSRI team is here to help you navigate SURF’s services, including: 1) Grant Applications: We assist researchers in applying for SURF grants. For instance: * Small applications: Up to 1 million System Billing Units (SBU) on Snellius and/or 100 TB of dCache storage (https://www.surf.nl/en/small-compute-applications-nwo). * Large applications: Customized resource allocations based on project needs. 2) Resource Estimation: Unsure about your computing and storage requirements? We help estimate your needs in terms of SURF’s billing units. 
3) Use Case Analysis: We assess whether your research project is a good fit for SURF’s services.","s":"How to Get Started with SURF Services?","u":"/docs/surf-offerings","h":"#how-to-get-started-with-surf-services","p":760},{"i":772,"t":"SURF: https://www.surf.nl/en Deep Learning Tutorials by UvA: https://uvadlc-notebooks.readthedocs.io/en/latest/index.html","s":"External Resources and references","u":"/docs/surf-offerings","h":"#external-resources-and-references","p":760},{"i":774,"t":"Guides Workflows Deploy Airflow","s":"Deploy Airflow","u":"/docs/workflows-airflow","h":"","p":773},{"i":776,"t":"You will need to have Helm installed on your computer to deploy a Helm chart, see the Helm docs for more details. Install the Helm chart to be able to deploy Airflow on the DSRI: helm repo add apache-airflow https://airflow.apache.org && helm repo update","s":"Install the chart","u":"/docs/workflows-airflow","h":"#install-the-chart","p":773},{"i":777,"t":"You can quickly deploy Airflow on the DSRI, with DAGs automatically synchronized with your Git repository. We use a values.yml file with all default parameters pre-defined for the DSRI, so you just need to edit the password and git repository configuration in this command, and run it: helm install airflow apache-airflow/airflow \\ -f https://raw.githubusercontent.com/MaastrichtU-IDS/dsri-documentation/master/applications/airflow/values.yml \\ --set webserver.defaultUser.password=yourpassword \\ --set dags.gitSync.repo=https://github.com/bio2kg/bio2kg-etl.git \\ --set dags.gitSync.branch=main \\ --set dags.gitSync.subPath=workflows/dags info If you need to do more configuration you can download the values.yml file, edit it directly to match your settings, and use this file locally with -f values.yml A few seconds after Airflow starts to install, you will need to fix the postgresql deployment in a different terminal window (unfortunately setting the serviceAccount.name of the postgresql sub-chart doesn't work, even though it should be possible according to the official Helm docs). Run this command to fix postgresql: oc patch statefulset/airflow-postgresql --patch '{\"spec\":{\"template\":{\"spec\": {\"serviceAccountName\": \"anyuid\"}}}}' Once Airflow has finished deploying, you can access its web interface temporarily by forwarding the webserver on your machine at http://localhost:8080 oc port-forward svc/airflow-webserver 8080:8080 Or permanently expose the interface on a URL accessible when logged in to the UM VPN, with HTTPS enabled: oc expose svc/airflow-webserver && oc patch route/airflow-webserver --patch '{\"spec\":{\"tls\": {\"termination\": \"edge\", \"insecureEdgeTerminationPolicy\": \"Redirect\"}}}' Finally, get the route to the Airflow web interface, or access it via the DSRI web UI: oc get routes","s":"Deploy Airflow","u":"/docs/workflows-airflow","h":"#deploy-airflow","p":773},{"i":779,"t":"You can find example DAGs for the bash operator, python operator and Kubernetes pod operator here. 
Here is an example of a DAG using the Kubernetes pod operator to run tasks as pods; you will need to change the namespace parameter to the DSRI project where Airflow is deployed: from airflow import DAG from datetime import datetime, timedelta from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator from airflow.operators.dummy_operator import DummyOperator default_args = { 'owner': 'airflow', 'depends_on_past': False, 'start_date': datetime.utcnow(), 'email': ['airflow@example.com'], 'email_on_failure': False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(minutes=5) } dag = DAG( 'kubernetes_pod_operator', default_args=default_args, schedule_interval=None # schedule_interval=timedelta(minutes=10) ) start = DummyOperator(task_id='run_this_first', dag=dag) passing = KubernetesPodOperator( namespace='CHANGEME', image=\"python:3.6\", cmds=[\"python\",\"-c\"], arguments=[\"print('hello world')\"], labels={\"app\": \"airflow\"}, name=\"passing-test\", task_id=\"passing-task\", get_logs=True, dag=dag) passing.set_upstream(start)","s":"Example workflows","u":"/docs/workflows-airflow","h":"#example-workflows","p":773},{"i":781,"t":"helm uninstall airflow","s":"Delete the chart","u":"/docs/workflows-airflow","h":"#delete-the-chart","p":773},{"i":783,"t":"Here are a few links for more details on the official Airflow Helm chart: Helm chart docs Helm chart source code Helm chart parameters Other ways to deploy Airflow on OpenShift: Community Helm chart GitHub repo Airflow template for OpenShift","s":"See also","u":"/docs/workflows-airflow","h":"#see-also","p":773},{"i":785,"t":"Guides Workflows Introduction to workflows","s":"Introduction to workflows","u":"/docs/workflows-introduction","h":"","p":784},{"i":787,"t":"Multiple technologies are available to run workflows on OpenShift/Kubernetes clusters. Each has its strengths and weaknesses in different areas. Use-case dependent The technology to use needs to be chosen depending on your use-case.","s":"Introduction","u":"/docs/workflows-introduction","h":"#introduction","p":784},{"i":789,"t":"Those solutions can easily be deployed on the DSRI.","s":"Current solutions on the DSRI","u":"/docs/workflows-introduction","h":"#current-solutions-on-the-dsri","p":784},{"i":791,"t":"GitHub Actions allows you to define automated containerized workflows through a simple YAML file hosted in your GitHub repository. See the page about GitHub Actions runners for more details, and to deploy runners on the DSRI.","s":"GitHub Actions workflows","u":"/docs/workflows-introduction","h":"#github-actions-workflows","p":784},{"i":793,"t":"Airflow is a platform to programmatically author, schedule and monitor workflows, aka. DAGs (directed acyclic graphs). See the page about Airflow for more details, and to deploy Airflow on the DSRI.","s":"Apache Airflow","u":"/docs/workflows-introduction","h":"#apache-airflow","p":784},{"i":795,"t":"Argo is a container native workflow engine for Kubernetes supporting both DAG and step based workflows. Workflows are easy to define using Kubernetes-like YAML files, especially if your workflow is composed of Docker containers to run with arguments (a minimal sketch follows below). 
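For comparison with the Airflow DAG above, a minimal sketch of a similar two-step DAG as an Argo workflow (the names and image are placeholders):

cat <<'EOF' > hello-dag.yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: hello-dag-
spec:
  entrypoint: main
  templates:
    - name: main
      dag:
        tasks:
          - name: task-a
            template: echo
          - name: task-b
            dependencies: [task-a]   # runs after task-a completes
            template: echo
    - name: echo
      container:
        image: python:3.6
        command: [python, -c]
        args: ["print('hello world')"]
EOF
argo submit --watch hello-dag.yaml
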
Contact us Contact us if you want to run Argo workflows on the DSRI","s":"Argo","u":"/docs/workflows-introduction","h":"#argo","p":784},{"i":797,"t":"Let us know if you are interested in deploying, and using, any of those workflows on the DSRI.","s":"More options","u":"/docs/workflows-introduction","h":"#more-options","p":784},{"i":799,"t":"Optimized for Tensorflow workflows on Kubernetes. Pipelines written in Python.","s":"Kubeflow","u":"/docs/workflows-introduction","h":"#kubeflow","p":784},{"i":801,"t":"Define, schedule and run workflows. Can be deployed with OpenDataHub, see also this deployment for OpenShift. See also: Airflow on Kubernetes blog, and Kubernetes in Airflow documentation.","s":"Apache Airflow","u":"/docs/workflows-introduction","h":"#apache-airflow-1","p":784},{"i":803,"t":"Run batch pipelines on Kubernetes with Volcano. More a scheduler than a workflow engine. Volcano can be used to run Spark, Kubeflow or KubeGene workflows.","s":"Volcano","u":"/docs/workflows-introduction","h":"#volcano","p":784},{"i":805,"t":"Nextflow has been developed by the genomic research scientific community and is built to run bioinformatics pipelines. Define your workflow in a Bash script fashion, providing input, output and the command to run, without the need to create and use Docker containers for Conda pipelines.","s":"Nextflow","u":"/docs/workflows-introduction","h":"#nextflow","p":784},{"i":807,"t":"Developed by the genomic research scientific community. Good support for provenance description (export as RDF). Support on OpenShift is still in development. Proposes a GUI to build the workflows: Rabix Composer","s":"CWL","u":"/docs/workflows-introduction","h":"#cwl","p":784},{"i":809,"t":"KubeGene is a turn-key genome sequencing workflow management framework. See the Workflow example, and how to define a tool.","s":"KubeGene","u":"/docs/workflows-introduction","h":"#kubegene","p":784},{"i":811,"t":"Open-source platform for rapidly deploying machine learning models on Kubernetes. Manage, serve and scale models built in any framework on Kubernetes. Contact us Feel free to contact us if you have any questions about running workflows on DSRI or to request the support of a new technology.","s":"Seldon","u":"/docs/workflows-introduction","h":"#seldon","p":784},{"i":813,"t":"Guides Workflows Run Argo workflows","s":"Run Argo workflows","u":"/docs/workflows-argo","h":"","p":812},{"i":815,"t":"Argo 🦑 is a container native workflow engine for Kubernetes supporting both DAG and step based workflows. Download and install the Argo client on your computer to start workflows on the DSRI.","s":"Install the argo client","u":"/docs/workflows-argo","h":"#install-the-argo-client","p":812},{"i":817,"t":"sudo curl -L -o /usr/local/bin/argo https://github.com/argoproj/argo/releases/download/v2.4.2/argo-linux-amd64 && sudo chmod +x /usr/local/bin/argo","s":"On Ubuntu","u":"/docs/workflows-argo","h":"#on-ubuntu","p":812},{"i":819,"t":"brew install argoproj/tap/argo","s":"On MacOS","u":"/docs/workflows-argo","h":"#on-macos","p":812},{"i":821,"t":"Get the Argo executable version 2.4.2 from Argo Releases on GitHub. See the official Argo documentation.","s":"On Windows","u":"/docs/workflows-argo","h":"#on-windows","p":812},{"i":823,"t":"Run the Hello world workflow to test that Argo has been properly installed, and take a look at the examples provided in the Argo documentation to discover how to use the different features available. 
argo submit --watch https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml Logged in You will need to have the oc client installed and be logged in with oc login, see the install documentation page.","s":"Test Argo","u":"/docs/workflows-argo","h":"#test-argo","p":812},{"i":826,"t":"Deploy the Argo Helm chart. Install and use helm Add the Helm charts repository: helm repo add argo https://argoproj.github.io/argo-helm Install the chart: helm install my-argo argo/argo --version 0.15.2","s":"Argo workflows with Helm","u":"/docs/workflows-argo","h":"#argo-workflows-with-helm","p":812},{"i":828,"t":"Ask on the DSRI Slack #helpdesk channel to have the ArgoCD Operator installed in your project.","s":"ArgoCD Operator","u":"/docs/workflows-argo","h":"#argocd-operator","p":812},{"i":830,"t":"On Ubuntu: sudo rm /usr/local/bin/argo You can now reinstall a newer version of Argo.","s":"Uninstall argo","u":"/docs/workflows-argo","h":"#uninstall-argo","p":812},{"i":832,"t":"We will use examples from the MaastrichtU-IDS/d2s-core project.","s":"Run workflows to convert structured data to RDF","u":"/docs/workflows-argo","h":"#run-workflows-to-convert-structured-data-to-rdf","p":812},{"i":834,"t":"git clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git && cd d2s-project-template Authenticate to the OpenShift cluster using oc login.","s":"Clone the repository","u":"/docs/workflows-argo","h":"#clone-the-repository","p":812},{"i":836,"t":"Steps-based workflow for XML files, see the example workflow YAML file on GitHub. argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml.yaml \\ -f support/config/config-transform-xml-drugbank.yml Provide config files Config files can be provided using the -f argument, but are not necessary. DAG workflow for XML files, see the YAML file on GitHub. argo submit d2s-core/argo/workflows/d2s-workflow-transform-xml-dag.yaml \\ -f support/config/config-transform-xml-drugbank.yml","s":"Workflow to convert XML files to RDF","u":"/docs/workflows-argo","h":"#workflow-to-convert-xml-files-to-rdf","p":812},{"i":838,"t":"Steps-based workflow for CSV files argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv.yaml \\ -f support/config/config-transform-csv-stitch.yml DAG workflow for CSV files argo submit d2s-core/argo/workflows/d2s-workflow-transform-csv-dag.yaml \\ -f support/config/config-transform-csv-stitch.yml Solve issue Try this to solve issues related to step services IP: {{steps.nginx-server.pod-ip}}","s":"Workflow to convert CSV files to RDF","u":"/docs/workflows-argo","h":"#workflow-to-convert-csv-files-to-rdf","p":812},{"i":841,"t":"argo list","s":"List running Argo workflows","u":"/docs/workflows-argo","h":"#list-running-argo-workflows","p":812},{"i":843,"t":"argo terminate my-workflow Workflow This might not stop the workflow, in that case use: argo delete my-workflow","s":"Stop a workflow","u":"/docs/workflows-argo","h":"#stop-a-workflow","p":812},{"i":845,"t":"argo delete my-workflow","s":"Delete a workflow","u":"/docs/workflows-argo","h":"#delete-a-workflow","p":812},{"i":847,"t":"Get into a container, to understand why it fails, by creating a YAML with the command tail -f /dev/null to keep it hanging. 
See the example in the d2s-argo-workflow repository: apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: generateName: test-devnull-argo- spec: entrypoint: execute-workflow # Use existing volume volumes: - name: workdir persistentVolumeClaim: claimName: pvc-mapr-projects-test-vincent templates: - name: execute-workflow steps: - - name: run-rdfunit template: rdfunit - name: rdfunit container: image: umids/rdfunit:latest command: [tail] args: [\"-f\", \"/dev/null\"] volumeMounts: - name: workdir mountPath: /data subPath: dqa-workspace Then start the workflow: argo submit --serviceaccount argo tests/test-devnull-argo.yaml And connect with the Shell (change the pod ID to your pod ID): oc rsh test-devnull-argo-pod","s":"Debug a workflow","u":"/docs/workflows-argo","h":"#debug-a-workflow","p":812},{"i":849,"t":"Guides Workflows Run Nextflow workflows","s":"Run Nextflow workflows","u":"/docs/workflows-nextflow","h":"","p":848},{"i":851,"t":"Install the nextflow client on your computer: wget -qO- https://get.nextflow.io | bash Official documentation See the Nextflow documentation.","s":"Install Nextflow","u":"/docs/workflows-nextflow","h":"#install-nextflow","p":848},{"i":853,"t":"Try the hello world workflow from Nextflow using an existing storage: nextflow kuberun https://github.com/nextflow-io/hello -v pvc-mapr-projects-showcase:/data Use Conda environments You can easily define Conda environments and workflows with Nextflow.","s":"Run workflow","u":"/docs/workflows-nextflow","h":"#run-workflow","p":848},{"i":855,"t":"Guides Workflows Run CWL workflows","s":"Run CWL workflows","u":"/docs/workflows-cwl","h":"","p":854},{"i":857,"t":"Git clone in /calrissian on a persistent volume on the cluster from a terminal. cd /data/calrissian && git clone --recursive https://github.com/MaastrichtU-IDS/d2s-project-template.git && cd d2s-project-template You will need to create the folder for the workflow output data, in our example it is output-data: mkdir /data/calrissian/output-data You might need to give permissions (CWL execution will fail due to permissions issues otherwise). chmod -R 777 /data/calrissian","s":"Clone the repository","u":"/docs/workflows-cwl","h":"#clone-the-repository","p":854},{"i":859,"t":"Start the CWL execution from your computer using the oc client. Define the CWL command arguments to run in run-workflows-cwl.yaml (be careful to properly define the paths to the CWL files in the pod storage). oc create -f d2s-core/support/run-workflows-cwl.yaml Delete the pod You will need to delete the pod if you want to re-create it.","s":"Start pod","u":"/docs/workflows-cwl","h":"#start-pod","p":854},{"i":861,"t":"oc delete -f d2s-core/support/run-workflows-cwl.yaml","s":"Delete created pod","u":"/docs/workflows-cwl","h":"#delete-created-pod","p":854},{"i":863,"t":"Guides Workflows Deploy GitHub Runners","s":"Deploy GitHub Runners","u":"/docs/workflows-github-actions","h":"","p":862},{"i":865,"t":"You will need to have Helm installed on your computer to deploy a GitHub Actions Runner, see the Helm docs for more details. Install the Helm chart to be able to deploy the GitHub Actions Runner on the DSRI: helm repo add openshift-actions-runner https://redhat-actions.github.io/openshift-actions-runner-chart && helm repo update Then create a GitHub Personal Access Token as per the instructions in the runner image README. tl;dr: go to your Settings on GitHub: https://github.com/settings/tokens, click the button to create a new token, give it a meaningful name (e.g. 
DSRI Runner my-project), and check the following permissions: ✅️ repo (maybe also workflow?) ✅️ admin:org if the Runner is for an organization","s":"Install the chart","u":"/docs/workflows-github-actions","h":"#install-the-chart","p":862},{"i":867,"t":"Before deploying the runner, make sure you are in the project where you want to deploy it: oc project my-project","s":"Deploy a Runner","u":"/docs/workflows-github-actions","h":"#deploy-a-runner","p":862},{"i":869,"t":"Deploy a runner available for all repositories of an organization (you can fine tune the access via GitHub Settings) Provide the token previously created, and the organization name export GITHUB_PAT=\"TOKEN\" export GITHUB_OWNER=My-Org Deploy the runner for the organization: helm install actions-runner openshift-actions-runner/actions-runner \\ --set-string githubPat=$GITHUB_PAT \\ --set-string githubOwner=$GITHUB_OWNER \\ --set runnerLabels=\"{ dsri, $GITHUB_OWNER }\" \\ --set replicas=3 \\ --set serviceAccountName=anyuid \\ --set memoryRequest=\"512Mi\" \\ --set memoryLimit=\"100Gi\" \\ --set cpuRequest=\"100m\" \\ --set cpuLimit=\"64\" You can also change the default runner image: --set runnerImage=ghcr.io/vemonet/github-actions-conda-runner \\ --set runnerTag=latest Check out all available parameters here. Check the deployment: helm get manifest actions-runner | kubectl get -f - Go to your organization Settings page on GitHub, then go to the Actions tab, and scroll to the bottom. In the list of active runners you should see the runners you just deployed.","s":"For an organization","u":"/docs/workflows-github-actions","h":"#for-an-organization","p":862},{"i":871,"t":"You can also deploy a runner for a specific repository: export GITHUB_PAT=\"TOKEN\" # For an org runner, this is the org. # For a repo runner, this is the repo owner (org or user). export GITHUB_OWNER=vemonet # For an org runner, omit this argument. # For a repo runner, the repo name. export GITHUB_REPO=shapes-of-you Deploy the runner: helm install actions-runner openshift-actions-runner/actions-runner \\ --set-string githubPat=$GITHUB_PAT \\ --set-string githubOwner=$GITHUB_OWNER \\ --set-string githubRepository=$GITHUB_REPO \\ --set runnerLabels=\"{ dsri, anything-helpful }\"","s":"For a repository","u":"/docs/workflows-github-actions","h":"#for-a-repository","p":862},{"i":873,"t":"You can now set up GitHub Actions workflows, in the .github/workflows folder, to be run on this runner (the repository needs to be under the organization or user you added the runner to). The job will be sent to run on the DSRI (a complete sketch follows below): jobs: your-job: runs-on: [\"self-hosted\", \"dsri\", \"my-org\" ] steps: ...","s":"Define Actions to run on DSRI","u":"/docs/workflows-github-actions","h":"#define-actions-to-run-on-dsri","p":862},{"i":875,"t":"helm uninstall actions-runner","s":"Uninstall the runner","u":"/docs/workflows-github-actions","h":"#uninstall-the-runner","p":862},{"i":877,"t":"Experimental Experimental: this deployment workflow is still experimental, let us know on Slack if you are interested in using it. Alternatively you can also build and deploy your application using a GitHub Actions workflow. 
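Expanding the runs-on snippet above into a complete workflow file, a minimal sketch (the file path and job content are hypothetical; the labels must match the runnerLabels you set when deploying the runner):

cat <<'EOF' > .github/workflows/test-on-dsri.yml
name: Test on DSRI runner
on: [push]
jobs:
  test:
    # Labels must match the ones given to the deployed runner
    runs-on: [ self-hosted, dsri ]
    steps:
      - uses: actions/checkout@v2
      - name: Run a command on the DSRI
        run: echo "Running on a DSRI self-hosted runner"
EOF
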
You will need to connect to the UM VPN in your workflow by defining 2 secrets for VPN_USER and VPN_PASSWORD, this is done by this step: - name: Connect to the VPN run: | sudo apt-get install -y openconnect network-manager-openconnect echo '${{ secrets.VPN_PASSWORD }}' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl sleep 10 RedHat documentation RedHat provides the following instructions and template to deploy an application on OpenShift The OpenShift Starter workflow will: Checkout your repository Perform a Docker build Push the built image to an image registry Log in to your OpenShift cluster Create an OpenShift app from the image and expose it to the internet. Before you begin: Have write access to a container image registry such as quay.io or Dockerhub. Have access to an OpenShift cluster. For instructions to get started with OpenShift see https://www.openshift.com/try The project you wish to add this workflow to should have a Dockerfile. If you don't have a Dockerfile at the repository root, see the buildah-build step. Builds from scratch are also available, but require more configuration. To get the workflow running: Add this workflow to your repository. Edit the top-level 'env' section, which contains a list of environment variables that must be configured. Create the secrets referenced in the 'env' section under your repository Settings. Edit the 'branches' in the 'on' section to trigger the workflow on a push to your branch. Commit and push your changes. For a more sophisticated example, see https://github.com/redhat-actions/spring-petclinic/blob/main/.github/workflows/petclinic-sample.yaml Also see our GitHub organization, https://github.com/redhat-actions/ name: Deploy to OpenShift# ⬇️ Modify the fields marked with ⬇️ to fit your project, and create any secrets that are referenced.# https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secretsenv: # ⬇️ EDIT with your registry and registry path. REGISTRY: ghcr.io/maastrichtu-ids # ⬇️ EDIT with your registry username. REGISTRY_USER: REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }} # ⬇️ EDIT to log into your OpenShift cluster and set up the context. # See https://github.com/redhat-actions/oc-login#readme for how to retrieve these values. OPENSHIFT_SERVER: ${{ secrets.OPENSHIFT_SERVER }} OPENSHIFT_TOKEN: ${{ secrets.OPENSHIFT_TOKEN }} # ⬇️ EDIT with the port your application should be accessible on. APP_PORT: 8080 # ⬇️ EDIT if you wish to set the kube context's namespace after login. Leave blank to use the default namespace. OPENSHIFT_NAMESPACE: \"\" # If you wish to manually provide the APP_NAME and TAG, set them here, otherwise they will be auto-detected. APP_NAME: \"my-app\" TAG: \"\"on: # https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows push: # Edit to the branch(es) you want to build and deploy on each push. 
branches: [ main ]jobs: openshift-ci-cd: name: Build and deploy to OpenShift runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 - name: Determine app name if: env.APP_NAME == '' run: | echo \"APP_NAME=$(basename $PWD)\" | tee -a $GITHUB_ENV - name: Determine tag if: env.TAG == '' run: | echo \"TAG=${GITHUB_SHA::7}\" | tee -a $GITHUB_ENV # https://github.com/redhat-actions/buildah-build#readme - name: Build from Dockerfile uses: redhat-actions/buildah-build@v1 with: image: ${{ env.APP_NAME }} tag: ${{ env.TAG }} # If you don't have a dockerfile, see: # https://github.com/redhat-actions/buildah-build#building-from-scratch # Otherwise, point this to your Dockerfile relative to the repository root. dockerfiles: | ./Dockerfile # https://github.com/redhat-actions/push-to-registry#readme - name: Push to registry id: push-to-registry uses: redhat-actions/push-to-registry@v1 with: image: ${{ env.APP_NAME }} tag: ${{ env.TAG }} registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} # The path the image was pushed to is now stored in ${{ steps.push-to-registry.outputs.registry-path }} - name: Connect to the VPN run: | sudo apt-get install -y openconnect network-manager-openconnect echo '${{ secrets.VPN_PASSWORD }}' | sudo openconnect --passwd-on-stdin --no-xmlpost --non-inter --background --authgroup 01-Employees --user ${{ secrets.VPN_USER }} vpn.maastrichtuniversity.nl sleep 10 # oc-login works on all platforms, but oc must be installed first. # The GitHub Ubuntu runner already includes oc. # https://github.com/redhat-actions/oc-login#readme - name: Log in to OpenShift uses: redhat-actions/oc-login@v1 with: openshift_server_url: ${{ env.OPENSHIFT_SERVER }} openshift_token: ${{ env.OPENSHIFT_TOKEN }} insecure_skip_tls_verify: true namespace: ${{ env.OPENSHIFT_NAMESPACE }} # This step should create a deployment, service, and route to run your app and expose it to the internet. # Feel free to replace this with 'oc apply', 'helm install', or however you like to deploy your app. 
## See also

- GitHub runner chart repository: https://github.com/redhat-actions/openshift-actions-runner-chart
- Image for the runner: https://github.com/redhat-actions/openshift-actions-runner
- An action to automatically deploy a runner on a cluster (requires connecting to the VPN with openconnect first): https://github.com/redhat-actions/openshift-actions-runner
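To deploy a self-hosted runner with the chart above, a Helm install along these lines should work once you are logged in to your project. The Helm repository URL, chart name, and value names here are assumptions; check the chart's README for the exact coordinates:

```bash
# Assumed repo URL and chart name -- verify against
# https://github.com/redhat-actions/openshift-actions-runner-chart before running.
helm repo add openshift-actions-runner https://redhat-actions.github.io/openshift-actions-runner-chart
helm repo update

# githubPat and githubOwner are assumed value names from the chart's documentation:
# a GitHub personal access token and the org/user the runner should register with.
helm install actions-runner openshift-actions-runner/actions-runner \
  --set-string githubPat="$GITHUB_PAT" \
  --set-string githubOwner="your-github-org-or-user"
```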
89,397,2.424,420,2.772,454,2.838,532,1.954,540,2.7,565,2.914,571,2.347,593,2.245,605,2.154,665,2.772,848,1.618,850,2.555,865,3.634,874,5.335,904,2.213,929,3.285,974,2.909,975,3.285,1014,2.555,1050,3.416,1084,4.938,1141,3.075,1142,5.754,1143,3.77,1144,3.77,1145,3.77,1146,3.77,1147,3.77,1148,3.77,1149,5.269,1150,3.77,1151,3.173,1152,3.416,1153,5.559,1154,3.173,1188,3.77,1781,4.435,1782,4.435,1783,4.435]],["t/324",[170,1.549,178,3.221,189,2.688,190,6.695,215,2.99,219,4.809,227,2.889,350,3.624,373,4.055,512,2.767,571,3.51,576,4.35,604,3.624,854,4.652,861,4.146,862,4.467,1118,5.999,1397,4.467,1449,5.638,1784,5.638,1785,6.632,1786,6.632,1787,6.632]],["t/326",[185,2.613,195,2.073,214,5.012,215,3.923,234,4.404,287,2.248,503,3.593]],["t/328",[168,1.577,170,1.779,172,0.862,185,1.648,195,1.308,214,4.388,215,3.435,216,4.234,231,2.136,236,2.475,237,3.437,255,1.634,264,2.573,284,1.934,308,2.136,310,2.661,356,3,408,1.325,414,1.839,424,2.819,438,2.952,470,3.601,496,2.936,503,2.267,512,2.291,529,2.136,530,2.446,531,3.601,532,2.419,537,2.666,538,3.899,539,3,622,2.779,885,2.392,984,3.806,1098,3.513,1099,3.223,1690,3.601,1788,5.49,1789,5.49]],["t/330",[162,1.812,172,1.345,173,1.326,189,0.934,198,1.383,207,1.368,214,3.761,215,1.509,233,2.093,236,2.379,264,1.131,271,1.719,273,1.546,276,1.965,280,1.772,282,2.053,289,4.064,310,1.169,311,1.928,337,1.427,339,2.047,347,1.648,350,1.83,365,2.047,375,3.226,386,3.723,387,2.709,389,2.325,393,1.221,395,1.2,397,1.83,420,2.093,454,2.142,523,1.341,540,2.697,565,3.302,571,1.772,593,2.671,605,1.626,621,1.443,645,1.861,665,3.298,848,2.941,850,1.928,865,4.481,874,5.135,904,1.671,907,2.395,951,2.196,974,2.196,975,2.48,1013,2.48,1014,1.928,1021,2.883,1050,2.579,1084,4.269,1142,4.837,1143,2.846,1144,2.846,1145,2.846,1146,2.846,1147,2.846,1148,2.846,1149,4.251,1150,2.846,1151,2.395,1152,2.579,1153,4.486,1154,2.395,1332,2.142,1690,2.196,1790,3.348,1791,2.846,1792,2.48,1793,3.045,1794,4.486]],["t/332",[195,2.403,203,5.512,214,4.885,215,3.823,216,4.713,281,2.666,282,3.3]],["t/334",[175,2.623,181,3.936,252,5.584,1795,6.881,1796,8.126]],["t/336",[162,2.489,189,1.427,191,2.892,198,2.993,252,3.196,255,1.965,275,3.777,337,2.179,352,3.353,387,2.625,395,1.832,408,1.234,529,1.989,530,2.278,621,2.203,858,2.706,936,3.001,945,2.42,1058,3.062,1159,4.119,1161,2.794,1268,3.938,1324,3.787,1392,4.028,1410,3.196,1796,7.659,1797,5.112,1798,5.112,1799,4.119,1800,5.112,1801,3.938,1802,5.112,1803,5.112,1804,9.159,1805,5.112,1806,5.112,1807,5.112,1808,5.112,1809,4.346,1810,5.112,1811,5.112,1812,4.65,1813,4.65,1814,5.112,1815,5.112,1816,5.112,1817,5.112,1818,4.65,1819,4.119]],["t/338",[465,4.85,1820,9.306]],["t/341",[160,3.413,176,1.989,195,1.437,197,3.391,217,2.82,235,4.317,287,1.559,295,5.125,301,3.861,311,4.681,403,3.354,436,3.772,471,4.97,645,3.354,658,3.354,762,4.317,904,3.011,986,5.08,1155,4.648,1750,4.317,1821,6.034,1822,7.393,1823,5.489,1824,6.034,1825,7.393,1826,5.489,1827,5.489,1828,6.034,1829,6.034,1830,5.489,1831,5.489,1832,6.034]],["t/343",[175,2.007,197,2.852,267,4.18,287,1.766,295,4.599,298,2.711,299,2.885,423,3.411,436,4.273,439,2.823,566,3.799,620,4.18,853,4.18,890,4.89,922,5.507,929,6.536,971,5.507,989,8.026,1282,5.811,1778,5.811,1833,6.218,1834,6.835,1835,6.835,1836,6.835]],["t/345",[181,2.635,192,4.43,195,1.924,250,4.278,287,2.087,290,6.792,294,5.299,295,4.768,298,2.372,301,3.826,401,3.027,403,3.324,423,4.565,564,2.262,621,2.577,650,4.818,659,4.43,690,4.278,719,3.445,890,4.278,922,4.818,1021,3.268,1410,3.738,1527,4.278,1750,4.278,1833,5.44,1837,5.98,1838,5.98,1839,5.98,1840,5.98,1841,5.44,
1842,5.98]],["t/347",[195,2.157,203,4.948,287,2.708]],["t/349",[168,2.114,173,1.851,181,4.075,185,2.21,199,4.075,203,5.055,215,3.319,225,2.13,366,3.896,369,3.575,493,3.399,671,4.091,865,4.091,1033,4.164,1139,4.408,1555,5.782,1711,5.266]],["t/351",[170,1.282,172,1.196,173,1.38,175,1.612,176,1.809,195,1.308,199,3.356,203,4.163,210,2.291,212,3.357,214,3.162,215,2.475,217,2.566,225,2.532,228,3.513,229,3.162,230,2.819,263,3.927,284,1.934,303,1.985,305,4.228,330,5.868,349,2.315,419,3.223,446,3.09,451,2.566,493,2.535,503,2.267,564,2.077,571,2.905,618,3.513,711,3.288,719,4.388,763,3.927,890,3.927,992,3.223,1038,3.698,1116,3.927,1162,6.93,1843,4.667,1844,4.667]],["t/353",[160,2.914,170,1.974,172,1.144,175,1.513,181,2.27,195,1.227,199,3.725,200,2.244,203,4.62,210,2.15,212,3.15,227,2.244,264,2.461,274,2.863,282,2.004,284,1.815,287,1.331,310,2.545,337,2.196,350,2.815,381,1.458,393,1.879,408,1.759,414,1.726,432,3.816,446,2.382,457,3.085,496,3.258,503,3.796,529,2.004,530,2.296,531,3.379,621,2.22,636,2.47,763,3.686,936,3.024,947,2.726,1021,2.815,1058,3.085,1527,3.686,1844,4.38,1845,5.152,1846,4.38,1847,5.152]],["t/355",[160,1.983,168,1.007,172,1.432,173,0.881,175,1.029,176,1.803,180,1.948,181,1.545,182,2.099,185,2.282,188,2.508,189,0.978,192,2.597,193,2.851,194,2.243,199,2.41,200,1.527,202,1.659,203,4.153,207,1.433,208,1.983,210,1.463,215,3.426,225,2.199,228,2.243,229,2.019,230,1.8,231,2.617,236,2.466,237,2.986,255,0.752,265,2.3,270,1.545,275,1.827,282,1.364,294,2.3,305,2.7,306,2.362,349,1.478,369,1.703,370,3.805,397,2.989,409,1.364,414,1.174,420,2.191,434,1.75,446,1.146,472,3.276,493,1.619,512,2.807,537,1.703,545,2.825,571,1.855,574,2.019,605,1.703,617,2.597,621,1.511,671,1.948,681,2.362,702,2.597,711,3.276,719,2.019,730,2.508,848,1.279,854,1.885,927,2.825,1014,2.019,1027,2.362,1049,2.191,1107,1.855,1123,1.983,1131,1.75,1139,2.099,1472,2.98,1527,2.508,1555,2.191,1690,2.3,1746,2.7,1799,2.825,1848,2.825,1849,3.506,1850,4.651,1851,3.506,1852,3.506,1853,3.506,1854,3.506]],["t/357",[152,3.971,162,0.966,163,0.826,164,2.559,168,0.808,170,0.657,172,1.16,173,0.707,178,2.823,181,2.022,189,0.785,195,1.76,203,4.567,207,1.15,210,1.174,225,0.814,255,0.984,264,1.55,271,2.356,274,1.564,281,1.442,287,1.186,288,0.942,299,1.5,301,2.936,310,1.603,327,2.686,330,2.167,337,2.477,339,1.72,340,1.95,348,4.174,365,3.554,367,1.466,370,1.591,373,1.72,381,0.796,393,2.12,394,1.685,395,3.123,396,1.564,398,1.984,407,1.95,419,1.652,424,1.445,457,1.685,458,1.591,488,2.392,499,1.385,505,1.074,523,1.838,529,1.095,530,1.254,540,1.895,555,2.392,606,1.537,609,2.559,612,3.01,656,1.845,667,1.489,671,1.564,674,3.01,783,1.591,858,2.429,859,2.267,866,1.895,868,2.392,887,1.591,904,1.404,936,1.652,937,1.513,938,1.8,998,1.95,1021,1.537,1084,2.643,1131,2.29,1151,2.013,1193,2.267,1216,1.685,1323,2.869,1324,2.084,1357,2.267,1442,2.167,1855,2.813,1856,2.559,1857,1.95,1858,2.392,1859,2.392,1860,2.392,1861,2.392,1862,6.099,1863,2.559,1864,2.559,1865,2.813,1866,2.559,1867,2.392,1868,2.813,1869,2.813,1870,2.813,1871,3.698,1872,5.812,1873,4.683,1874,2.559]],["t/359",[180,3.799,199,3.011,201,6.218,203,5.34,255,1.466,314,3.675,340,4.739,398,2.022,505,2.61,667,3.617,671,3.799,735,5.063,783,3.866,885,3.844,1208,3.411,1323,4.273,1691,4.89,1857,4.739,1858,5.811,1859,5.811,1860,5.811,1861,5.811,1875,8.026,1876,6.835]],["t/361",[152,2.353,163,1.285,172,1.017,178,2.126,195,1.836,199,1.928,200,1.907,203,4.969,205,4.44,210,1.826,255,1.389,271,3.325,287,1.991,349,1.846,369,2.126,393,2.362,395,2.321,398,1.295,418,2.098,424,2.248,426,3.371,457,2.621,463,2.392,505,2.943,523,
1.753,529,1.703,530,1.95,598,2.621,648,3.371,657,2.316,667,2.316,671,2.433,674,5.056,713,3.242,738,4.303,783,2.476,858,2.316,936,2.57,937,2.353,1014,2.521,1208,2.184,1216,4.616,1265,3.242,1323,4.048,1411,3.131,1711,3.131,1857,3.034,1858,3.721,1859,3.721,1860,3.721,1861,3.721,1862,7.749,1877,4.377,1878,3.981]],["t/363",[163,1.457,168,1.426,170,0.724,172,0.779,173,1.248,175,0.91,181,1.366,189,1.385,194,1.983,195,0.738,199,1.366,203,5.094,207,1.267,225,2.053,230,3.187,231,1.206,237,1.218,255,2.051,273,1.431,281,1.56,287,0.801,288,1.038,294,2.033,299,2.708,330,5.466,337,1.321,338,2.149,350,1.694,355,2.296,381,0.878,395,1.111,398,1.836,406,2.587,409,1.931,423,1.547,446,2.029,451,2.901,470,2.033,493,1.431,495,1.938,512,1.293,532,1.366,533,2.349,571,1.641,574,1.786,604,1.694,606,1.694,621,2.674,636,1.486,656,2.033,739,2.033,766,2.296,807,4.747,854,1.667,858,2.627,897,3.551,974,2.033,985,3.035,1032,2.088,1081,2.388,1208,4.341,1273,1.82,1282,4.22,1323,1.938,1673,4.22,1843,5.277,1844,2.635,1879,3.823,1880,4.22,1881,4.963,1882,3.1,1883,3.1,1884,3.1,1885,3.1,1886,3.1,1887,3.1,1888,3.1,1889,3.1,1890,2.82]],["t/365",[165,5.388,465,4.784,1830,8.349]],["t/367",[165,3.332,175,2.289,178,2.757,185,1.704,198,3.219,200,2.473,237,3.063,287,2.014,299,1.856,314,4.787,327,2.274,349,2.393,393,2.07,414,2.983,418,3.737,457,3.399,462,3.632,470,5.114,505,3.66,512,2.368,529,2.208,538,2.529,598,3.399,671,4.333,681,3.823,928,3.21,1017,5.163,1025,4.061,1141,3.935,1265,4.204,1567,5.163,1801,6.005,1878,5.163]],["t/369",[162,1.742,163,1.49,165,2.979,168,1.457,172,0.797,182,3.038,189,1.416,191,2.87,193,2.644,195,1.209,237,1.993,271,2.605,273,2.342,288,1.7,299,1.659,337,3.072,403,2.82,407,3.517,419,2.979,480,3.172,496,1.955,505,1.937,538,3.212,574,2.923,598,3.038,612,4.728,645,2.82,651,4.313,656,3.328,681,3.418,693,4.088,904,2.532,1030,4.077,1168,4.088,1187,3.63,1206,3.328,1222,3.63,1526,3.517,1551,3.418,1721,3.758,1866,4.615,1890,4.615,1891,4.088,1892,5.073,1893,4.615,1894,5.073,1895,3.418,1896,5.073,1897,4.313,1898,5.073,1899,5.073,1900,5.073,1901,5.073,1902,5.073,1903,5.073,1904,5.073,1905,5.073,1906,5.073]],["t/371",[165,2.42,170,0.577,172,0.388,173,1.036,175,0.725,193,1.288,195,0.589,197,1.031,217,1.155,227,1.076,253,2.42,255,0.884,266,1.767,270,2.337,280,1.307,281,1.296,287,1.371,299,1.735,303,3.205,327,2.125,337,1.053,350,1.35,369,1.2,381,0.699,389,3.644,393,0.901,398,1.22,408,1.662,409,1.604,414,0.828,434,1.233,439,1.02,441,1.397,444,1.269,446,2.898,451,1.155,496,1.589,505,0.943,523,1.652,529,0.961,532,1.088,534,4.042,565,3.068,566,3.442,568,1.397,589,1.373,598,2.469,604,2.899,606,2.899,607,3.322,612,2.704,631,1.991,674,3.48,693,1.991,709,1.903,717,1.903,849,3.709,850,3.056,853,1.511,862,3.574,866,1.664,885,1.796,886,2.247,887,1.397,908,2.704,945,2.931,949,2.638,985,1.511,995,1.903,1049,1.544,1065,3.709,1087,2.1,1103,2.858,1141,1.713,1152,1.903,1157,1.713,1203,1.767,1208,2.057,1273,1.45,1311,1.581,1332,1.581,1411,1.767,1549,1.903,1679,2.247,1750,1.767,1891,1.991,1907,2.1,1908,2.47,1909,2.47,1910,2.47,1911,2.47,1912,2.47,1913,2.47,1914,2.47,1915,2.47,1916,2.47,1917,3.505,1918,2.47,1919,2.47,1920,2.247,1921,2.47,1922,2.47,1923,2.47,1924,1.903,1925,2.47,1926,4.122,1927,2.47,1928,4.122,1929,2.47,1930,4.122,1931,2.47,1932,2.1,1933,2.1]],["t/373",[153,4.87,162,1.278,165,3.363,168,1.069,170,0.869,172,1.097,180,3.183,189,2.364,200,1.621,244,2.506,255,1.683,302,2.579,321,4.87,340,2.579,365,2.275,414,1.246,437,2.997,487,2.184,512,1.552,526,2.997,529,1.447,530,1.658,532,1.639,540,3.695,548,5.21,568,2.104,598,2.228,600,5.706,601,2.9
97,612,3.757,621,1.603,647,2.865,672,2.756,681,2.506,690,2.661,705,2.997,879,2.579,922,2.997,926,2.44,1014,2.143,1033,2.104,1141,2.579,1207,2.228,1216,2.228,1234,2.275,1284,2.38,1311,2.38,1548,2.756,1642,4.997,1746,2.865,1891,2.997,1907,3.163,1934,3.72,1935,3.72,1936,5.728,1937,5.728,1938,5.728,1939,5.728,1940,5.728,1941,5.728,1942,3.72,1943,5.21,1944,3.384,1945,3.384,1946,3.72,1947,3.163,1948,3.384,1949,3.72,1950,5.21,1951,3.72,1952,3.384,1953,3.72,1954,3.384,1955,3.384,1956,2.756,1957,3.72]],["t/375",[162,1.243,163,1.647,172,1.078,173,0.91,175,2.016,176,1.193,189,1.565,193,1.886,200,1.577,202,1.713,207,1.479,250,2.589,255,0.776,274,2.012,308,1.408,311,2.085,365,5.644,375,2.213,383,2.788,386,3.57,393,1.32,395,2.46,405,3.23,409,1.408,414,1.213,438,1.946,454,4.392,498,3.358,522,1.613,523,1.45,537,2.724,574,2.085,593,2.838,610,2.916,644,2.316,645,2.012,653,2.509,696,2.509,783,2.047,849,2.168,850,2.085,858,1.916,874,4.478,924,3.292,938,2.316,967,3.077,974,4.503,1029,2.916,1084,2.085,1131,1.806,1149,2.916,1154,2.589,1234,3.429,1323,2.263,1325,2.681,1332,2.316,1405,2.916,1743,2.916,1778,3.077,1791,3.077,1809,3.077,1958,3.292,1959,3.619,1960,3.619,1961,3.292,1962,3.619,1963,3.619,1964,3.292,1965,3.619,1966,5.608,1967,3.619,1968,3.619,1969,3.619,1970,3.619,1971,3.619,1972,2.788,1973,3.619,1974,3.619,1975,3.619,1976,3.619,1977,3.619,1978,3.292,1979,3.077]],["t/377",[162,2.774,163,1.756,165,5.371,170,1.886,172,0.939,185,1.796,231,3.981,237,4.02,255,1.733,271,3.071,370,3.383,388,4.028,408,1.95,409,2.327,484,3.511,512,3.371,537,2.905,619,3.738,656,3.923,659,5.984,693,4.818,1907,5.084,1980,8.078,1981,8.078]],["t/379",[282,3.672]],["t/381",[168,1.799,173,1.052,223,3.83,225,2.172,230,3.216,246,2.215,255,0.898,256,2.745,353,2.678,366,2.215,374,3.558,408,1.01,418,2.006,453,3.224,532,1.844,605,2.033,645,2.326,662,4.713,736,2.994,739,4.109,945,1.981,1123,2.367,1323,2.616,1396,2.506,1400,3.1,1700,3.558,1703,3.807,1843,6.382,1880,3.558,1895,2.819,1982,6.714,1983,4.185,1984,4.185,1985,8.333,1986,4.185,1987,6.264,1988,8.333,1989,4.185,1990,8.922,1991,4.185,1992,6.264,1993,4.185,1994,4.185,1995,6.264,1996,4.185,1997,3.807,1998,3.807,1999,4.185,2000,4.185,2001,4.185,2002,4.185,2003,4.185,2004,6.264,2005,4.185,2006,3.558,2007,4.185,2008,4.185,2009,4.185,2010,4.185,2011,4.185,2012,4.185,2013,3.558]],["t/383",[168,2.467,194,5.496,298,3.407,304,3.441,395,3.078,405,4.948,475,4.411,1982,6.921]],["t/385",[207,3.147,226,4.927,255,1.652,304,3.085,358,5.931,386,3.555,389,3.392,405,4.435,414,2.579,423,3.843,475,3.954,476,3.79,593,3.897,660,4.927,2014,7.7,2015,7.7,2016,7.7]],["t/387",[1216,5.573,1982,7.498]],["t/389",[499,4.518,522,4.09,1982,7.395]],["t/392",[176,1.871,207,2.32,223,4.767,225,2.774,255,1.218,281,1.784,295,4.063,308,2.208,349,2.393,356,4.26,424,2.915,518,3.935,529,2.208,530,2.529,533,3.69,564,2.147,575,3.21,662,3.21,671,3.155,882,4.061,1084,3.269,1763,5.775,1997,5.163,1998,5.163,2017,4.372,2018,7.796,2019,7.796,2020,5.676,2021,5.676,2022,5.676,2023,5.676,2024,5.676,2025,5.676,2026,5.163,2027,4.988,2028,5.676,2029,5.676]],["t/394",[172,0.965,173,1.545,225,2.38,255,2.125,256,4.031,272,4.26,284,2.165,295,3.203,303,2.975,365,3.758,366,3.252,397,3.358,408,1.484,534,3.608,620,3.758,621,2.648,645,3.415,783,5.246,1123,4.653,1252,5.396,1362,5.224,1414,4.733,1721,4.552,1819,4.951,2027,3.932,2030,6.145,2031,6.145,2032,6.145,2033,4.26,2034,6.145]],["t/396",[168,1.04,172,0.568,173,1.41,175,1.063,193,1.886,225,1.047,255,1.659,264,1.223,276,2.125,284,1.275,295,1.886,299,1.183,304,1.45,306,2.438,308,2.671,310,1.959,327,1.45,36
5,3.429,386,1.671,389,1.595,395,1.297,408,0.874,411,3.678,422,2.374,493,1.671,498,4.111,499,3.379,574,2.085,576,2.374,618,2.316,645,4.649,660,3.588,665,3.506,738,1.713,951,5.487,960,2.916,1046,2.916,1052,3.292,1065,4.63,1140,5.208,1142,2.681,1273,2.125,1329,2.916,1396,2.168,1397,2.438,1406,2.788,1545,2.168,1641,3.077,1763,4.154,1779,4.768,1945,3.292,1956,2.681,2027,2.316,2035,3.292,2036,3.619,2037,3.619,2038,4.518,2039,5.608,2040,5.608,2041,3.619,2042,3.619,2043,3.619,2044,6.573,2045,3.292,2046,3.619,2047,3.619,2048,5.608,2049,3.619,2050,3.619,2051,3.292,2052,3.619,2053,3.292,2054,3.619,2055,3.292,2056,3.292,2057,3.292,2058,3.292]],["t/398",[168,2.347,189,2.28,264,2.759,310,2.853,576,5.359,671,4.541,1406,6.293,2027,6.311,2055,7.432,2056,7.432,2057,7.432]],["t/400",[156,3.479,173,1.574,198,2.585,223,5.093,254,5.044,270,2.758,295,3.263,356,3.421,370,3.541,480,3.913,505,2.39,515,5.044,523,2.508,646,5.203,662,3.541,671,3.479,904,3.124,947,3.313,1818,5.694,1850,5.322,2026,7.577,2027,4.005,2059,6.26,2060,6.26,2061,9.361,2062,8.329,2063,8.329,2064,6.26,2065,6.26]],["t/402",[207,3.339,255,1.753,408,1.972,498,4.893,499,4.022,580,4.465,1140,5.504,1213,5.664,2066,8.17,2067,5.504,2068,8.17,2069,8.17]],["t/404",[195,1.287,255,1.159,310,1.886,417,4.16,439,3.582,498,3.235,499,2.659,522,4.182,580,5.394,605,2.623,645,5.486,937,2.904,1076,4.592,1140,6.886,1895,3.638,1972,6.681,2027,5.55,2035,6.852,2044,8.391,2070,4.352,2071,5.401,2072,6.852,2073,3.745,2074,5.401,2075,5.401,2076,4.913]],["t/407",[156,3.128,176,1.855,189,1.571,197,2.348,225,2.243,270,2.48,280,2.979,281,2.436,349,2.373,408,1.871,439,2.324,498,4.642,499,3.815,533,2.664,564,2.129,580,5.474,660,3.601,807,3.442,904,3.868,1063,5.221,1140,5.221,1416,3.902,2027,4.959,2051,5.12,2077,5.628,2078,5.628,2079,5.628,2080,5.628,2081,5.628,2082,5.628,2083,5.628,2084,4.785,2085,5.12,2086,5.12,2087,5.628,2088,5.628]],["t/409",[281,2.846,439,3.739,564,3.425,580,4.948]],["t/411",[175,2.459,310,2.925,474,4.737,580,4.577,1140,5.641,2027,5.358,2089,7.12,2090,8.374,2091,8.374,2092,8.374]],["t/413",[168,2.636,287,2.371,450,6.799]],["t/415",[172,1.392,178,2.734,179,4.845,180,3.128,189,2.163,205,3.242,255,1.207,287,2.002,299,1.84,324,4.535,381,1.593,414,2.596,423,2.809,424,2.89,440,3.692,450,4.169,532,3.906,540,3.201,598,3.371,854,3.026,992,3.304,1020,4.169,1021,4.236,1131,2.809,1500,4.169,1867,6.589,2093,4.785,2094,4.785,2095,7.75,2096,5.628,2097,5.628,2098,5.628,2099,5.628,2100,5.628,2101,5.628,2102,5.628,2103,5.628]],["t/417",[160,2.202,165,4.716,170,1.385,172,0.611,175,1.143,189,1.086,191,2.202,195,1.914,202,1.843,281,1.224,282,2.308,287,1.532,299,1.273,303,1.408,314,2.093,351,5.063,378,2.999,381,1.679,398,1.152,408,1.432,409,1.515,412,2.785,414,1.304,446,1.273,450,4.393,451,2.772,475,1.999,480,2.434,482,2.623,505,2.743,532,3.165,540,1.608,571,2.06,575,2.202,591,4.302,598,4.811,607,3.137,612,2.554,617,2.884,648,2.999,659,2.884,670,2.491,849,2.332,861,2.434,862,2.623,898,2.699,928,2.202,944,2.884,1014,2.243,1018,3.541,1311,3.795,1391,2.785,1444,4.568,1551,2.623,1642,2.785,1650,3.137,1723,3.541,1891,7.632,2104,3.541,2105,3.137,2106,3.893,2107,3.893,2108,3.893,2109,3.893,2110,3.893,2111,2.999,2112,3.893]],["t/419",[195,2.497,287,2.339,406,4.719]],["t/421",[162,2.462,163,1.479,168,1.446,170,1.176,172,0.791,189,1.405,207,2.058,248,3.392,255,1.538,275,2.624,288,2.402,290,3.491,298,3.312,299,2.344,304,2.017,367,2.624,381,2.576,389,3.678,398,2.47,405,4.809,406,3.737,408,1.216,409,2.79,512,2.101,523,2.017,662,4.055,762,3.602,848,1.837,865,4.64,874,2.549,1311,3.222,1332,3.222,
1632,4.281,1691,3.602,2113,5.035,2114,5.035,2115,5.035,2116,5.035,2117,4.58,2118,4.281,2119,7.169,2120,4.58]],["t/423",[163,2.555,290,6.032,398,2.574,406,4.535,409,3.385,451,4.067,996,6.702]],["t/425",[162,2.675,168,2.752,170,1.819,284,2.744,290,5.401,337,3.32,406,4.994,409,3.031,451,4.478,879,5.401,998,5.401,1081,6,2121,7.086]],["t/427",[162,1.763,163,2.135,168,1.475,172,0.508,173,0.813,175,0.95,176,1.066,181,1.425,185,1.917,189,0.903,195,1.521,208,1.83,219,1.798,231,1.998,236,2.315,237,3.115,255,1.37,264,1.093,282,1.259,287,1.327,290,5.04,298,1.283,299,1.679,308,1.259,310,1.13,381,0.916,387,2.637,395,1.159,398,2.15,402,1.637,406,2.676,408,1.541,409,2.484,444,1.661,446,1.058,451,1.512,471,1.978,492,1.571,495,3.21,496,1.247,499,1.592,512,1.35,538,3.761,539,3.489,605,1.571,613,1.476,621,1.394,622,1.637,632,2.75,642,2.122,657,1.712,667,1.712,858,3.379,904,2.562,928,1.83,937,3.433,1014,1.863,1025,2.314,1030,1.83,1032,3.459,1099,1.899,1208,4.212,1211,2.396,1252,2.122,1260,3.955,1261,2.492,1262,2.492,1323,2.022,1775,2.75,1857,3.56,1895,2.179,2094,2.75,2122,5.134,2123,2.942,2124,2.942,2125,6.384,2126,5.134,2127,3.235,2128,5.134,2129,3.235]],["t/429",[162,2.054,181,2.635,266,4.278,275,3.117,281,1.88,287,1.545,290,4.146,303,2.162,395,2.895,398,2.706,408,1.95,532,2.635,540,2.47,576,3.923,618,3.826,630,5.299,858,3.165,944,4.43,947,5.415,1154,4.278,1277,4.028,1278,4.818,1279,4.606,1632,5.084,1642,4.278,1857,5.601,2121,5.44,2130,5.98,2131,5.98,2132,5.44,2133,5.98,2134,5.44]],["t/431",[281,2.666,290,5.879,398,2.984,406,4.42,451,3.964,522,4.495,564,3.208]],["t/433",[163,2.825,195,2.131,255,1.228,284,2.016,287,2.026,298,2.271,303,3.98,369,2.78,398,1.693,405,4.517,409,2.227,446,3.548,451,3.665,532,2.522,645,3.182,662,3.238,848,2.088,908,3.755,985,3.501,1040,4.409,1208,3.913,2135,4.867,2136,5.724]],["t/435",[123,2.293,152,2.4,163,2.29,168,1.282,173,1.122,181,1.967,189,1.246,195,2.049,255,1.673,264,2.634,284,1.573,287,1.153,298,2.607,299,2.149,307,3.007,308,1.737,310,2.295,337,1.903,347,2.198,350,2.44,373,2.73,384,4.061,389,1.967,393,1.629,395,1.6,396,2.481,398,1.321,408,1.078,414,1.496,474,2.525,475,2.293,495,4.875,496,1.721,523,2.633,529,1.737,621,2.831,681,3.007,848,2.397,858,3.478,874,3.947,887,2.525,937,2.4,938,2.857,1030,2.525,1032,4.427,1065,2.674,1157,3.095,1208,3.279,1527,3.194,1600,4.061,1857,4.556,2123,4.061,2124,4.061,2137,4.464,2138,4.464,2139,4.464,2140,4.464]],["t/437",[465,4.784,1555,5.737,2141,7.07]],["t/439",[123,2.842,160,3.131,170,1.789,173,2.384,175,1.625,198,2.286,251,2.885,252,4.79,268,6.092,269,6.092,271,2.842,280,4.055,284,1.95,308,3.69,337,2.359,339,5.374,341,4.706,368,4.706,377,4.263,379,5.035,380,6.174,382,4.263,383,4.263,388,3.729,568,3.131,594,4.706,658,4.259,1033,3.131,2142,5.535,2143,5.035,2144,5.535,2145,5.535,2146,5.535,2147,5.535]],["t/441",[173,2.054,176,2.693,248,5.504,255,1.753,344,6.032,345,6.293,349,3.445,381,2.313,575,4.621,1280,6.293,1397,5.504]],["t/443",[170,2.392,172,1.366,175,2.555,308,3.385,564,3.291,1784,7.397]],["t/445",[168,2.128,170,1.73,172,0.828,173,1.862,189,1.472,196,3.296,199,2.323,200,2.297,210,3.091,215,2.377,216,2.931,219,2.931,225,1.526,288,1.767,308,2.052,350,2.882,415,4.483,431,4.483,444,3.804,446,2.422,493,2.435,496,3.3,503,2.178,537,2.561,589,2.931,664,3.552,711,3.158,848,2.702,861,5.805,862,3.552,865,2.931,874,2.669,932,4.483,964,4.249,992,3.096,1038,3.552,1107,2.791,1121,4.062,1139,3.158,1555,3.296,1932,4.483,2148,5.273,2149,4.797,2150,5.273,2151,5.273]],["t/447",[168,2.291,172,1.252,199,3.514,210,3.328,214,4.594,215,3.595,219,4.433,487,4.682,
684,5.372,854,4.288,1311,5.103,1526,5.529,1618,7.255,2152,7.975]],["t/449",[225,2.62,251,4.719,252,5.66,465,4.719]],["t/451",[160,4.21,170,2.175,173,1.871,175,2.186,255,1.597,268,5.16,269,5.16,271,3.822,280,4.93,284,2.622,339,4.552,340,5.16,341,6.328,342,6.771,343,4.763,344,4.552,345,5.733,346,6.328]],["t/453",[68,2.215,162,0.895,170,0.608,172,0.865,173,0.655,175,2.373,178,2.093,189,0.727,198,2.928,200,1.135,205,3.687,217,2.014,223,1.593,224,2.987,225,2.21,230,2.212,251,4.708,252,5.646,268,4.915,269,4.915,270,1.898,271,2.212,281,0.819,282,1.014,284,1.518,294,1.709,307,4.775,337,1.11,339,1.593,347,1.282,351,2.395,352,1.709,353,2.757,354,3.471,355,1.93,356,1.424,357,2.37,358,2.007,359,2.37,360,2.215,361,2.215,362,6.027,363,3.663,365,1.593,366,1.379,367,3.695,368,6.493,369,1.265,380,3.471,386,2.543,393,1.572,416,3.319,476,2.712,484,2.529,518,2.987,593,2.181,599,4.001,610,3.471,705,2.099,1156,2.099,1392,1.448,1493,2.215,1812,2.37,1813,6.449,1874,2.37,2153,4.308,2154,4.308,2155,4.308,2156,4.308,2157,3.919,2158,4.308,2159,2.37,2160,2.007,2161,2.605,2162,2.605,2163,2.605,2164,4.308,2165,4.308,2166,6.449,2167,2.605,2168,2.605,2169,2.215,2170,2.605]],["t/455",[195,2.128,225,2.585,287,2.308,934,3.849,935,4.458]],["t/458",[172,0.884,189,1.571,191,3.183,225,1.629,255,1.663,350,3.076,353,3.601,364,4.785,365,3.442,414,2.97,434,2.809,493,2.599,523,2.255,537,2.734,546,5.374,621,2.425,671,3.128,783,3.183,866,3.791,934,2.425,986,3.518,1400,4.169,1545,3.371,1819,4.535,2073,5.374,2171,5.628,2172,5.628,2173,5.628,2174,4.169,2175,5.628,2176,7.75,2177,5.12,2178,5.628,2179,5.628,2180,5.12,2181,5.628,2182,5.628,2183,7.75,2184,7.75,2185,5.628,2186,5.628]],["t/460",[172,1.284,225,3.133,255,1.306,281,2.902,295,3.174,353,5.232,364,6.952,365,5,533,2.882,564,3.733,600,4.102,662,3.444,740,3.575,934,4.666,1879,4.69,2187,6.089,2188,5.177,2189,6.089,2190,6.089]],["t/462",[434,4.644,934,4.01]],["t/464",[255,1.891,589,4.9,934,3.798,935,4.4,1577,7.495,2191,8.815]],["t/466",[163,1.185,168,1.159,172,0.633,196,3.809,197,2.542,198,1.666,225,2.124,255,0.865,281,1.268,287,1.042,288,3.094,298,1.6,350,2.205,367,2.102,386,3.777,395,1.446,398,2.42,405,2.324,458,2.282,459,2.646,474,3.446,492,1.959,533,2.884,540,1.666,564,2.305,606,3.33,616,3.25,621,2.625,630,3.996,660,2.581,848,2.223,934,4.131,935,3.664,947,3.224,965,7.851,1065,2.416,1159,3.25,1284,3.898,1720,4.224,1721,2.988,1895,2.717,2027,2.581,2089,5.18,2132,3.67,2192,3.67,2193,4.034,2194,4.034,2195,4.034,2196,4.034,2197,4.034,2198,4.034,2199,4.034,2200,3.107,2201,4.034,2202,4.034,2203,4.034,2204,4.034]],["t/468",[288,3.033,934,3.901,965,7.697,2070,7.295]],["t/470",[168,1.688,170,1.372,172,0.923,180,3.265,189,1.64,195,1.4,197,3.331,225,2.623,273,2.713,280,4.224,287,1.518,288,1.968,314,4.291,402,2.974,495,3.673,506,3.384,529,2.286,530,2.618,613,2.68,621,3.439,622,2.974,848,2.143,934,3.906,935,2.932,945,3.778,955,6.786,962,5.344,1284,3.759,2205,7.982,2206,5.875,2207,5.875,2208,5.875,2209,5.344]],["t/472",[303,3.274,446,2.96,465,4.719,589,5.032]],["t/475",[172,0.988,175,1.237,181,1.856,200,1.835,202,1.993,227,1.835,236,1.899,245,3.244,253,2.473,255,1.35,280,2.229,281,1.324,284,1.484,299,2.925,303,2.275,337,1.795,351,2.341,358,4.847,365,2.575,386,1.945,389,2.773,395,1.509,408,1.819,409,1.639,410,3.12,434,2.102,444,2.163,446,2.058,518,2.92,522,2.804,523,1.687,533,1.993,540,1.739,564,1.593,565,4.183,566,4.188,589,2.341,593,3.814,604,2.302,702,3.12,909,3.244,926,2.763,951,6.157,1157,2.92,1163,2.575,1547,3.244,1697,3.12,1720,2.92,1721,3.12,1856,3.831,1933,5.35,2200,3.244,2210,4.211,2211,4.211
,2212,4.211,2213,4.211,2214,3.831,2215,7.534,2216,3.831]],["t/477",[280,4.377,303,2.99,375,5.058,386,3.819,389,4.378,408,1.997,540,3.416,593,4.186,2217,8.271,2218,6.664]],["t/479",[189,2.124,202,3.603,224,5.278,225,2.203,255,1.633,280,4.029,284,2.682,303,3.415,386,3.515,389,3.354,534,5.546,540,3.144,575,4.306,593,3.853,1027,5.128,2219,7.612]],["t/481",[172,1.238,200,3.434,227,3.434,233,4.927,299,2.577,303,2.85,347,3.88,351,4.38,446,3.155,566,4.38,589,4.38,695,6.35,849,4.72,1547,6.071]],["t/483",[123,2.422,181,2.078,200,2.055,215,2.126,236,2.126,244,3.177,253,4.725,255,1.012,270,2.078,288,1.58,299,2.886,303,3.389,308,1.835,408,1.651,444,2.422,446,3.064,451,3.197,523,1.889,565,3.934,566,4.906,589,2.621,657,2.496,850,3.94,852,4.009,853,2.884,885,2.98,926,3.093,945,3.237,992,4.016,1040,5.268,1041,5.511,1103,4.742,1104,5.268,1157,3.27,1227,3.27,1472,4.009,1721,3.493,2135,5.815,2214,4.29,2220,4.716,2221,6.222]],["t/485",[253,5.214,303,3.211,408,1.667,446,3.389,451,4.152,849,4.136,850,5.116,992,5.214,1040,5.319,1041,7.157,2135,7.551,2218,7.911,2222,6.905,2223,6.905,2224,6.905]],["t/487",[163,1.329,170,1.835,172,1.042,210,2.769,253,2.656,255,0.971,281,1.422,297,3.646,299,2.17,303,3.333,304,1.813,308,2.582,351,2.515,381,1.281,389,1.993,397,2.473,408,1.092,444,2.323,446,3.014,451,3.673,494,3.237,523,1.813,534,5.994,565,2.016,566,3.689,589,2.515,644,2.895,646,2.515,647,3.485,652,3.137,656,2.968,674,4.353,848,1.651,849,5.184,850,3.823,853,2.767,908,4.353,1040,5.112,1753,3.847,1917,3.847,2218,3.646,2221,4.116,2225,4.525,2226,4.525,2227,7.859]],["t/489",[162,1.62,172,1.386,173,1.186,200,2.055,207,1.928,253,2.769,255,1.726,281,1.482,287,1.218,288,2.291,299,1.542,303,2.473,406,2.458,409,2.661,439,3.87,446,3.196,451,3.197,476,2.322,492,2.291,523,1.889,532,2.078,565,3.934,566,2.621,589,5.209,848,2.495,850,3.94,855,3.493,856,3.27,860,2.717,874,2.387,885,2.055,887,2.668,904,2.354,908,5.279,985,2.884,1227,3.27,1235,3.374,1825,4.29,2228,4.716,2229,4.716,2230,4.716,2231,4.716,2232,4.716]],["t/491",[282,3.672]],["t/493",[170,1.272,173,1.369,195,1.805,225,2.521,255,1.168,287,1.407,288,1.824,301,4.847,353,3.484,366,4.009,398,1.611,414,1.824,434,2.717,436,3.404,446,1.78,470,3.572,512,2.272,573,4.194,662,3.08,783,3.08,807,3.33,848,1.986,861,3.404,865,4.21,874,2.756,985,3.33,996,4.194,1035,3.668,1265,5.611,1287,4.953,2013,4.629,2174,4.033,2180,4.953,2233,6.891,2234,4.629,2235,5.445,2236,5.445,2237,5.445,2238,5.445,2239,4.953,2240,4.953,2241,5.445,2242,5.445,2243,5.445,2244,5.445]],["t/495",[123,2.821,162,1.886,163,1.035,168,1.939,170,1.283,172,1.197,173,1.697,175,1.035,185,1.058,189,1.533,195,1.608,197,1.471,199,1.553,213,3.368,227,2.393,231,1.371,237,2.994,255,1.772,264,1.19,270,1.553,284,1.935,304,2.201,310,1.231,314,1.895,367,1.837,369,1.712,373,2.155,381,0.998,408,1.629,409,1.371,414,1.84,419,2.069,424,1.81,438,1.895,467,2.203,475,1.81,512,2.292,523,1.412,537,1.712,538,3.008,539,1.926,565,3.008,569,4.242,572,2.312,573,2.715,605,1.712,645,1.959,738,1.668,740,2.069,860,2.03,876,2.84,885,2.393,1063,2.374,1065,2.111,1123,1.993,1131,1.759,1139,3.289,1208,1.759,1216,2.111,1252,2.312,1332,4.319,1339,2.996,1682,2.61,1726,2.84,1740,2.996,2073,2.443,2120,3.206,2233,3.206,2240,4.997,2245,4.997,2246,3.524,2247,3.524,2248,3.524,2249,3.524,2250,3.524,2251,3.206,2252,3.206]],["t/497",[156,3.922,198,2.034,213,4.95,255,2.282,270,2.169,288,1.65,304,1.973,327,2.827,408,1.189,419,2.891,434,4.115,443,2.785,529,1.916,530,2.194,736,3.523,738,3.34,740,4.143,877,5.999,1324,5.227,1410,4.411,1729,3.968,1733,4.186,1734,4.186,1745,4.479,1746,3
.793,1747,4.186,2252,4.479,2253,4.186,2254,4.924,2255,4.924,2256,4.924,2257,4.924,2258,4.924,2259,4.924,2260,4.924,2261,4.924,2262,4.924,2263,4.924,2264,4.924,2265,4.924,2266,4.924,2267,4.924,2268,4.924,2269,4.924]],["t/499",[162,1.484,163,1.269,173,1.086,189,1.79,199,3.729,210,1.803,213,4.895,225,2.213,228,4.104,229,4.406,255,0.927,270,2.826,281,1.358,288,1.447,304,1.731,310,1.509,366,2.286,406,2.252,408,1.847,409,1.681,414,1.447,419,2.536,434,5.137,458,2.444,490,3.091,499,2.127,523,2.57,529,1.681,530,1.925,532,1.903,587,2.701,601,3.481,996,4.94,1107,2.286,1123,2.444,1124,3.673,1125,3.673,1127,5.453,1128,3.673,1129,2.995,1135,3.673,1136,3.673,1159,6.163,1324,3.2,1729,6.163,1956,3.2,2253,3.673,2270,4.32,2271,4.32,2272,4.32,2273,4.32,2274,4.32]],["t/501",[175,2.093,213,4.973,256,4.674,304,3.631,310,3.48,398,2.681,408,1.72,450,5.279,475,3.659,506,4.105,509,5.742,510,5.742,630,4.674,947,4.796,2275,7.126,2276,7.126,2277,6.482]],["t/503",[163,2.094,172,0.785,176,1.647,179,4.458,180,3.964,213,4.975,217,2.336,225,1.446,255,1.072,273,3.293,281,1.571,288,2.389,304,2.857,327,2.002,349,2.108,398,1.478,401,3.609,408,1.207,409,1.945,432,3.702,434,2.494,446,1.634,458,2.827,498,2.993,499,2.46,570,4.249,572,4.678,667,2.645,700,3.576,738,2.365,876,4.027,904,3.559,1066,3.124,1129,3.465,1410,4.458,1682,3.702,2253,4.249,2278,4.998,2279,4.998,2280,4.998,2281,4.998,2282,4.998,2283,4.998,2284,4.998,2285,4.998,2286,6.487,2287,4.998,2288,4.998]],["t/505",[189,2.308,213,4.128,237,3.249,287,2.137,304,3.313,522,3.686,538,3.686,569,4.247,620,5.058,2245,7.524,2289,8.271]],["t/507",[303,3.274,389,3.989,465,4.719,566,5.032]],["t/509",[172,1.3,173,2.081,175,2.431,198,2.561,202,2.935,217,3.869,248,4.178,251,4.856,253,4.86,273,2.863,284,2.185,299,3.046,337,2.643,352,4.068,381,2.344,389,3.647,393,2.262,454,5.297,484,3.641,566,5.179,599,3.877,1033,3.508,1392,3.447,2290,6.202]],["t/511",[246,3.812,251,3.754,274,4.003,299,2.355,303,2.604,308,2.803,327,2.886,337,3.07,389,4.02,393,3.653,414,2.413,454,4.609,546,4.994,566,4.003,593,3.646,1021,3.936,1022,6.124,1172,5.548,1392,4.003,2291,7.203]],["t/513",[163,1.873,170,1.49,172,1.002,195,1.52,251,3.325,288,2.825,299,2.086,303,3.417,381,2.388,408,1.54,414,2.137,446,2.086,498,3.82,534,3.745,566,4.688,657,3.376,849,3.82,860,3.675,954,5.848,975,6.248,1234,3.901,1406,6.497,1551,4.297,2292,6.379,2293,5.803,2294,6.379,2295,6.379]],["t/515",[163,2.162,175,2.162,251,3.837,255,1.579,299,2.407,344,4.501,349,3.104,381,2.619,393,3.374,409,2.864,414,2.466,531,4.829,557,4.959,566,4.091,1356,6.258,1414,5.67,1438,5.67,2296,7.361,2297,7.361]],["t/517",[197,2.236,246,2.836,251,4.874,270,3.3,274,2.978,280,2.836,299,2.449,303,3.381,308,2.085,327,2.146,337,2.284,381,1.517,389,3.3,393,3.15,398,2.216,408,1.808,414,1.795,546,3.715,566,4.8,593,3.791,949,4.793,1021,2.928,1022,4.555,1065,6.106,1172,4.127,1392,2.978,1924,4.127,2298,7.856,2299,4.874,2300,5.358,2301,4.874]],["t/519",[163,1.457,197,2.07,202,2.348,246,2.625,251,4.71,270,3.125,280,5.059,299,2.319,303,3.267,381,2.008,389,3.125,393,3.02,398,2.098,408,2.182,409,1.93,414,1.662,566,4.602,575,2.806,598,2.971,825,3.549,949,4.539,1065,6.131,1392,2.757,1924,3.821,2073,4.918,2298,7.532,2299,4.513,2301,4.513,2302,8.28,2303,4.961,2304,4.961]],["t/521",[172,0.835,175,2.187,197,2.218,198,2.195,270,3.282,280,2.813,299,2.435,303,3.548,381,1.505,386,2.454,389,3.282,398,2.204,408,2.076,414,1.781,565,4.372,566,4.78,593,2.69,945,2.516,949,4.766,951,4.886,1065,6.091,1697,5.518,1924,4.094,2160,4.094,2305,5.315,2306,7.449,2307,5.315,2308,5.315,2309,5.315,2310,4.835]],["t/52
3",[197,2.389,270,3.455,280,3.03,303,3.644,381,2.22,386,2.643,389,3.94,398,2.32,408,1.893,446,1.872,534,6.11,566,3.182,593,2.897,674,5.144,849,3.428,949,5.017,1065,6.233,1203,5.61,1920,5.207,1924,4.409,2160,4.409,2310,5.207,2311,5.724,2312,5.724]],["t/525",[487,5.315,713,6.707,1203,6.477,1795,6.974]],["t/527",[165,1.732,170,1.114,172,1.393,173,1.509,174,2.683,175,0.866,176,1.979,178,1.433,179,5.547,181,2.101,182,1.766,184,2.045,185,1.432,186,4.837,187,5.615,189,2.113,199,1.299,200,1.285,205,1.699,207,1.206,208,1.668,215,1.33,217,1.379,218,2.377,220,2.508,227,2.078,235,4.933,288,0.988,315,2.683,316,2.508,317,5.607,321,2.508,325,2.272,327,1.182,400,2.11,403,1.639,412,2.11,414,0.988,440,1.935,480,1.844,487,4.048,493,1.362,494,2.11,529,2.683,532,1.299,558,2.185,568,1.668,575,1.668,587,5.063,591,1.766,655,2.377,658,1.639,674,1.935,711,1.766,716,2.683,857,2.377,864,2.272,866,4.645,871,2.11,872,3.412,885,1.285,1013,2.185,1014,1.699,1027,1.987,1107,1.561,1142,2.185,1203,2.11,1341,2.683,1391,3.412,1436,4.447,1444,2.272,1501,2.508,1528,2.508,1646,2.683,1691,2.11,1753,2.508,1767,2.683,2313,4.338,2314,2.949,2315,2.949,2316,2.949,2317,2.949,2318,2.949,2319,2.949,2320,4.769,2321,4.769,2322,2.949,2323,2.949,2324,2.949,2325,2.949,2326,2.949,2327,2.683,2328,2.949,2329,2.949,2330,2.949,2331,2.949]],["t/529",[3,3.369,170,1.696,172,0.622,173,0.996,176,1.981,177,2.035,179,3.757,180,2.202,182,2.373,185,2.616,189,1.106,193,2.065,199,3.571,207,1.62,231,1.542,237,3.603,267,4.441,275,2.065,287,2.094,294,3.943,304,1.587,314,4.358,317,2.935,325,3.052,352,2.599,373,2.423,382,3.052,414,1.327,441,2.241,538,4.087,642,2.599,647,3.052,657,2.097,671,2.202,702,2.935,730,2.835,738,1.875,854,2.13,866,4.049,871,5.195,872,2.835,882,2.835,1027,2.669,1049,2.477,1131,3,1139,2.373,1141,2.747,1244,2.935,1269,3.052,1342,3.604,1376,2.935,1410,2.477,1591,3.193,1715,3.604,1799,3.193,2033,2.747,2072,3.604,2293,3.604,2332,3.962,2333,3.604,2334,3.962,2335,3.962,2336,3.604,2337,3.962,2338,3.962]],["t/531",[162,2.112,168,1.173,172,0.641,173,1.026,182,2.445,185,2.891,199,3.626,207,2.513,215,1.841,227,1.779,231,3.435,236,1.841,237,3.468,244,2.751,246,3.254,247,3.29,248,2.751,276,2.397,287,1.055,306,2.751,314,2.195,327,1.636,387,2.097,443,3.478,472,2.445,482,2.751,487,2.397,512,3.871,538,3.668,642,2.678,652,4.263,657,2.161,671,2.269,686,3.29,702,3.024,713,3.024,730,2.921,872,2.921,1033,2.31,1035,4.142,1036,3.714,1049,2.552,1081,3.145,1193,3.29,1215,3.714,1269,4.736,1520,3.714,1690,2.678,1961,5.593,2111,3.145,2339,7.487,2340,3.714,2341,5.593,2342,4.083,2343,4.083,2344,4.083,2345,4.083]],["t/533",[282,3.672]],["t/535",[162,1.322,163,1.13,168,1.689,170,0.899,172,1.254,178,1.869,185,1.765,189,1.074,198,1.589,199,4.286,200,2.561,210,1.606,214,2.217,215,3.6,216,2.139,225,2.31,228,2.462,229,3.386,234,1.948,237,2.309,251,2.006,252,2.406,280,2.037,281,1.21,284,1.356,339,2.353,347,1.894,389,1.695,395,1.379,400,2.753,414,1.969,463,2.103,467,2.406,470,3.856,475,1.976,493,1.777,512,2.453,565,1.715,569,1.976,604,3.213,622,1.948,674,2.524,848,2.602,865,3.267,874,4.042,885,2.561,1020,2.851,1058,2.305,1100,2.964,1116,2.753,1123,3.325,1131,1.921,1152,2.964,1216,4.272,1234,3.595,1252,2.524,1288,3.501,1794,3.272,1848,3.101,2346,5.879,2347,3.848,2348,3.848,2349,3.848,2350,3.848,2351,3.848,2352,3.848,2353,3.501]],["t/537",[162,2.235,163,1.294,168,1.869,173,1.636,199,1.941,200,3.371,210,2.715,217,2.059,225,2.638,228,6.106,229,4.457,270,1.941,273,2.034,282,2.532,288,1.476,327,1.765,408,1.571,409,2.532,434,2.199,493,2.034,499,2.169,568,2.492,578,3.342,604,3.556,605,2.14,
620,2.694,701,3.394,709,3.394,711,2.638,872,3.152,998,3.054,1107,3.444,1122,5.532,1123,5.583,1124,5.532,1125,5.532,1127,5.532,1128,5.532,1129,3.054,1130,4.008,1131,2.199,1132,4.008,1133,4.008,1135,3.746,1136,3.746,1137,4.008,1138,4.008,2354,4.406]],["t/539",[172,1.395,175,2.028,193,3.599,210,2.882,214,3.978,215,3.113,227,3.008,311,3.978,369,3.354,375,4.223,523,2.767,540,2.852,593,3.495,848,3.24,850,3.978,865,3.838,874,5.246,929,5.115,951,4.53,1155,5.319,1157,4.788,1697,5.115]],["t/541",[185,2.579,189,2.397,190,5.634,195,2.046,234,4.347,287,2.219,503,3.547,2355,7.813]],["t/543",[152,1.157,162,1.947,163,1.409,170,1.486,172,0.338,173,0.541,176,0.71,178,1.783,179,2.294,180,2.667,189,2.063,190,5.109,191,1.218,194,1.377,195,0.874,200,0.938,205,1.24,212,3.89,225,0.623,255,1.216,264,0.727,281,0.677,282,0.838,284,0.758,288,1.607,295,1.122,299,2.264,308,0.838,310,0.752,327,1.922,347,1.06,349,0.908,351,1.196,369,1.046,378,1.658,381,1.605,388,2.473,395,1.316,398,1.086,401,1.09,406,1.913,408,1.158,414,0.721,419,1.264,446,1.853,457,1.289,471,1.316,482,1.45,492,1.783,495,2.999,496,2.185,499,1.06,512,0.898,523,0.862,532,0.948,564,0.814,568,1.218,571,1.139,576,1.412,613,1.674,646,1.196,674,3.147,711,1.289,736,2.626,738,2.683,783,1.218,846,1.83,848,1.339,856,1.492,858,1.139,865,2.04,874,1.09,909,1.658,937,1.973,945,1.019,992,1.264,1025,1.54,1030,3.206,1032,3.819,1038,1.45,1043,1.83,1047,1.735,1066,2.294,1107,2.539,1122,4.819,1131,1.074,1139,2.198,1222,1.54,1233,3.121,1321,1.83,1551,2.473,1631,1.958,1682,5.13,1768,1.735,2104,1.958,2111,1.658,2234,1.83,2286,1.958,2355,7.393,2356,1.958,2357,2.153,2358,2.153,2359,2.153,2360,5.669,2361,2.153,2362,2.153,2363,2.153,2364,2.153,2365,4.798,2366,4.798,2367,4.798,2368,2.153,2369,2.153,2370,2.153,2371,2.153,2372,2.153,2373,2.153,2374,3.67,2375,2.153,2376,2.153,2377,2.153,2378,2.153,2379,3.67,2380,2.153,2381,2.153,2382,2.153,2383,2.153,2384,2.153,2385,2.153,2386,2.153,2387,2.153]],["t/545",[272,6.277,465,4.719,522,4.035,1161,4.948]],["t/547",[152,2.449,163,2.317,172,1.364,195,1.085,200,2.906,227,1.985,231,1.773,255,1.431,264,1.539,284,1.605,287,2.643,298,1.807,310,1.591,387,2.339,395,1.633,398,1.973,407,3.158,408,1.905,416,3.509,440,2.988,496,3.347,506,5.002,522,4.65,524,5.67,529,1.773,530,2.03,653,5.47,738,2.156,845,3.509,904,2.274,928,2.577,1014,2.624,1131,2.274,1161,4.312,1208,2.274,1403,4.144,1724,5.67,2388,3.873,2389,3.509,2390,3.509]],["t/549",[123,4.443,172,1.041,227,2.889,398,2.849,440,4.35,505,4.222,516,5.536,522,4.928,653,6.677,928,3.751,1720,4.598,2389,5.108,2390,5.108]],["t/551",[173,1.754,231,2.715,237,2.741,302,4.838,369,3.389,381,3.046,398,2.645,522,4.905,928,3.947,1140,4.7,1161,4.886,1274,8.134,1720,4.838,2391,6.347]],["t/553",[163,2.399,185,2.453,231,3.838,236,3.683,237,3.21,302,5.664,398,2.417,522,3.641,629,6.052,686,6.583,1720,5.664]],["t/556",[39,3.212,152,2.143,172,1.277,173,1.518,176,1.314,193,2.077,195,1.438,197,3.393,200,1.736,208,2.255,255,1.563,264,1.346,295,3.147,298,3.225,310,2.109,370,2.255,381,2.063,393,1.454,398,2.156,402,2.017,408,1.458,414,1.335,494,2.852,496,2.809,498,2.387,499,1.962,503,3.358,522,3.623,540,1.646,568,2.255,696,2.763,854,2.143,858,2.109,928,2.255,937,3.919,945,1.887,954,2.763,1020,2.952,1142,5.399,1161,2.178,1213,2.763,1228,3.07,1243,3.389,1273,4.279,1325,2.952,1442,3.07,1873,3.212,1895,2.685,1956,2.952,2392,6.63,2393,3.626,2394,3.986,2395,3.986,2396,7.396,2397,3.986,2398,3.986,2399,3.626,2400,3.986,2401,3.986]],["t/558",[39,5.096,162,1.095,167,2.362,175,0.936,255,2.182,295,3.296,327,2.889,337,1.359,370,1.803,381,2.88,395,
1.143,398,2.912,417,2.456,418,1.529,498,3.04,499,2.499,522,4.069,532,2.236,540,3.886,646,1.772,653,4.385,670,2.04,681,2.148,696,4.999,818,2.569,926,3.33,937,3.4,1025,2.281,1035,2.148,1140,3.419,1161,3.941,1273,3.713,1325,3.76,1641,6.13,1873,2.569,1895,3.419,1956,5.341,2045,6.559,2200,5.554,2389,3.91,2390,3.91,2391,2.9,2393,4.618,2396,4.618,2399,2.9,2402,3.188,2403,3.188,2404,3.188,2405,3.188]],["t/560",[168,2.636,287,2.371,522,4.09]],["t/562",[152,3.397,163,1.855,172,0.992,284,2.226,287,2.588,395,2.265,398,2.479,506,4.828,522,4.774,524,7.126,529,2.459,530,2.816,653,6.52,845,4.867,928,3.574,1014,3.64,1161,4.58,1724,7.126,2388,5.372,2389,4.867,2390,4.867]],["t/564",[172,1.084,177,3.546,195,1.645,197,2.882,200,3.008,225,1.998,282,2.687,287,2.295,397,3.774,398,2.627,408,1.667,441,3.906,457,4.136,522,4.779,537,3.354,890,4.94,947,3.655,954,4.788,1084,3.978,1131,3.446,1161,3.774]],["t/566",[152,4.868,395,3.245,396,5.032,465,4.719]],["t/568",[162,1.428,170,0.971,189,1.74,202,1.969,208,2.353,217,1.944,246,2.201,267,5.08,273,1.92,287,1.075,298,1.65,299,2.445,304,1.666,353,2.661,381,2.519,389,2.747,395,3.189,398,3.157,404,4.905,405,3.591,456,3.204,458,2.353,476,2.047,505,3.699,515,3.351,516,3.989,522,2.778,540,2.575,541,2.976,1014,2.396,1020,3.081,1026,3.204,1047,3.351,1081,4.802,1163,2.543,1526,2.884,1545,2.491,1958,3.784,2033,2.884,2118,3.536,2313,3.784,2406,4.159,2407,4.159,2408,4.159,2409,4.159,2410,4.159,2411,6.234,2412,4.802,2413,3.536,2414,4.159]],["t/571",[381,2.635,398,2.753]],["t/573",[381,2.991,398,2.715]],["t/575",[123,4,381,2.205,398,2.304,443,4.406,446,2.547,476,3.835,534,4.573,606,5.236,848,2.842,849,4.665,985,4.763,995,7.38,1933,6.623,2415,7.086]],["t/578",[398,2.541,408,2.074,476,4.228,499,4.228,505,3.279,2067,5.786,2416,8.589,2417,8.589]],["t/580",[189,2.367,398,2.984,458,4.797,505,4.111,2418,6.833,2419,8.48]],["t/582",[123,3.659,172,1.119,255,1.529,288,2.387,398,2.681,485,5.279,486,5.742,505,3.805,506,5.74,507,6.482,508,6.482,509,7.302,510,5.742,511,6.482,516,4.56,1033,4.031,1311,4.56,1956,5.279]],["t/584",[198,3.64,398,2.608,505,3.366,516,5.641,541,6.307,600,5.938]],["t/586",[288,2.841,398,2.509,408,2.047,512,3.539,515,6.833,516,5.426,517,7.21,518,5.879,2420,8.48]],["t/588",[398,2.775,505,4.084,516,6.002,522,4.904,653,7.084,928,4.257,2389,5.798,2390,5.798]],["t/590",[176,2.76,349,3.531,351,4.654,398,2.477,499,4.122,505,3.822,516,5.358,1850,8.511]],["t/592",[172,0.983,264,2.114,284,2.205,288,2.097,380,5.044,398,2.464,408,1.511,409,3.241,496,3.608,506,3.606,518,6.918,522,2.789,578,4.807,622,3.168,954,4.34,1042,4.637,1208,4.98,1756,5.694,2421,6.26,2422,6.26,2423,8.329,2424,6.26,2425,6.26,2426,6.26]],["t/594",[182,5.422,185,2.719,281,2.846,282,3.523]],["t/596",[185,2.613,195,2.073,234,4.404,287,2.248,338,6.032,503,3.593,2427,8.701]],["t/598",[168,1.405,170,1.432,172,1.103,173,0.765,175,0.894,176,1.003,185,0.914,195,1.165,210,2.93,225,2.032,226,1.947,229,2.817,230,1.563,231,1.184,236,1.372,237,2.409,255,1.926,264,1.028,281,1.537,284,2.16,299,0.995,303,1.768,310,1.063,339,1.861,349,1.283,351,1.692,356,1.663,381,0.862,386,1.405,390,2.452,395,1.091,396,1.692,402,1.54,408,0.735,414,1.02,424,1.563,438,1.636,446,1.599,463,1.663,470,1.996,476,1.498,484,1.787,496,1.173,506,1.753,509,2.452,510,2.452,512,2.041,537,1.478,538,2.732,539,1.663,540,2.532,577,1.611,578,3.606,593,1.54,604,2.673,621,1.311,622,1.54,660,1.947,807,1.861,848,1.784,865,1.692,874,3.893,885,3.351,904,1.519,934,2.642,945,3.888,947,1.611,949,1.947,984,2.11,1098,1.947,1099,1.787,1100,2.344,1101,2.769,1116,2.177,1121,2.344,1177,2.5
87,1178,2.587,1284,3.129,1392,1.692,1526,2.11,1726,2.452,1792,2.254,2209,2.769,2277,2.769,2428,5.213,2429,3.043,2430,3.043,2431,3.043,2432,3.043,2433,3.043,2434,3.043,2435,3.043,2436,3.043,2437,3.043]],["t/600",[189,1.968,230,3.621,255,2.127,303,3.255,311,4.062,387,4.622,406,4.692,636,3.38,2428,7.653,2438,6.414,2439,8.188,2440,7.051,2441,5.995,2442,6.414,2443,6.414,2444,6.414,2445,6.414,2446,7.051]],["t/602",[189,1.869,230,3.44,255,2.198,303,3.149,311,3.858,387,4.472,406,4.539,636,3.211,1118,4.644,2428,7.404,2438,6.093,2441,5.695,2442,6.093,2443,6.093,2444,6.093,2445,6.093,2447,8.708,2448,6.698,2449,6.698,2450,6.698,2451,6.698,2452,6.698]],["t/604",[165,3.818,170,1.995,176,2.143,189,1.815,195,2.035,207,2.658,225,1.882,281,2.044,310,2.983,349,2.742,395,2.331,400,4.652,402,3.291,406,4.452,471,3.977,476,3.201,564,2.46,571,3.442,602,6.327,604,3.554,605,3.159,621,2.802,926,4.266,2453,5.529,2454,6.503,2455,6.503,2456,6.503,2457,5.009]],["t/606",[172,1.384,176,2.906,203,4.818,349,3.717,2439,8.019,2458,8.815]],["t/608",[168,2.636,185,2.756,619,5.737]],["t/610",[162,3.535,172,1.284,177,3.127,182,3.646,185,1.828,199,2.683,202,2.882,207,2.489,210,2.541,219,5.131,248,4.102,264,2.762,289,4.69,299,1.991,310,2.856,370,3.444,394,3.646,461,4.221,484,3.575,568,3.444,619,5.771,681,4.102,1191,3.896,1690,5.363,1749,5.177,2459,5.539,2460,5.177,2461,6.089]],["t/612",[152,3.131,162,2.725,163,1.71,170,1.853,172,1.246,216,4.41,225,2.611,246,3.082,282,2.266,287,2.05,367,4.703,369,2.829,395,2.087,396,3.237,398,2.867,403,3.237,404,3.82,444,2.991,505,3.029,506,3.355,512,2.43,516,3.727,576,3.82,619,3.641,854,3.131,866,5.345,1014,3.355,1247,5.298,2462,5.824,2463,5.824,2464,4.951]],["t/614",[123,2.773,172,1.362,173,1.358,199,2.38,202,2.556,210,3.143,219,3.002,227,2.353,246,2.858,255,1.159,270,2.38,284,1.903,287,1.395,288,1.809,398,2.776,404,5.689,476,2.659,505,3.583,506,3.111,512,3.143,619,3.376,636,2.589,783,4.906,784,3.745,803,4.16,885,2.353,1207,3.235,1208,2.695,1297,4.352,1302,4.592,1775,4.592,2157,6.852,2464,4.592,2465,7.532,2466,5.401,2467,5.401,2468,4.913,2469,4.913]],["t/616",[398,2.643,404,5.86,2470,8.126,2471,8.933,2472,8.933]],["t/618",[162,2.916,163,2.151,170,1.212,172,1.15,185,1.559,244,3.497,246,4.493,253,5.415,282,2.02,344,3.175,366,3.877,391,4.723,393,2.673,408,1.769,414,1.739,427,4.183,459,3.405,475,3.762,476,2.556,505,1.982,576,3.405,591,3.109,618,3.322,673,5.079,729,4.183,730,3.714,851,6.229,949,6.458,1879,3.999,2174,5.427,2473,8.49,2474,5.191,2475,4.723,2476,5.191,2477,5.191,2478,5.191]],["t/620",[162,2.808,172,0.956,207,2.489,281,1.914,398,1.801,403,3.384,404,5.363,408,1.47,418,2.919,420,3.806,444,3.127,529,2.369,575,3.444,606,3.328,619,3.806,671,3.384,872,4.356,1007,5.177,1216,4.897,1307,5.177,1402,4.51,1673,5.177,2412,7.601,2413,6.952,2457,4.69,2479,6.089,2480,6.089,2481,5.539,2482,6.089,2483,5.539,2484,6.089,2485,6.089,2486,6.089]],["t/622",[123,3.583,172,1.096,202,3.303,255,1.497,398,2.919,505,3.767,506,4.019,636,3.345,885,3.04,1207,4.179,1208,3.482,1302,5.932,2412,6.887,2457,6.887,2464,5.932,2468,6.347,2469,6.347,2487,8.941,2488,6.977]],["t/624",[398,2.541,505,3.279,2058,7.813,2412,6.616,2457,6.616,2470,7.813,2489,8.589,2490,8.589]],["t/626",[162,3.422,167,5.279,172,1.119,176,2.349,185,2.14,306,4.8,403,3.961,463,3.894,522,4.038,523,2.855,619,4.455,668,5.742,885,3.105,986,4.455,2033,4.941,2084,7.705,2491,9.062,2492,7.126,2493,7.126]],["t/629",[185,2.756,231,3.571,465,4.784]],["t/631",[163,0.866,168,0.847,170,0.689,172,0.943,173,0.741,175,2.025,189,0.823,193,1.537,208,1.668,231,2.683,237,3.349,255,0
.633,276,1.732,284,1.68,287,1.232,288,0.988,298,1.17,299,0.964,303,1.066,308,1.148,312,2.185,314,4.771,381,1.35,393,1.74,394,1.766,398,0.873,402,1.493,408,1.665,409,1.148,414,1.598,424,1.515,429,3.212,438,1.586,446,0.964,456,2.272,460,2.856,462,1.887,492,1.433,496,1.137,505,1.126,522,1.314,538,4.195,539,3.768,575,1.668,577,1.561,612,1.935,623,2.045,629,3.532,631,2.377,634,4.624,635,2.683,636,1.414,637,2.272,638,5.461,639,3.842,640,4.645,641,5.104,642,4.966,643,4.054,644,3.051,645,1.639,646,1.639,647,3.673,648,2.272,649,2.508,650,2.377,651,2.508,652,2.045,653,2.045,654,2.683,655,2.377,685,3.532,871,3.412,882,4.295,885,1.285,954,2.045,1035,1.987,1163,1.804,1191,1.887,1279,2.272,1450,1.987,1475,2.683,1776,2.508,1871,2.377,2169,2.508,2494,2.683,2495,2.949,2496,2.949,2497,2.949,2498,2.683,2499,2.683,2500,2.683]],["t/633",[172,0.983,176,2.063,198,2.585,227,2.727,231,3.642,282,2.436,287,2.418,370,3.541,393,2.283,429,4.217,444,3.214,457,3.749,458,3.541,460,5.976,463,4.552,539,4.552,557,4.217,621,4.033,642,4.106,656,4.106,657,3.313,665,3.913,783,3.541,887,3.541,1064,5.694,2169,5.322]],["t/635",[191,3.323,227,3.477,231,2.286,237,3.136,255,1.26,381,2.26,393,2.912,414,2.674,424,3.017,438,3.159,456,4.525,460,3.518,538,3.557,539,4.362,605,2.854,629,6.715,634,6.983,701,4.525,738,2.781,898,4.073,1191,3.759,1331,5.344,1354,5.344,1396,3.518,1792,4.352,1864,5.344,2494,9.541,2501,5.875]],["t/637",[175,2.775,231,3.863,237,2.157,274,3.051,282,2.136,287,1.418,308,3.677,312,4.066,381,2.477,393,2.003,408,2.282,414,1.839,496,2.116,538,4.424,640,3.698,641,4.667,643,4.667,670,3.513,673,3.806,882,5.45,885,2.392,1021,3,1691,3.927,1871,4.423,2341,6.93,2498,4.994,2502,6.138,2503,5.49,2504,5.49,2505,5.49]],["t/639",[156,3.545,172,1.324,176,2.103,185,1.915,231,3.677,270,3.716,298,2.531,308,2.482,397,3.486,408,2.036,419,3.745,505,3.22,522,2.843,538,3.759,830,5.803,1367,5.803,1690,5.532,1768,5.14,1875,7.672,2339,7.672,2392,5.803,2500,5.803,2502,5.14,2506,6.379,2507,6.379]],["t/641",[195,2.157,225,2.62,287,2.339,401,4.583]],["t/643",[170,2.321,225,2.393,227,3.603,378,6.371,388,6.694,401,4.186,409,3.218,482,5.572,2111,6.371]],["t/645",[172,0.899,176,1.887,194,3.663,195,2.131,215,2.581,225,1.656,233,3.578,281,1.799,284,2.762,295,2.984,301,3.663,401,3.969,493,2.643,533,2.709,564,2.165,606,3.128,613,4.08,615,5.207,636,2.744,708,4.409,740,3.361,934,2.467,937,3.078,1131,2.857,1440,7.604,1563,6.667,1776,4.867,2508,5.724,2509,4.867,2510,8.944,2511,5.207,2512,5.724,2513,7.133,2514,7.841]],["t/647",[172,0.741,195,1.123,288,2.696,293,5.58,297,3.8,298,1.871,307,3.177,401,5.328,451,3.762,465,3.565,495,2.948,529,1.835,533,3.809,564,1.784,575,2.668,589,2.621,726,3.633,897,5.758,934,2.032,935,2.354,947,2.496,1059,4.29,1401,5.511,1416,3.27,1831,4.29,1897,4.009,2513,6.222,2515,6.84,2516,4.716,2517,4.716,2518,6.222,2519,6.84,2520,4.716,2521,4.716,2522,4.716,2523,4.716,2524,4.716,2525,4.716,2526,4.716,2527,4.716,2528,4.716,2529,4.716,2530,4.716,2531,4.716,2532,4.716,2533,4.29,2534,4.009,2535,3.8]],["t/649",[168,2.636,381,2.598,1555,5.737]],["t/651",[170,1.372,172,1.423,177,3.017,195,1.4,215,4.087,236,2.649,244,3.958,247,4.734,253,3.449,267,3.593,268,4.073,269,4.073,397,4.362,402,2.974,436,5.667,441,3.323,523,2.354,529,2.286,565,2.618,568,3.323,673,4.073,695,4.734,711,3.518,848,3.307,864,4.525,874,4.588,1050,4.525,1157,4.073,1947,4.995,2475,5.344,2536,5.875,2537,5.875,2538,5.875]],["t/653",[170,2.26,172,1.367,173,1.684,176,2.208,184,4.644,185,2.906,231,3.987,236,3.02,237,3.421,273,3.093,281,2.105,349,2.825,381,1.896,403,3.723,408,1.617,463,3.661,5
[Minified Docusaurus search-index data (generated build artifact): per-page term-weight postings for pages "t/655" through "t/879", followed by the beginning of the "invertedIndex" term/position map; payload truncated.]
":[[287,3]]}}}],["aim",{"_index":2738,"t":{"768":{"position":[[167,5]]}}}],["airflow",{"_index":579,"t":{"65":{"position":[[87,7]]},"774":{"position":[[24,7]]},"776":{"position":[[158,7],[200,7]]},"777":{"position":[[23,7],[299,7],[820,7],[1237,7],[1721,7]]},"779":{"position":[[251,7],[277,7],[502,10],[1042,11]]},"781":{"position":[[15,7]]},"783":{"position":[[54,7],[156,7],[211,7]]},"793":{"position":[[0,7],[138,7],[178,7]]},"801":{"position":[[120,7],[166,7]]},"807":{"position":[[162,7]]}}}],["airflow.contrib.operators.kubernetes_pod_oper",{"_index":2835,"t":{"779":{"position":[[340,49]]}}}],["airflow.operators.dummy_oper",{"_index":2837,"t":{"779":{"position":[[423,32]]}}}],["airflow/airflow",{"_index":2813,"t":{"777":{"position":[[314,15]]}}}],["airflow@example.com",{"_index":2842,"t":{"779":{"position":[[581,24]]}}}],["aka",{"_index":494,"t":{"49":{"position":[[149,5]]},"175":{"position":[[26,5]]},"487":{"position":[[682,5]]},"527":{"position":[[1130,5]]},"556":{"position":[[753,5]]},"768":{"position":[[1693,3]]},"793":{"position":[[82,4]]}}}],["algorithm",{"_index":2327,"t":{"527":{"position":[[1356,10]]},"743":{"position":[[115,9],[164,9]]}}}],["alia",{"_index":2028,"t":{"392":{"position":[[447,5]]}}}],["aliquam",{"_index":16,"t":{"2":{"position":[[133,7],[578,8],[1207,8],[2924,7]]},"4":{"position":[[133,7],[578,8],[1207,8],[2924,7]]}}}],["aliquet",{"_index":22,"t":{"2":{"position":[[244,7],[770,8],[1467,7]]},"4":{"position":[[244,7],[770,8],[1467,7]]}}}],["all,configmap,pvc,serviceaccount,rolebinding,secret,serviceinst",{"_index":2391,"t":{"551":{"position":[[197,67]]},"558":{"position":[[590,67]]}}}],["all,configmap,pvc,serviceaccount,secret,rolebinding,serviceinst",{"_index":2405,"t":{"558":{"position":[[1262,67]]}}}],["all,secret,configmap",{"_index":1772,"t":{"314":{"position":[[109,21]]}}}],["all,secret,configmaps,serviceaccount,rolebind",{"_index":524,"t":{"49":{"position":[[905,48]]},"547":{"position":[[528,48],[756,48]]},"562":{"position":[[154,48],[327,48]]}}}],["allegrograph",{"_index":1001,"t":{"135":{"position":[[0,13],[156,12]]}}}],["alloc",{"_index":735,"t":{"82":{"position":[[877,9]]},"167":{"position":[[849,9]]},"189":{"position":[[2144,9]]},"284":{"position":[[818,11],[853,10]]},"359":{"position":[[8,9]]},"770":{"position":[[365,11]]}}}],["allow",{"_index":646,"t":{"76":{"position":[[351,5]]},"111":{"position":[[193,5]]},"175":{"position":[[518,8]]},"177":{"position":[[167,6]]},"183":{"position":[[38,5]]},"193":{"position":[[130,5]]},"206":{"position":[[269,8]]},"243":{"position":[[392,8]]},"276":{"position":[[630,6]]},"400":{"position":[[134,5],[159,5],[193,5]]},"487":{"position":[[382,5]]},"543":{"position":[[1087,5]]},"558":{"position":[[200,5]]},"631":{"position":[[947,5]]},"704":{"position":[[258,6]]},"766":{"position":[[364,8]]},"768":{"position":[[305,6],[1900,6],[2146,8],[2317,6],[2555,5]]},"791":{"position":[[15,6]]}}}],["allow_us",{"_index":1364,"t":{"208":{"position":[[481,11],[616,11]]}}}],["allowed_us",{"_index":1337,"t":{"206":{"position":[[210,14]]},"208":{"position":[[1174,14]]}}}],["alon",{"_index":1336,"t":{"206":{"position":[[146,5]]}}}],["along",{"_index":1530,"t":{"274":{"position":[[2505,5]]},"766":{"position":[[174,5]]}}}],["alongsid",{"_index":1221,"t":{"175":{"position":[[129,9]]}}}],["alpin",{"_index":1096,"t":{"151":{"position":[[1586,6]]}}}],["alreadi",{"_index":575,"t":{"63":{"position":[[100,7]]},"74":{"position":[[205,7]]},"76":{"position":[[423,7]]},"149":{"position":[[259,7]]},"213":{"position":[[226,7],[269,7]]},"256":{"position":[[
226,7],[269,7]]},"392":{"position":[[402,7]]},"417":{"position":[[948,7]]},"441":{"position":[[95,7]]},"479":{"position":[[69,7]]},"519":{"position":[[58,7]]},"527":{"position":[[580,7]]},"620":{"position":[[123,7]]},"631":{"position":[[1019,7]]},"647":{"position":[[423,7]]},"704":{"position":[[424,7]]},"877":{"position":[[4770,7]]}}}],["alter",{"_index":240,"t":{"16":{"position":[[934,7]]}}}],["altern",{"_index":967,"t":{"123":{"position":[[507,14]]},"375":{"position":[[745,13]]},"877":{"position":[[131,13]]}}}],["although",{"_index":1073,"t":{"151":{"position":[[658,9]]}}}],["alway",{"_index":243,"t":{"16":{"position":[[1018,6]]},"159":{"position":[[8,6]]},"169":{"position":[[264,6]]},"175":{"position":[[543,6]]},"678":{"position":[[17,6]]},"713":{"position":[[177,6]]}}}],["amazon",{"_index":2700,"t":{"753":{"position":[[26,7]]}}}],["amd",{"_index":2766,"t":{"768":{"position":[[1218,3]]}}}],["amd64.tar.gz",{"_index":2176,"t":{"458":{"position":[[195,13],[268,13]]}}}],["amd64/minishift",{"_index":2012,"t":{"381":{"position":[[836,15]]}}}],["amd64sudo",{"_index":2880,"t":{"817":{"position":[[105,9]]}}}],["amet",{"_index":4,"t":{"2":{"position":[[22,5],[174,5],[385,4],[536,4],[928,5],[1122,4],[1500,4],[2138,4],[2534,4]]},"4":{"position":[[22,5],[174,5],[385,4],[536,4],[928,5],[1122,4],[1500,4],[2138,4],[2534,4]]}}}],["amount",{"_index":184,"t":{"16":{"position":[[140,6]]},"35":{"position":[[483,6]]},"82":{"position":[[616,6]]},"527":{"position":[[349,6]]},"653":{"position":[[33,6]]},"715":{"position":[[537,6]]},"717":{"position":[[415,6]]},"723":{"position":[[4,6]]}}}],["anacondadepend",{"_index":1136,"t":{"157":{"position":[[1199,21]]},"499":{"position":[[677,21]]},"537":{"position":[[811,21]]}}}],["analysi",{"_index":526,"t":{"53":{"position":[[11,8]]},"373":{"position":[[497,8]]},"674":{"position":[[64,8]]},"770":{"position":[[553,9]]}}}],["analyt",{"_index":1758,"t":{"310":{"position":[[309,10]]},"743":{"position":[[191,9]]}}}],["analyz",{"_index":2583,"t":{"680":{"position":[[23,9]]},"737":{"position":[[23,9]]}}}],["anatomi",{"_index":1160,"t":{"163":{"position":[[20,7]]},"706":{"position":[[1570,7]]}}}],["and/or",{"_index":1402,"t":{"222":{"position":[[169,6]]},"232":{"position":[[203,6]]},"260":{"position":[[169,6]]},"620":{"position":[[33,6]]},"678":{"position":[[569,6]]},"770":{"position":[[236,6]]}}}],["annot",{"_index":1168,"t":{"165":{"position":[[437,12]]},"189":{"position":[[353,12]]},"369":{"position":[[477,12]]},"768":{"position":[[2346,8]]}}}],["anonym",{"_index":2048,"t":{"396":{"position":[[734,9],[811,9]]}}}],["anoth",{"_index":952,"t":{"119":{"position":[[135,7]]},"121":{"position":[[172,7]]},"123":{"position":[[143,7]]},"126":{"position":[[266,7]]},"128":{"position":[[321,7]]},"131":{"position":[[348,7]]},"133":{"position":[[885,7]]},"175":{"position":[[109,7]]}}}],["ansibl",{"_index":2513,"t":{"645":{"position":[[260,8],[302,8]]},"647":{"position":[[206,8],[734,7]]}}}],["ansible.html",{"_index":2531,"t":{"647":{"position":[[897,12]]}}}],["ant",{"_index":109,"t":{"2":{"position":[[1579,5],[1596,4],[1846,4],[2651,4]]},"4":{"position":[[1579,5],[1596,4],[1846,4],[2651,4]]}}}],["anyconnect",{"_index":1813,"t":{"336":{"position":[[542,12]]},"453":{"position":[[724,10],[774,11],[933,11],[1019,10],[1245,11]]}}}],["anymor",{"_index":1801,"t":{"336":{"position":[[134,8]]},"367":{"position":[[28,7],[225,8]]},"686":{"position":[[151,7]]},"715":{"position":[[40,7]]},"717":{"position":[[651,8]]}}}],["anyth",{"_index":226,"t":{"16":{"position":[[683,8]]},"70":{"position":[[70,9]]},"
72":{"position":[[129,8]]},"155":{"position":[[115,8]]},"165":{"position":[[582,8]]},"189":{"position":[[498,8]]},"318":{"position":[[109,8]]},"385":{"position":[[118,8]]},"598":{"position":[[104,8]]},"704":{"position":[[0,8],[673,8],[781,8]]},"871":{"position":[[552,8]]}}}],["anyuid",{"_index":495,"t":{"49":{"position":[[155,7]]},"141":{"position":[[865,6]]},"177":{"position":[[115,6],[883,8]]},"189":{"position":[[3357,8]]},"296":{"position":[[963,6]]},"363":{"position":[[1148,6]]},"427":{"position":[[341,13],[977,6]]},"435":{"position":[[626,13],[785,6],[849,6]]},"470":{"position":[[205,6]]},"543":{"position":[[643,6],[963,6],[1352,6]]},"647":{"position":[[344,7]]},"777":{"position":[[1218,13]]}}}],["anywher",{"_index":2156,"t":{"453":{"position":[[380,9],[1794,9]]}}}],["apach",{"_index":485,"t":{"49":{"position":[[0,6],[217,6],[843,6]]},"582":{"position":[[47,6]]},"715":{"position":[[813,6]]},"776":{"position":[[193,6]]},"777":{"position":[[307,6]]},"807":{"position":[[155,6]]}}}],["apart",{"_index":1246,"t":{"177":{"position":[[582,5]]}}}],["api",{"_index":883,"t":{"97":{"position":[[739,3]]},"284":{"position":[[2057,3]]}}}],["apivers",{"_index":1066,"t":{"151":{"position":[[473,11],[1214,11]]},"169":{"position":[[602,11]]},"171":{"position":[[514,11]]},"173":{"position":[[55,11]]},"175":{"position":[[876,11]]},"185":{"position":[[281,11]]},"187":{"position":[[189,11]]},"189":{"position":[[2264,11],[2502,11],[2687,11],[2847,11],[3967,11],[4231,11]]},"191":{"position":[[600,11]]},"503":{"position":[[131,11]]},"543":{"position":[[327,10],[763,11]]},"847":{"position":[[176,11]]}}}],["app",{"_index":1208,"t":{"169":{"position":[[680,4]]},"171":{"position":[[575,4]]},"173":{"position":[[116,4]]},"175":{"position":[[937,4],[1204,4]]},"177":{"position":[[790,4]]},"185":{"position":[[342,4],[449,4]]},"187":{"position":[[250,4]]},"189":{"position":[[2342,4],[2563,4],[2748,4],[2908,4],[3169,4],[3264,4],[4028,4],[4135,4],[4292,4]]},"191":{"position":[[665,4]]},"208":{"position":[[2461,3],[2533,3]]},"302":{"position":[[300,4]]},"359":{"position":[[152,4]]},"361":{"position":[[586,3]]},"363":{"position":[[925,3],[941,3],[979,3],[1017,3],[1060,3],[1071,3],[1199,3]]},"371":{"position":[[120,3],[154,3]]},"427":{"position":[[13,3],[57,3],[103,3],[759,3],[1241,3],[1303,3]]},"433":{"position":[[395,3],[422,3]]},"435":{"position":[[496,3],[556,3]]},"495":{"position":[[348,4]]},"547":{"position":[[308,3]]},"592":{"position":[[7,3],[85,3],[124,3],[211,3]]},"614":{"position":[[447,3]]},"622":{"position":[[143,3]]},"719":{"position":[[1598,3]]},"877":{"position":[[957,3],[3045,4],[3385,3],[5148,3],[5279,4],[5310,3],[5585,3],[5646,3],[6351,3]]}}}],["app.kubernetes.io/instance=freesurf",{"_index":2437,"t":{"598":{"position":[[1463,37]]}}}],["app=::.apps.dsri2.unimaas.nl/hub/oauth_callback",{"_index":1389,"t":{"208":{"position":[[2352,46]]}}}],["nameserv",{"_index":1804,"t":{"336":{"position":[[197,12],[210,10],[233,10],[256,10]]}}}],["namespac",{"_index":1063,"t":{"151":{"position":[[371,9],[409,9]]},"262":{"position":[[274,11],[305,9]]},"264":{"position":[[90,9],[100,11],[147,11],[178,9]]},"266":{"position":[[196,11],[227,9],[271,9],[300,9]]},"407":{"position":[[45,10],[510,9]]},"495":{"position":[[617,10]]},"704":{"position":[[90,10]]},"779":{"position":[[204,9]]},"877":{"position":[[2833,9],[2887,10],[5035,10]]}}}],["namespace='changem",{"_index":2854,"t":{"779":{"position":[[927,21]]}}}],["namespace=::/mnt",{"_index":521,"t":{"49":{"position":[[817,14]]}}}],["pod_id>:: - + - + \ No newline at end of file 
diff --git a/ticket/index.html b/ticket/index.html
index 8cb04b339..d52e5c950 100644
--- a/ticket/index.html
+++ b/ticket/index.html
@@ -16,13 +16,13 @@
diff --git a/training/index.html b/training/index.html
index 2b4adb552..de54a0c33 100644
--- a/training/index.html
+++ b/training/index.html
@@ -16,13 +16,13 @@

📚 Training

The DSRI offers a range of curated training courses to UM students, researchers, and support staff at every career level.

Get started with the DSRI training

We organize training to cover the essential data science skills needed to start using the DSRI. These include getting access to the DSRI web UI, creating a new data science application, writing reproducible code, and learning good programming practices. We also offer more advanced training such as GPU Programming, Parallel Programming, and Deep Learning.

Our instructors

All trainings are taught by Research Software Engineers and Data Scientists from the DSRI: in-house experts with in-depth technical skills and extensive teaching experience. DSRI instructors are certified Carpentries Instructors and members of the international Carpentries community.

Training materials

All training materials are collaboratively developed with the research community and are freely available.


Upcoming training

Do you want to get started with the DSRI? Contact us at dsri-support-l@maastrichtuniversity.nl to start preparing a training session for your department.
