diff --git a/404.html b/404.html index 4583a161..37e50c67 100644 --- a/404.html +++ b/404.html @@ -9,7 +9,7 @@
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/files/novnc-1da118a4e2d37f0457dc898b17df91f2.png b/assets/images/novnc-1da118a4e2d37f0457dc898b17df91f2.png similarity index 100% rename from assets/files/novnc-1da118a4e2d37f0457dc898b17df91f2.png rename to assets/images/novnc-1da118a4e2d37f0457dc898b17df91f2.png diff --git a/assets/files/spice-d6bb68d48d9cfe82dc7d9ccd389ab377.png b/assets/images/spice-d6bb68d48d9cfe82dc7d9ccd389ab377.png similarity index 100% rename from assets/files/spice-d6bb68d48d9cfe82dc7d9ccd389ab377.png rename to assets/images/spice-d6bb68d48d9cfe82dc7d9ccd389ab377.png diff --git a/assets/js/b15e96a2.25f985cf.js b/assets/js/b15e96a2.25f985cf.js deleted file mode 100644 index 543dc9c3..00000000 --- a/assets/js/b15e96a2.25f985cf.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkopenmetal_docs=self.webpackChunkopenmetal_docs||[]).push([[6288],{3905:(e,n,t)=>{t.d(n,{Zo:()=>c,kt:()=>k});var a=t(67294);function o(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(o[t]=e[t]);return o}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(o[t]=e[t])}return o}var s=a.createContext({}),p=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},c=function(e){var n=p(e.components);return a.createElement(s.Provider,{value:n},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},m=a.forwardRef((function(e,n){var 
t=e.components,o=e.mdxType,l=e.originalType,s=e.parentName,c=r(e,["components","mdxType","originalType","parentName"]),u=p(t),m=o,k=u["".concat(s,".").concat(m)]||u[m]||d[m]||l;return t?a.createElement(k,i(i({ref:n},c),{},{components:t})):a.createElement(k,i({ref:n},c))}));function k(e,n){var t=arguments,o=n&&n.mdxType;if("string"==typeof e||o){var l=t.length,i=new Array(l);i[0]=m;var r={};for(var s in n)hasOwnProperty.call(n,s)&&(r[s]=n[s]);r.originalType=e,r[u]="string"==typeof e?e:o,i[1]=r;for(var p=2;p{t.r(n),t.d(n,{assets:()=>s,contentTitle:()=>i,default:()=>u,frontMatter:()=>l,metadata:()=>r,toc:()=>p});var a=t(87462),o=(t(67294),t(3905));const l={},i="Comparison of OpenStack NoVNC and SPICE Console",r={unversionedId:"tutorials/openstack-consoles-explained",id:"tutorials/openstack-consoles-explained",title:"Comparison of OpenStack NoVNC and SPICE Console",description:"Author: Ramon Grullon",source:"@site/docs/tutorials/openstack-consoles-explained.md",sourceDirName:"tutorials",slug:"/tutorials/openstack-consoles-explained",permalink:"/docs/manuals/tutorials/openstack-consoles-explained",draft:!1,editUrl:"https://github.com/openmetalio/openmetal-docs/blob/main/docs/tutorials/openstack-consoles-explained.md",tags:[],version:"current",frontMatter:{},sidebar:"tutorialSidebar",previous:{title:"Manage Flavors in OpenStack",permalink:"/docs/manuals/tutorials/manage-flavors"},next:{title:"How to Handle a Failed OpenStack Control Plane Node",permalink:"/docs/manuals/tutorials/recover-from-failed-control-plane-node"}},s={},p=[{value:"Introduction",id:"introduction",level:2},{value:"Overview",id:"overview",level:2},{value:"NoVNC (HTML5-based VNC client)",id:"novnc-html5-based-vnc-client",level:3},{value:"SPICE (Simple Protocol for Independent Computing Environments)",id:"spice-simple-protocol-for-independent-computing-environments",level:3},{value:"Features 
Comparison",id:"features-comparison",level:2},{value:"NoVNC",id:"novnc",level:3},{value:"SPICE",id:"spice",level:3},{value:"Scenarios",id:"scenarios",level:2},{value:"NoVNC Usage",id:"novnc-usage",level:3},{value:"SPICE Usage",id:"spice-usage",level:3},{value:"Configuration in OpenStack",id:"configuration-in-openstack",level:2},{value:"NoVNC Configuration",id:"novnc-configuration",level:3},{value:"SPICE Configuration",id:"spice-configuration",level:3},{value:"Kolla Ansible method",id:"kolla-ansible-method",level:4},{value:"Manual Spice Configuration",id:"manual-spice-configuration",level:4},{value:"Summary",id:"summary",level:2},{value:"References",id:"references",level:2}],c={toc:p};function u(e){let{components:n,...l}=e;return(0,o.kt)("wrapper",(0,a.Z)({},c,l,{components:n,mdxType:"MDXLayout"}),(0,o.kt)("h1",{id:"comparison-of-openstack-novnc-and-spice-console"},"Comparison of OpenStack NoVNC and SPICE Console"),(0,o.kt)("p",null,"Author: Ramon Grullon"),(0,o.kt)("h2",{id:"introduction"},"Introduction"),(0,o.kt)("p",null,"OpenStack provides multiple options for accessing virtual machine consoles,\nincluding NoVNC (HTML5-based) and SPICE (Simple Protocol for Independent\nComputing Environments). 
This document compares these two console access methods\nin the context of OpenStack."),(0,o.kt)("h2",{id:"overview"},"Overview"),(0,o.kt)("h3",{id:"novnc-html5-based-vnc-client"},"NoVNC (HTML5-based VNC client)"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Technology: NoVNC uses HTML5 and WebSockets to provide a VNC client that runs\nentirely in the browser.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Browser Compatibility: Works on most modern web browsers without requiring\nadditional plugins.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Performance: Generally good performance for remote console access.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Configuration: Usually configured as the default console access method in OpenStack."))),(0,o.kt)("p",null,(0,o.kt)("a",{target:"_blank",href:t(39373).Z},"NoVNC")),(0,o.kt)("h3",{id:"spice-simple-protocol-for-independent-computing-environments"},"SPICE (Simple Protocol for Independent Computing Environments)"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Technology: SPICE is a protocol for remote computing environments. 
SPICE\nclients are available as standalone applications.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Client Installation: Requires a separate SPICE client to be installed on the\nuser's machine.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Performance: Generally known for high-performance remote display capabilities.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Integration: Integrated with QEMU/KVM hypervisors and provides additional\nfeatures like audio and video streaming."))),(0,o.kt)("p",null,(0,o.kt)("a",{target:"_blank",href:t(73576).Z},"Spice")),(0,o.kt)("h2",{id:"features-comparison"},"Features Comparison"),(0,o.kt)("h3",{id:"novnc"},"NoVNC"),(0,o.kt)("p",null,"Pros:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Platform Independence: Works on various platforms without requiring client installation.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Browser Compatibility: Runs in most modern web browsers.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Easy Integration: Default console access method in many OpenStack deployments."))),(0,o.kt)("p",null,"Cons:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Potential Performance: May experience lower performance compared to SPICE in\ncertain scenarios.")),(0,o.kt)("h3",{id:"spice"},"SPICE"),(0,o.kt)("p",null,"Pros:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"High Performance: Known for delivering high-performance remote display capabilities.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Multimedia Support: Supports audio and video streaming in addition to console access.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Integration: Integrated with QEMU/KVM 
hypervisors."))),(0,o.kt)("p",null,"Cons:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Client Installation: Requires users to install a separate SPICE client on\ntheir machines.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Limited Browser Support: Requires a standalone client and is not as\nbrowser-friendly as NoVNC."))),(0,o.kt)("h2",{id:"scenarios"},"Scenarios"),(0,o.kt)("h3",{id:"novnc-usage"},"NoVNC Usage"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Suitability:")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Well-suited for users who need quick and easy console access without additional\nclient installations.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Suitable for scenarios where platform independence and browser compatibility\nare crucial."))),(0,o.kt)("h3",{id:"spice-usage"},"SPICE Usage"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Suitability:")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Ideal for users who prioritize high-performance remote display capabilities.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Suitable for multimedia applications and scenarios where advanced features\nlike audio and video streaming are required."))),(0,o.kt)("h2",{id:"configuration-in-openstack"},"Configuration in OpenStack"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Easy Integration: Default console access method in many OpenStack deployments.")),(0,o.kt)("h3",{id:"novnc-configuration"},"NoVNC Configuration"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Typically configured as the default console access method in OpenStack.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Configured in the Horizon dashboard."))),(0,o.kt)("h3",{id:"spice-configuration"},"SPICE 
Configuration"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Configuration:")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Requires additional configuration in OpenStack.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Configured in the Horizon dashboard, but users need to ensure SPICE support in\nhypervisors."))),(0,o.kt)("h4",{id:"kolla-ansible-method"},"Kolla Ansible method"),(0,o.kt)("p",null,"Requires using kolla-ansible to deploy Spice support and reconfiguring Nova"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Edit your /etc/kolla/globals.yaml file for kolla-ansible")),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-bash"},'# Nova\n\nenable_nova: true\nnova_console: "spice"\n\n')),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Edit your ",(0,o.kt)("inlineCode",{parentName:"li"},"/etc/kolla/nova-compute/nova.conf")," and\n",(0,o.kt)("inlineCode",{parentName:"li"},"/etc/kolla/nova-spicehtml5proxy/nova.conf")," file on each compute hosts to\ninclude the following in the spice section.")),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-bash"},"agent_enabled = true\n\n")),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Restart both nova-compute and nova-spicehtml5proxy containers")),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-bash"},"docker restart nova_compute nova_spicehtml5proxy\n")),(0,o.kt)("h4",{id:"manual-spice-configuration"},"Manual Spice Configuration"),(0,o.kt)("p",null,"To enable the SPICE console service, you must configure both the\nnova-spicehtml5proxy service and the nova-compute service. 
Most options are\ndefined in the spice group."),(0,o.kt)("p",null,"Further reading on process to enable Spice manually"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("a",{parentName:"li",href:"https://docs.openstack.org/nova/latest/admin/remote-console-access.html#spice-console"},"https://docs.openstack.org/nova/latest/admin/remote-console-access.html#spice-console"))),(0,o.kt)("h2",{id:"summary"},"Summary"),(0,o.kt)("p",null,"Choosing between NoVNC and SPICE depends on specific use cases, user preferences,\nand the desired level of performance. While NoVNC offers platform independence\nand ease of use, SPICE is known for its high-performance capabilities and\nmultimedia support."),(0,o.kt)("p",null,"Both options provide reliable console access, and the choice may be influenced\nby factors such as the user environment, desired features, and performance\nrequirements. Consider the needs of your users and the nature of your virtualized\nenvironment when selecting the most suitable console access method."),(0,o.kt)("h2",{id:"references"},"References"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("a",{parentName:"li",href:"https://docs.openstack.org/nova/latest/admin/remote-console-access.html"},"https://docs.openstack.org/nova/latest/admin/remote-console-access.html"))))}u.isMDXComponent=!0},39373:(e,n,t)=>{t.d(n,{Z:()=>a});const a=t.p+"assets/files/novnc-1da118a4e2d37f0457dc898b17df91f2.png"},73576:(e,n,t)=>{t.d(n,{Z:()=>a});const a=t.p+"assets/files/spice-d6bb68d48d9cfe82dc7d9ccd389ab377.png"}}]); \ No newline at end of file diff --git a/assets/js/b15e96a2.353d7962.js b/assets/js/b15e96a2.353d7962.js new file mode 100644 index 00000000..e11f7317 --- /dev/null +++ b/assets/js/b15e96a2.353d7962.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkopenmetal_docs=self.webpackChunkopenmetal_docs||[]).push([[6288],{3905:(e,n,t)=>{t.d(n,{Zo:()=>c,kt:()=>k});var a=t(67294);function o(e,n,t){return n in 
e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function i(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function l(e){for(var n=1;n=0||(o[t]=e[t]);return o}(e,n);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(o[t]=e[t])}return o}var s=a.createContext({}),p=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):l(l({},n),e)),t},c=function(e){var n=p(e.components);return a.createElement(s.Provider,{value:n},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},m=a.forwardRef((function(e,n){var t=e.components,o=e.mdxType,i=e.originalType,s=e.parentName,c=r(e,["components","mdxType","originalType","parentName"]),u=p(t),m=o,k=u["".concat(s,".").concat(m)]||u[m]||d[m]||i;return t?a.createElement(k,l(l({ref:n},c),{},{components:t})):a.createElement(k,l({ref:n},c))}));function k(e,n){var t=arguments,o=n&&n.mdxType;if("string"==typeof e||o){var i=t.length,l=new Array(i);l[0]=m;var r={};for(var s in n)hasOwnProperty.call(n,s)&&(r[s]=n[s]);r.originalType=e,r[u]="string"==typeof e?e:o,l[1]=r;for(var p=2;p{t.r(n),t.d(n,{assets:()=>s,contentTitle:()=>l,default:()=>u,frontMatter:()=>i,metadata:()=>r,toc:()=>p});var a=t(87462),o=(t(67294),t(3905));const i={},l="Comparison of OpenStack NoVNC and SPICE Console",r={unversionedId:"tutorials/openstack-consoles-explained",id:"tutorials/openstack-consoles-explained",title:"Comparison of OpenStack NoVNC and SPICE Console",description:"Author: Ramon 
Grullon",source:"@site/docs/tutorials/openstack-consoles-explained.md",sourceDirName:"tutorials",slug:"/tutorials/openstack-consoles-explained",permalink:"/docs/manuals/tutorials/openstack-consoles-explained",draft:!1,editUrl:"https://github.com/openmetalio/openmetal-docs/blob/main/docs/tutorials/openstack-consoles-explained.md",tags:[],version:"current",frontMatter:{},sidebar:"tutorialSidebar",previous:{title:"Manage Flavors in OpenStack",permalink:"/docs/manuals/tutorials/manage-flavors"},next:{title:"How to Handle a Failed OpenStack Control Plane Node",permalink:"/docs/manuals/tutorials/recover-from-failed-control-plane-node"}},s={},p=[{value:"Introduction",id:"introduction",level:2},{value:"Overview",id:"overview",level:2},{value:"NoVNC (HTML5-based VNC client)",id:"novnc-html5-based-vnc-client",level:3},{value:"SPICE (Simple Protocol for Independent Computing Environments)",id:"spice-simple-protocol-for-independent-computing-environments",level:3},{value:"Features Comparison",id:"features-comparison",level:2},{value:"NoVNC",id:"novnc",level:3},{value:"SPICE",id:"spice",level:3},{value:"Scenarios",id:"scenarios",level:2},{value:"NoVNC Usage",id:"novnc-usage",level:3},{value:"SPICE Usage",id:"spice-usage",level:3},{value:"Configuration in OpenStack",id:"configuration-in-openstack",level:2},{value:"NoVNC Configuration",id:"novnc-configuration",level:3},{value:"SPICE Configuration",id:"spice-configuration",level:3},{value:"Kolla Ansible method",id:"kolla-ansible-method",level:4},{value:"Manual Spice Configuration",id:"manual-spice-configuration",level:4},{value:"Summary",id:"summary",level:2},{value:"References",id:"references",level:2}],c={toc:p};function u(e){let{components:n,...i}=e;return(0,o.kt)("wrapper",(0,a.Z)({},c,i,{components:n,mdxType:"MDXLayout"}),(0,o.kt)("h1",{id:"comparison-of-openstack-novnc-and-spice-console"},"Comparison of OpenStack NoVNC and SPICE Console"),(0,o.kt)("p",null,"Author: Ramon 
Grullon"),(0,o.kt)("h2",{id:"introduction"},"Introduction"),(0,o.kt)("p",null,"OpenStack provides multiple options for accessing virtual machine consoles,\nincluding NoVNC (HTML5-based) and SPICE (Simple Protocol for Independent\nComputing Environments). This document compares these two console access methods\nin the context of OpenStack."),(0,o.kt)("h2",{id:"overview"},"Overview"),(0,o.kt)("h3",{id:"novnc-html5-based-vnc-client"},"NoVNC (HTML5-based VNC client)"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Technology: NoVNC uses HTML5 and WebSockets to provide a VNC client that runs\nentirely in the browser.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Browser Compatibility: Works on most modern web browsers without requiring\nadditional plugins.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Performance: Generally good performance for remote console access.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Configuration: Usually configured as the default console access method in OpenStack."))),(0,o.kt)("p",null,(0,o.kt)("img",{alt:"NoVNC",src:t(72814).Z,width:"971",height:"579"})),(0,o.kt)("h3",{id:"spice-simple-protocol-for-independent-computing-environments"},"SPICE (Simple Protocol for Independent Computing Environments)"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Technology: SPICE is a protocol for remote computing environments. 
SPICE\nclients are available as standalone applications.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Client Installation: Requires a separate SPICE client to be installed on the\nuser's machine.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Performance: Generally known for high-performance remote display capabilities.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Integration: Integrated with QEMU/KVM hypervisors and provides additional\nfeatures like audio and video streaming."))),(0,o.kt)("p",null,(0,o.kt)("img",{alt:"Spice",src:t(52136).Z,width:"1042",height:"728"})),(0,o.kt)("h2",{id:"features-comparison"},"Features Comparison"),(0,o.kt)("h3",{id:"novnc"},"NoVNC"),(0,o.kt)("p",null,"Pros:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Platform Independence: Works on various platforms without requiring client installation.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Browser Compatibility: Runs in most modern web browsers.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Easy Integration: Default console access method in many OpenStack deployments."))),(0,o.kt)("p",null,"Cons:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Potential Performance: May experience lower performance compared to SPICE in\ncertain scenarios.")),(0,o.kt)("h3",{id:"spice"},"SPICE"),(0,o.kt)("p",null,"Pros:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"High Performance: Known for delivering high-performance remote display capabilities.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Multimedia Support: Supports audio and video streaming in addition to console access.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Integration: Integrated with QEMU/KVM 
hypervisors."))),(0,o.kt)("p",null,"Cons:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Client Installation: Requires users to install a separate SPICE client on\ntheir machines.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Limited Browser Support: Requires a standalone client and is not as\nbrowser-friendly as NoVNC."))),(0,o.kt)("h2",{id:"scenarios"},"Scenarios"),(0,o.kt)("h3",{id:"novnc-usage"},"NoVNC Usage"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Suitability:")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Well-suited for users who need quick and easy console access without additional\nclient installations.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Suitable for scenarios where platform independence and browser compatibility\nare crucial."))),(0,o.kt)("h3",{id:"spice-usage"},"SPICE Usage"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Suitability:")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Ideal for users who prioritize high-performance remote display capabilities.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Suitable for multimedia applications and scenarios where advanced features\nlike audio and video streaming are required."))),(0,o.kt)("h2",{id:"configuration-in-openstack"},"Configuration in OpenStack"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Easy Integration: Default console access method in many OpenStack deployments.")),(0,o.kt)("h3",{id:"novnc-configuration"},"NoVNC Configuration"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Typically configured as the default console access method in OpenStack.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Configured in the Horizon dashboard."))),(0,o.kt)("h3",{id:"spice-configuration"},"SPICE 
Configuration"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Configuration:")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Requires additional configuration in OpenStack.")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("p",{parentName:"li"},"Configured in the Horizon dashboard, but users need to ensure SPICE support in\nhypervisors."))),(0,o.kt)("h4",{id:"kolla-ansible-method"},"Kolla Ansible method"),(0,o.kt)("p",null,"Requires using kolla-ansible to deploy Spice support and reconfiguring Nova"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Edit your /etc/kolla/globals.yaml file for kolla-ansible")),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-bash"},'# Nova\n\nenable_nova: true\nnova_console: "spice"\n\n')),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Edit your ",(0,o.kt)("inlineCode",{parentName:"li"},"/etc/kolla/nova-compute/nova.conf")," and\n",(0,o.kt)("inlineCode",{parentName:"li"},"/etc/kolla/nova-spicehtml5proxy/nova.conf")," file on each compute hosts to\ninclude the following in the spice section.")),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-bash"},"agent_enabled = true\n\n")),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Restart both nova-compute and nova-spicehtml5proxy containers")),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-bash"},"docker restart nova_compute nova_spicehtml5proxy\n")),(0,o.kt)("h4",{id:"manual-spice-configuration"},"Manual Spice Configuration"),(0,o.kt)("p",null,"To enable the SPICE console service, you must configure both the\nnova-spicehtml5proxy service and the nova-compute service. 
Most options are\ndefined in the spice group."),(0,o.kt)("p",null,"Further reading on process to enable Spice manually"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("a",{parentName:"li",href:"https://docs.openstack.org/nova/latest/admin/remote-console-access.html#spice-console"},"https://docs.openstack.org/nova/latest/admin/remote-console-access.html#spice-console"))),(0,o.kt)("h2",{id:"summary"},"Summary"),(0,o.kt)("p",null,"Choosing between NoVNC and SPICE depends on specific use cases, user preferences,\nand the desired level of performance. While NoVNC offers platform independence\nand ease of use, SPICE is known for its high-performance capabilities and\nmultimedia support."),(0,o.kt)("p",null,"Both options provide reliable console access, and the choice may be influenced\nby factors such as the user environment, desired features, and performance\nrequirements. Consider the needs of your users and the nature of your virtualized\nenvironment when selecting the most suitable console access method."),(0,o.kt)("h2",{id:"references"},"References"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("a",{parentName:"li",href:"https://docs.openstack.org/nova/latest/admin/remote-console-access.html"},"https://docs.openstack.org/nova/latest/admin/remote-console-access.html"))))}u.isMDXComponent=!0},72814:(e,n,t)=>{t.d(n,{Z:()=>a});const a=t.p+"assets/images/novnc-1da118a4e2d37f0457dc898b17df91f2.png"},52136:(e,n,t)=>{t.d(n,{Z:()=>a});const a=t.p+"assets/images/spice-d6bb68d48d9cfe82dc7d9ccd389ab377.png"}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.dc8f7f8b.js b/assets/js/runtime~main.b2d2c518.js similarity index 99% rename from assets/js/runtime~main.dc8f7f8b.js rename to assets/js/runtime~main.b2d2c518.js index 07ebcbc0..9ae628cb 100644 --- a/assets/js/runtime~main.dc8f7f8b.js +++ b/assets/js/runtime~main.b2d2c518.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,c,f,d,b={},t={};function r(e){var a=t[e];if(void 0!==a)return 
a.exports;var c=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(c.exports,c,c.exports,r),c.loaded=!0,c.exports}r.m=b,r.c=t,e=[],r.O=(a,c,f,d)=>{if(!c){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](c[o])))?c.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,f,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},c=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,c({}),c([]),c(c)];for(var t=2&f&&e;"object"==typeof t&&!~a.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var c in a)r.o(a,c)&&!r.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:a[c]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,c)=>(r.f[c](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",147:"30c007f1",161:"b4c82ce0",321:"c7f56b65",628:"a7b8d390",630:"d3270222",673:"a0ae390e",862:"28b2cf7b",1007:"ce55802d",1053:"b76d92a8",1084:"99cd8e62",1147:"66e3f2b3",1832:"4675f6be",1865:"0bd3c87f",1988:"65268cae",2307:"e729188c",2314:"2be4ac04",2315:"46565dd9",2320:"cd477820",2570:"741a7b45",2686:"18a76523",2689:"100d2314",2786:"cfb89daf",3083:"0aae2b90",3087:"e657abd0",3141:"90b51b29",3201:"dcc40c71",3240:"622d963d",3250:"72d1692d",3332:"a92b3547",3374:"d31315ab",3443:"43dac92d",3666:"a1ca90f5",3786:"f69e99b4",3967:"f8e7c4e7",4052:"b6d47cd3",4059:"98a766c2",4118:"b1cd55c9",4135:"0bc056f5",4195:"c4f5d8e4",4291:"49a3b1f9",4341:"66df924b",4745:"2832733a",4793:"9e626a79",4799:"e11557f0",4848:"f5713fa8",4863:"f3c8529e",5192:"5309ebfc",5403:"9c802b10",5481:"e27d8983",5669:"b7ea0039",5807:"71826adb",5872:"a587d0a5",5916:"70140af6",6048:"74a3b64c",6049:"16d18ee4",6059:"5262408b",6077:"a6ab7b9b",6191:"7288bbe5",6222:"a68b6c9e",6288:"b15e96a2",6564:"7cef7aa6",6740:"f4db4ea
8",6793:"1062288c",6836:"fea1970e",6860:"43d142a7",7027:"de218cf0",7146:"4d86d70f",7283:"30f866cd",7483:"a0577689",7537:"0f4e5aa5",7717:"37c925e8",7768:"29f78988",7837:"01df2a46",7875:"c09d8478",7918:"17896441",8121:"81c4a8b4",8134:"85563ccb",8204:"5c6d2fda",8257:"23b7ec59",8267:"afa81f7b",8440:"755ef74b",8492:"a0d1ac5d",8542:"d9356ab2",8624:"ae952dfc",8693:"2456407f",8816:"5cddb846",8848:"3d48a6ef",8861:"0c109468",9029:"09f277c6",9198:"aaa9ec36",9243:"21aff34e",9338:"cd622106",9514:"1be78505",9739:"211c4be1",9827:"7827fc59"}[e]||e)+"."+{53:"1ef73e3e",147:"ca24b882",161:"7afb393a",321:"18e91b3e",628:"32794a00",630:"3bfda471",673:"4c273a72",862:"8b98ee9b",1007:"38c250b6",1053:"07b2de0f",1084:"58854d46",1147:"bcb94f43",1832:"5b1095cd",1865:"b06a175d",1988:"b2e525d1",2307:"975b2859",2314:"1d1c03c2",2315:"81f516c3",2320:"03cbf0f9",2570:"692efb11",2686:"e619f7e8",2689:"58e1b48d",2786:"ccdd6e08",3083:"7e503f5d",3087:"58c2d207",3141:"46f15501",3201:"5dea5b65",3240:"e66073e6",3250:"397a7a09",3332:"dc6b0e33",3374:"e9947fdc",3443:"d8137350",3666:"af42e6b8",3786:"f4637f28",3967:"19813a4a",4052:"6db3ecf2",4059:"44662055",4118:"9ddb8798",4135:"4bd0290c",4195:"ace7e85d",4291:"25c0b1ff",4341:"229619cc",4745:"6ff924f0",4793:"dd7507ee",4799:"912c0002",4848:"264157ef",4863:"5e4b2d72",4972:"420bdbf9",5192:"6b743d6d",5403:"ecf9f56c",5481:"962b515d",5669:"04b34487",5807:"41ab4304",5872:"f87324d9",5916:"1c0e8e41",6048:"7d02fd3d",6049:"a6efb79c",6059:"12ca0553",6077:"e58f2437",6191:"a787ab5c",6222:"352a01c0",6288:"25f985cf",6564:"595b789e",6740:"4ea7fc2c",6793:"430c6e39",6836:"e1570f60",6860:"d758604d",7027:"7225a84c",7146:"900741b2",7283:"6271cfac",7483:"53e7f08c",7537:"93720869",7717:"fc148403",7768:"667a590f",7837:"a158549d",7875:"d70b781a",7918:"3de0c338",8121:"5c51bfd4",8134:"10df6b47",8204:"bbff1d86",8257:"4ecf47b9",8267:"029af9f0",8440:"522e6a47",8492:"d43a07cd",8542:"06320cf4",8624:"fd0cbfef",8693:"0e52807f",8816:"00d7285a",8848:"34456bf3",8861:"bec527ac",9029:"db816dfb",9090:"1ca
38501",9198:"8ad77c04",9243:"9eea8ece",9338:"0692c534",9514:"f89b479d",9739:"72d22709",9827:"371a0050"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),f={},d="openmetal-docs:",r.l=(e,a,c,b)=>{if(f[e])f[e].push(a);else{var t,o;if(void 0!==c)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=f[e];if(delete f[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(c))),a)return a(c)},s=setTimeout(u.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=u.bind(null,t.onerror),t.onload=u.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/docs/manuals/",r.gca=function(e){return 
e={17896441:"7918","935f2afb":"53","30c007f1":"147",b4c82ce0:"161",c7f56b65:"321",a7b8d390:"628",d3270222:"630",a0ae390e:"673","28b2cf7b":"862",ce55802d:"1007",b76d92a8:"1053","99cd8e62":"1084","66e3f2b3":"1147","4675f6be":"1832","0bd3c87f":"1865","65268cae":"1988",e729188c:"2307","2be4ac04":"2314","46565dd9":"2315",cd477820:"2320","741a7b45":"2570","18a76523":"2686","100d2314":"2689",cfb89daf:"2786","0aae2b90":"3083",e657abd0:"3087","90b51b29":"3141",dcc40c71:"3201","622d963d":"3240","72d1692d":"3250",a92b3547:"3332",d31315ab:"3374","43dac92d":"3443",a1ca90f5:"3666",f69e99b4:"3786",f8e7c4e7:"3967",b6d47cd3:"4052","98a766c2":"4059",b1cd55c9:"4118","0bc056f5":"4135",c4f5d8e4:"4195","49a3b1f9":"4291","66df924b":"4341","2832733a":"4745","9e626a79":"4793",e11557f0:"4799",f5713fa8:"4848",f3c8529e:"4863","5309ebfc":"5192","9c802b10":"5403",e27d8983:"5481",b7ea0039:"5669","71826adb":"5807",a587d0a5:"5872","70140af6":"5916","74a3b64c":"6048","16d18ee4":"6049","5262408b":"6059",a6ab7b9b:"6077","7288bbe5":"6191",a68b6c9e:"6222",b15e96a2:"6288","7cef7aa6":"6564",f4db4ea8:"6740","1062288c":"6793",fea1970e:"6836","43d142a7":"6860",de218cf0:"7027","4d86d70f":"7146","30f866cd":"7283",a0577689:"7483","0f4e5aa5":"7537","37c925e8":"7717","29f78988":"7768","01df2a46":"7837",c09d8478:"7875","81c4a8b4":"8121","85563ccb":"8134","5c6d2fda":"8204","23b7ec59":"8257",afa81f7b:"8267","755ef74b":"8440",a0d1ac5d:"8492",d9356ab2:"8542",ae952dfc:"8624","2456407f":"8693","5cddb846":"8816","3d48a6ef":"8848","0c109468":"8861","09f277c6":"9029",aaa9ec36:"9198","21aff34e":"9243",cd622106:"9338","1be78505":"9514","211c4be1":"9739","7827fc59":"9827"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,c)=>{var f=r.o(e,a)?e[a]:void 0;if(0!==f)if(f)c.push(f[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((c,d)=>f=e[a]=[c,d]));c.push(f[2]=d);var b=r.p+r.u(a),t=new Error;r.l(b,(c=>{if(r.o(e,a)&&(0!==(f=e[a])&&(e[a]=void 0),f)){var 
d=c&&("load"===c.type?"missing":c.type),b=c&&c.target&&c.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": "+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,f[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,c)=>{var f,d,b=c[0],t=c[1],o=c[2],n=0;if(b.some((a=>0!==e[a]))){for(f in t)r.o(t,f)&&(r.m[f]=t[f]);if(o)var i=o(r)}for(a&&a(c);n{"use strict";var e,a,c,f,d,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var c=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(c.exports,c,c.exports,r),c.loaded=!0,c.exports}r.m=b,r.c=t,e=[],r.O=(a,c,f,d)=>{if(!c){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](c[o])))?c.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,f,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},c=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,c({}),c([]),c(c)];for(var t=2&f&&e;"object"==typeof t&&!~a.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var c in 
a)r.o(a,c)&&!r.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:a[c]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,c)=>(r.f[c](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",147:"30c007f1",161:"b4c82ce0",321:"c7f56b65",628:"a7b8d390",630:"d3270222",673:"a0ae390e",862:"28b2cf7b",1007:"ce55802d",1053:"b76d92a8",1084:"99cd8e62",1147:"66e3f2b3",1832:"4675f6be",1865:"0bd3c87f",1988:"65268cae",2307:"e729188c",2314:"2be4ac04",2315:"46565dd9",2320:"cd477820",2570:"741a7b45",2686:"18a76523",2689:"100d2314",2786:"cfb89daf",3083:"0aae2b90",3087:"e657abd0",3141:"90b51b29",3201:"dcc40c71",3240:"622d963d",3250:"72d1692d",3332:"a92b3547",3374:"d31315ab",3443:"43dac92d",3666:"a1ca90f5",3786:"f69e99b4",3967:"f8e7c4e7",4052:"b6d47cd3",4059:"98a766c2",4118:"b1cd55c9",4135:"0bc056f5",4195:"c4f5d8e4",4291:"49a3b1f9",4341:"66df924b",4745:"2832733a",4793:"9e626a79",4799:"e11557f0",4848:"f5713fa8",4863:"f3c8529e",5192:"5309ebfc",5403:"9c802b10",5481:"e27d8983",5669:"b7ea0039",5807:"71826adb",5872:"a587d0a5",5916:"70140af6",6048:"74a3b64c",6049:"16d18ee4",6059:"5262408b",6077:"a6ab7b9b",6191:"7288bbe5",6222:"a68b6c9e",6288:"b15e96a2",6564:"7cef7aa6",6740:"f4db4ea8",6793:"1062288c",6836:"fea1970e",6860:"43d142a7",7027:"de218cf0",7146:"4d86d70f",7283:"30f866cd",7483:"a0577689",7537:"0f4e5aa5",7717:"37c925e8",7768:"29f78988",7837:"01df2a46",7875:"c09d8478",7918:"17896441",8121:"81c4a8b4",8134:"85563ccb",8204:"5c6d2fda",8257:"23b7ec59",8267:"afa81f7b",8440:"755ef74b",8492:"a0d1ac5d",8542:"d9356ab2",8624:"ae952dfc",8693:"2456407f",8816:"5cddb846",8848:"3d48a6ef",8861:"0c109468",9029:"09f277c6",9198:"aaa9ec36",9243:"21aff34e",9338:"cd622106",9514:"1be78505",9739:"211c4be1",9827:"7827fc59"}[e]||e)+"."+{53:"1ef73e3e",147:"ca24b882",161:"7afb393a",321:"18e91b3e",628:"32794a00",630:"3bfda471",673:"4c273a72",862:"8b98ee9b",1007:"38c250b6",1053:"07b2de0f",1084:"58854d46",1147:"bcb94f43",1832:"5b1095cd",1865:"b06a175d",1988:"b2e525d1",2307:"975b2859",2314:"1d1c03c2",2315:"81f516c3",232
0:"03cbf0f9",2570:"692efb11",2686:"e619f7e8",2689:"58e1b48d",2786:"ccdd6e08",3083:"7e503f5d",3087:"58c2d207",3141:"46f15501",3201:"5dea5b65",3240:"e66073e6",3250:"397a7a09",3332:"dc6b0e33",3374:"e9947fdc",3443:"d8137350",3666:"af42e6b8",3786:"f4637f28",3967:"19813a4a",4052:"6db3ecf2",4059:"44662055",4118:"9ddb8798",4135:"4bd0290c",4195:"ace7e85d",4291:"25c0b1ff",4341:"229619cc",4745:"6ff924f0",4793:"dd7507ee",4799:"912c0002",4848:"264157ef",4863:"5e4b2d72",4972:"420bdbf9",5192:"6b743d6d",5403:"ecf9f56c",5481:"962b515d",5669:"04b34487",5807:"41ab4304",5872:"f87324d9",5916:"1c0e8e41",6048:"7d02fd3d",6049:"a6efb79c",6059:"12ca0553",6077:"e58f2437",6191:"a787ab5c",6222:"352a01c0",6288:"353d7962",6564:"595b789e",6740:"4ea7fc2c",6793:"430c6e39",6836:"e1570f60",6860:"d758604d",7027:"7225a84c",7146:"900741b2",7283:"6271cfac",7483:"53e7f08c",7537:"93720869",7717:"fc148403",7768:"667a590f",7837:"a158549d",7875:"d70b781a",7918:"3de0c338",8121:"5c51bfd4",8134:"10df6b47",8204:"bbff1d86",8257:"4ecf47b9",8267:"029af9f0",8440:"522e6a47",8492:"d43a07cd",8542:"06320cf4",8624:"fd0cbfef",8693:"0e52807f",8816:"00d7285a",8848:"34456bf3",8861:"bec527ac",9029:"db816dfb",9090:"1ca38501",9198:"8ad77c04",9243:"9eea8ece",9338:"0692c534",9514:"f89b479d",9739:"72d22709",9827:"371a0050"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),f={},d="openmetal-docs:",r.l=(e,a,c,b)=>{if(f[e])f[e].push(a);else{var t,o;if(void 0!==c)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=f[e];if(delete f[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(c))),a)return a(c)},s=setTimeout(u.bind(null,void 
0,{type:"timeout",target:t}),12e4);t.onerror=u.bind(null,t.onerror),t.onload=u.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/docs/manuals/",r.gca=function(e){return e={17896441:"7918","935f2afb":"53","30c007f1":"147",b4c82ce0:"161",c7f56b65:"321",a7b8d390:"628",d3270222:"630",a0ae390e:"673","28b2cf7b":"862",ce55802d:"1007",b76d92a8:"1053","99cd8e62":"1084","66e3f2b3":"1147","4675f6be":"1832","0bd3c87f":"1865","65268cae":"1988",e729188c:"2307","2be4ac04":"2314","46565dd9":"2315",cd477820:"2320","741a7b45":"2570","18a76523":"2686","100d2314":"2689",cfb89daf:"2786","0aae2b90":"3083",e657abd0:"3087","90b51b29":"3141",dcc40c71:"3201","622d963d":"3240","72d1692d":"3250",a92b3547:"3332",d31315ab:"3374","43dac92d":"3443",a1ca90f5:"3666",f69e99b4:"3786",f8e7c4e7:"3967",b6d47cd3:"4052","98a766c2":"4059",b1cd55c9:"4118","0bc056f5":"4135",c4f5d8e4:"4195","49a3b1f9":"4291","66df924b":"4341","2832733a":"4745","9e626a79":"4793",e11557f0:"4799",f5713fa8:"4848",f3c8529e:"4863","5309ebfc":"5192","9c802b10":"5403",e27d8983:"5481",b7ea0039:"5669","71826adb":"5807",a587d0a5:"5872","70140af6":"5916","74a3b64c":"6048","16d18ee4":"6049","5262408b":"6059",a6ab7b9b:"6077","7288bbe5":"6191",a68b6c9e:"6222",b15e96a2:"6288","7cef7aa6":"6564",f4db4ea8:"6740","1062288c":"6793",fea1970e:"6836","43d142a7":"6860",de218cf0:"7027","4d86d70f":"7146","30f866cd":"7283",a0577689:"7483","0f4e5aa5":"7537","37c925e8":"7717","29f78988":"7768","01df2a46":"7837",c09d8478:"7875","81c4a8b4":"8121","85563ccb":"8134","5c6d2fda":"8204","23b7ec59":"8257",afa81f7b:"8267","755ef74b":"8440",a0d1ac5d:"8492",d9356ab2:"8542",ae952dfc:"8624","2456407f":"8693","5cddb846":"8816","3d48a6ef":"8848","0c109468":"8861","09f277c6":"9029",aaa9ec36:"9198","21aff34e":"9243",cd622106:"9338","1be78505":"9514","211c4be1":"9739","7827fc59":"9827"}[e]||e,r.p+r.u
(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,c)=>{var f=r.o(e,a)?e[a]:void 0;if(0!==f)if(f)c.push(f[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((c,d)=>f=e[a]=[c,d]));c.push(f[2]=d);var b=r.p+r.u(a),t=new Error;r.l(b,(c=>{if(r.o(e,a)&&(0!==(f=e[a])&&(e[a]=void 0),f)){var d=c&&("load"===c.type?"missing":c.type),b=c&&c.target&&c.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": "+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,f[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,c)=>{var f,d,b=c[0],t=c[1],o=c[2],n=0;if(b.some((a=>0!==e[a]))){for(f in t)r.o(t,f)&&(r.m[f]=t[f]);if(o)var i=o(r)}for(a&&a(c);n

Instructions

First, you will want to navigate to the Assets page for the cloud with the node you wish to access.

image

Next, select the menu () next to the server you want to access, then select IPMI Access :

image

On the next menu, select Download JNLP :

image

Once the <hostname>.jnlp has downloaded, open it and hit Run when prompted:

image

You will then enter into the virtual console where you can enter your root credentials:

image

- + \ No newline at end of file diff --git a/cloud-administration/intro-to-cloud-init/index.html b/cloud-administration/intro-to-cloud-init/index.html index c4aa09cd..e19ca0d8 100644 --- a/cloud-administration/intro-to-cloud-init/index.html +++ b/cloud-administration/intro-to-cloud-init/index.html @@ -23,7 +23,7 @@ about to receive.

Below we will go over some examples of cloud-configs.

#cloud-config

users:
- default
- name: omi-admin
groups: sudo
shell: /bin/bash
sudo: ['ALL=(ALL) NOPASSWD:ALL']
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDE0PUM9qcLkttJp330AHwp7M5kNQ5YQCU0iE0NhZkK8nTxJ3NelTST739nlaQxCYIdPhMegJQgNqsVkcZ2daaRYlc8fWGnzRYfL+f9AgM5fGAAmveBFYajZe/5Kp+81IYcfLQhfoWTvytoBq9gmn0PwwFsLlwe138r6M2aaWJl80V/mDp2BiAwDh0xJYR2+Ei7AD4O66lAeZJhjqaue/Ctpez4MpXp2XRufErsmCBHX9bN0wVPWNNJgrfTIBhrDqr1JbMHZo73d5iQntxJAmc1+y8qTueUGpiitC5Fl/jKyLycIjOM4OPpKgsvc1DtT+UWudfzG2kpAYJWaA3t6r8IxMS6a/9leavL7TKUcoAdqQahB75iJ38CZKxVB0sF0xxxyBS2JtMokHfex6bHtWS0D0eBwpQZPSKT18egmal4sFEcQwxEHeqK16U+9N01hv7KatImG2pHUQJxmtPmdRMOhltFOQCmIfm21mHxsXgaYY8In5xbZD1Lg05FYmOmwgE= omi-admin@example.io

In this example we are creating a new user, omi-admin, giving that user sudo access, and specifying a public SSH key to simplify SSH access.

#cloud-config
package_upgrade: true
packages:
- python3-pip
- python3-dev
- build-essential
- libssl-dev
- libffi-dev

In the example above we ensure that all packages are up-to-date, and ensure specific packages are installed on the server.

#cloud-config
manage_resolv_conf: true
resolv_conf:
nameservers:
- '9.9.9.9' #quad9
- '8.8.8.8' #google

Our last example sets the server to use the specified resolvers.

- + \ No newline at end of file diff --git a/engineers-notes/building-windows-cloud-images-on-openmetal/index.html b/engineers-notes/building-windows-cloud-images-on-openmetal/index.html index 465223c2..e32f6669 100644 --- a/engineers-notes/building-windows-cloud-images-on-openmetal/index.html +++ b/engineers-notes/building-windows-cloud-images-on-openmetal/index.html @@ -188,7 +188,7 @@ understand all of the things you may need to configure and install to be happy with the end result. We found this to be a learning experience as each time we had to iterate another change or customization to the process.

- + \ No newline at end of file diff --git a/engineers-notes/cloud101-cloud-storage/index.html b/engineers-notes/cloud101-cloud-storage/index.html index 05b49ea8..4532cb3c 100644 --- a/engineers-notes/cloud101-cloud-storage/index.html +++ b/engineers-notes/cloud101-cloud-storage/index.html @@ -63,7 +63,7 @@ critical, and the loss of a single storage node or drive is expected. However, erasure coding requires additional processing power, so it may not be the best choice for smaller, low-compute clusters.

- + \ No newline at end of file diff --git a/engineers-notes/custom-image-packer/index.html b/engineers-notes/custom-image-packer/index.html index aae9ec98..e8a0f18d 100644 --- a/engineers-notes/custom-image-packer/index.html +++ b/engineers-notes/custom-image-packer/index.html @@ -28,7 +28,7 @@ correct any formatting discrepancies:

packer fmt .

Your image.pkr.hcl should now look like this:

$ cat image.pkr.hcl 
packer {
required_plugins {
openstack = {
version = ">= 1.0.1"
source = "github.com/hashicorp/openstack"
}
}
}

source "openstack" "demo" {
flavor = "m1.small"
image_name = "Ubuntu 22.04 (jammy-amd64)"
external_source_image_url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
ssh_username = "ubuntu"
security_groups = ["build-group"]
}

build {
sources = ["source.openstack.demo"]
}

Validate configuration and build image

Now that your configuration file is complete, verify everything is correct with packer validate:

$ packer validate .
The configuration is valid.

Once validated you are ready to build your image:

$ packer build .
openstack.demo: output will be in this color.

==> openstack.demo: Loading flavor: m1.small
openstack.demo: Verified flavor. ID: m1.small
==> openstack.demo: Creating temporary RSA SSH key for instance...
==> openstack.demo: Creating temporary keypair: packer_636aa8e7-85aa-b787-ca40-b494195062d8 ...
==> openstack.demo: Created temporary keypair: packer_636aa8e7-85aa-b787-ca40-b494195062d8
==> openstack.demo: Creating image using external source image with name packer_636aa8e7-5f8a-8c58-b8fd-b415f5d433aa
==> openstack.demo: Using disk format qcow2
==> openstack.demo: Created image with ID 11919271-4c41-4f28-a20e-417dda203c45
==> openstack.demo: Importing External Source Image from URL https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
openstack.demo: Image not Active, retrying in 10 seconds
[...]
==> openstack.demo: Launching server...
==> openstack.demo: Launching server...
openstack.demo: Server ID: 779e2ffe-5c18-4c28-b714-bff62d234129
==> openstack.demo: Waiting for server to become ready...
openstack.demo: Floating IP not required
==> openstack.demo: Using SSH communicator to connect: 173.231.253.58
==> openstack.demo: Waiting for SSH to become available...
==> openstack.demo: Connected to SSH!
==> openstack.demo: Stopping server: 779e2ffe-5c18-4c28-b714-bff62d234129 ...
openstack.demo: Waiting for server to stop: 779e2ffe-5c18-4c28-b714-bff62d234129 ...
==> openstack.demo: Creating the image: Ubuntu 22.04 (jammy-amd64)
openstack.demo: Image: c2ed4308-593c-4a0c-adc5-dbeebaf69dbd
==> openstack.demo: Waiting for image Ubuntu 22.04 (jammy-amd64) (image id: c2ed4308-593c-4a0c-adc5-dbeebaf69dbd) to become ready...
==> openstack.demo: Terminating the source server: 779e2ffe-5c18-4c28-b714-bff62d234129 ...
==> openstack.demo: Deleting temporary external source image: packer_636aa8e7-5f8a-8c58-b8fd-b415f5d433aa ...
==> openstack.demo: Deleting temporary keypair: packer_636aa8e7-85aa-b787-ca40-b494195062d8 ...
Build 'openstack.demo' finished after 1 minute 59 seconds.

==> Wait completed after 1 minute 59 seconds

==> Builds finished. The artifacts of successful builds are:
--> openstack.demo: An image was created: c2ed4308-593c-4a0c-adc5-dbeebaf69dbd

We can see from the output above that the build process completed successfully. We can view the new image properties with openstack image show <new_image_id>:

$ openstack image show c2ed4308-593c-4a0c-adc5-dbeebaf69dbd --fit-width
+------------------+-----------------------------------------------------------------------------------------------+
| Field | Value |
+------------------+-----------------------------------------------------------------------------------------------+
| container_format | bare |
| created_at | 2022-11-08T19:09:07Z |
| disk_format | raw |
| file | /v2/images/c2ed4308-593c-4a0c-adc5-dbeebaf69dbd/file |
| id | c2ed4308-593c-4a0c-adc5-dbeebaf69dbd |
| min_disk | 25 |
| min_ram | 0 |
| name | Ubuntu 22.04 (jammy-amd64) |
| owner | 20de51f9e6714727aabe668cdcf33d67 |
| properties | base_image_ref='11919271-4c41-4f28-a20e-417dda203c45', boot_roles='reader,member,admin', |
| | image_location='snapshot', image_state='available', image_type='image', |
| | instance_uuid='779e2ffe-5c18-4c28-b714-bff62d234129', locations='[{'url': 'rbd://c368d47b-005 |
| | 5-4380-bda9-257e0c662b90/images/c2ed4308-593c-4a0c-adc5-dbeebaf69dbd/snap', 'metadata': |
| | {'store': 'rbd'}}]', os_glance_failed_import='', os_glance_importing_to_stores='', |
| | os_hidden='False', owner_project_name='kubespray-demo', owner_user_name='admin', |
| | stores='rbd', user_id='14ca74f0b2da43d680fdba3910d4ee3a' |
| protected | False |
| schema | /v2/schemas/image |
| size | 26843545600 |
| status | active |
| tags | |
| updated_at | 2022-11-08T19:09:15Z |
| visibility | private |
+------------------+-----------------------------------------------------------------------------------------------+

Resources

- + \ No newline at end of file diff --git a/engineers-notes/kubernetes-failed-to-detach-persistent-volume/index.html b/engineers-notes/kubernetes-failed-to-detach-persistent-volume/index.html index fdf6a80a..8897582a 100644 --- a/engineers-notes/kubernetes-failed-to-detach-persistent-volume/index.html +++ b/engineers-notes/kubernetes-failed-to-detach-persistent-volume/index.html @@ -25,7 +25,7 @@ volumeattachment and remove the finalizers. This allowed the CSI driver to delete the volume attachment from Kubernetes automatically.

List all VolumeAttachments and PersistentVolumes

kubectl get pv
kubectl get volumeattachment

Edit VolumeAttachment with the issue

Verify that the volume attachment doesn't have a volume associated with it.

kubectl edit volumeattachment csi-5321e82004036bca2c98cd2254de8568283a1e72a36cc21b1df53d2667de54e3

Remove the finalizers and save the file

  finalizers:
- external-attacher/.....

Delete the VolumeAttachment

If the volume attachment has not been automatically deleted, you should be able to delete it now

kubectl delete volumeattachment csi-5321e82004036bca2c98cd2254de8568283a1e72a36cc21b1df53d2667de54e3
- + \ No newline at end of file diff --git a/engineers-notes/multiport-manual/index.html b/engineers-notes/multiport-manual/index.html index d9ccb484..47460921 100644 --- a/engineers-notes/multiport-manual/index.html +++ b/engineers-notes/multiport-manual/index.html @@ -98,7 +98,7 @@ the networking service has started.

After creating the unit file, reload systemd to pick up the changes:

systemctl daemon-reload

You can then enable and start the service:

systemctl enable policy-routing.service
systemctl start policy-routing.service

This setup will execute your script during the system startup, applying the specified IP routes. Adjust the script and unit file according to your specific requirements and paths.

- + \ No newline at end of file diff --git a/engineers-notes/vpc-in-the-context-of-openstack/index.html b/engineers-notes/vpc-in-the-context-of-openstack/index.html index c45c4beb..b7170b95 100644 --- a/engineers-notes/vpc-in-the-context-of-openstack/index.html +++ b/engineers-notes/vpc-in-the-context-of-openstack/index.html @@ -32,7 +32,7 @@ resources within a project for further isolation. As projects can span resources across all accessible availability zones they provide the core isolation inherent in the virtual private cloud concept.

- + \ No newline at end of file diff --git a/index.html b/index.html index 9b98dccd..59b7d74c 100644 --- a/index.html +++ b/index.html @@ -9,7 +9,7 @@

Documentation

Browse the latest guides and tutorials.

Cloud Administrators

Learn how to integrate Open Metal into an existing infrastructure or create a new one.
chevron_right
Cloud Operators Manual - Zero to Production in 40 Hours (PDF)
Key document for all IT teams that are managing their own Private Cloud.
chevron_right
Tutorials
Tutorials for OpenStack administrators.

Cloud Users

Guides and tutorials to help users get started with Open Metal and OpenStack management.
chevron_right
OpenStack Horizon Quick Start for Project Users
Helps new Project Users to self serve their Infrastructure.
chevron_right
Cloud Operators Manual Extended - Kubernetes on OpenStack
Learn how to use your OpenStack to run your Kubernetes deployments.

General Management

Resources that show how Open Metal can improve your scalability at a lower cost than public clouds.
chevron_right
IT Leadership Buyer's Guide (PDF)
This guide provides a framework for this new technology that can increase competitiveness and lower costs.
chevron_right
Reduce Costs and Drive Innovation (PDF)
Find out how you can break free from the mega-corps and reclaim your revenue, all while paying less for your cloud infrastructure.
- + \ No newline at end of file diff --git a/kubernetes-guides/configuring-openstack-cinder-with-kubernetes/index.html b/kubernetes-guides/configuring-openstack-cinder-with-kubernetes/index.html index e79cf589..d5b12561 100644 --- a/kubernetes-guides/configuring-openstack-cinder-with-kubernetes/index.html +++ b/kubernetes-guides/configuring-openstack-cinder-with-kubernetes/index.html @@ -35,7 +35,7 @@ storageClassName field is optional, but we've included it in the example below for clarity.

kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-volume
namespace: default
spec:
accessModes:
- ReadWriteOnce
storageClassName: csi-sc-cinder
resources:
requests:
storage: 1Gi
EOF

Verify the PVC is created

kubectl get pvc -A

OUTPUT:

NAMESPACE   NAME          STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS    AGE
default test-volume Bound pvc-25cda9ab-6dfd-4aba-b629-f6c78d18cd05 1Gi RWO csi-sc-cinder 67m

Check OpenStack Cinder

Verify that the volume was created in OpenStack Cinder. We can do this by logging into the OpenStack dashboard or by using the OpenStack CLI.

openstack volume list

Output:

+--------------------------------------+------------------------------------------+-----------+------+-------------+
| ID | Name | Status | Size | Attached to |
+--------------------------------------+------------------------------------------+-----------+------+-------------+
| f88eb9fc-3919-4918-b94e-c7ec880eae92 | pvc-25cda9ab-6dfd-4aba-b629-f6c78d18cd05 | available | 1 | |
+--------------------------------------+------------------------------------------+-----------+------+-------------+

- + \ No newline at end of file diff --git a/kubernetes-guides/deploying-a-kubespray-cluster-to-openstack-using-terraform/index.html b/kubernetes-guides/deploying-a-kubespray-cluster-to-openstack-using-terraform/index.html index 22f6e65b..4e904166 100644 --- a/kubernetes-guides/deploying-a-kubespray-cluster-to-openstack-using-terraform/index.html +++ b/kubernetes-guides/deploying-a-kubespray-cluster-to-openstack-using-terraform/index.html @@ -58,7 +58,7 @@ This may take a couple of minutes to complete:

kubectl get svc -A -w

Output:

NAMESPACE     NAME              TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)                  AGE
default hostname-server LoadBalancer 10.233.32.201 127.0.0.1 80:32709/TCP 12h

Verify Cinder

Next we'll verify that Cinder volumes are working. First, create a storage class:

kubectl apply -f - <<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: cinder-csi
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: cinder.csi.openstack.org
parameters:
availability: nova
allowVolumeExpansion: true
volumeBindingMode: Immediate
EOF

Now create a PersistentVolumeClaim by running the following command:

kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-volume
namespace: default
spec:
accessModes:
- ReadWriteOnce
storageClassName: cinder-csi
resources:
requests:
storage: 1Gi
EOF

Deploy a pod that uses the volume

We'll deploy a Redis instance configured to use the volume we created in the previous step.

Warning: This is just an example. Do not use this in production.

kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
volumeMounts:
- mountPath: /var/lib/redis
name: redis-data
volumes:
- name: redis-data
persistentVolumeClaim:
claimName: test-volume
EOF

Verify the volume is bound

kubectl get pvc -A

Output:

NAMESPACE   NAME            STATUS    VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS    AGE
default test-volume Bound pvc-f7ceeaae-86aa-4ab3-9512-bb65f7d6c5f0 1Gi RWO cinder-csi 12h

Verify the volume exists in OpenStack

openstack volume list

All Done

You should now have a working Kubernetes cluster with the OpenStack Cloud Provider enabled. You can now deploy your applications to the cluster.

- + \ No newline at end of file diff --git a/kubernetes-guides/index.html b/kubernetes-guides/index.html index 98560a38..962d4394 100644 --- a/kubernetes-guides/index.html +++ b/kubernetes-guides/index.html @@ -10,7 +10,7 @@
- + \ No newline at end of file diff --git a/kubernetes-guides/installing-a-kops-cluster-on-openstack/index.html b/kubernetes-guides/installing-a-kops-cluster-on-openstack/index.html index 5da9745c..09bf5f17 100644 --- a/kubernetes-guides/installing-a-kops-cluster-on-openstack/index.html +++ b/kubernetes-guides/installing-a-kops-cluster-on-openstack/index.html @@ -50,7 +50,7 @@ status.

$ kops validate cluster --wait 10m

---Output truncated---

Pod kube-system/csi-cinder-controllerplugin-84b9c4955-dmjqv system-cluster-critical pod "csi-cinder-controllerplugin-84b9c4955-dmjqv" is pending

Validation Failed
W1010 22:13:19.203438 2645280 validate_cluster.go:232] (will retry): cluster not yet healthy
INSTANCE GROUPS
NAME ROLE MACHINETYPE MIN MAX SUBNETS
master-nova Master m1.medium 1 1 nova
nodes-nova Node m1.medium 2 2 nova

NODE STATUS
NAME ROLE READY
master-nova-d2gurm master True
nodes-nova-7oi4el node True
nodes-nova-ncdj6g node True

VALIDATION ERRORS
KIND NAME MESSAGE
Pod kube-system/coredns-77bcddd996-jflxd system-cluster-critical pod "coredns-77bcddd996-jflxd" is pending

Validation Failed
W1010 22:13:30.376327 2645280 validate_cluster.go:232] (will retry): cluster not yet healthy
INSTANCE GROUPS
NAME ROLE MACHINETYPE MIN MAX SUBNETS
master-nova Master m1.medium 1 1 nova
nodes-nova Node m1.medium 2 2 nova

NODE STATUS
NAME ROLE READY
master-nova-d2gurm master True
nodes-nova-7oi4el node True
nodes-nova-ncdj6g node True

Your cluster my-cluster.k8s.local is ready

Test your cluster

Get all pods

kubectl get pods --all-namespaces

Output

[root@comfortable-lamprey kops]# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel kube-flannel-ds-7tkwv 1/1 Running 0 4m
kube-flannel kube-flannel-ds-95qmb 1/1 Running 1 (2m9s ago) 3m2s
kube-flannel kube-flannel-ds-k4vcs 1/1 Running 0 3m4s
kube-system coredns-77bcddd996-978n5 1/1 Running 0 3m59s
kube-system coredns-77bcddd996-jflxd 1/1 Running 0 2m19s
kube-system coredns-autoscaler-975545559-rl2zl 1/1 Running 0 3m59s
kube-system csi-cinder-controllerplugin-84b9c4955-dmjqv 5/5 Running 0 3m59s
kube-system csi-cinder-nodeplugin-b24tf 3/3 Running 0 3m2s
kube-system csi-cinder-nodeplugin-pbncg 3/3 Running 0 3m59s
kube-system csi-cinder-nodeplugin-pmzb2 3/3 Running 0 3m4s
kube-system dns-controller-6c8954774c-n287p 1/1 Running 0 3m59s
kube-system etcd-manager-events-master-nova-d2gurm 1/1 Running 0 3m38s
kube-system etcd-manager-main-master-nova-d2gurm 1/1 Running 0 2m54s
kube-system kops-controller-ltf7f 1/1 Running 0 4m
kube-system kube-apiserver-master-nova-d2gurm 2/2 Running 2 (5m ago) 3m15s
kube-system kube-controller-manager-master-nova-d2gurm 1/1 Running 4 (4m59s ago) 4m12s
kube-system kube-proxy-master-nova-d2gurm 1/1 Running 0 3m25s
kube-system kube-proxy-nodes-nova-7oi4el 1/1 Running 0 90s
kube-system kube-proxy-nodes-nova-ncdj6g 1/1 Running 0 2m16s
kube-system kube-scheduler-master-nova-d2gurm 1/1 Running 0 3m40s
kube-system openstack-cloud-provider-hlhzx 1/1 Running 0 3m59s

Verify OpenStack cloud provider services

Our installation allows you to provision volumes and load balancers. If you want to validate the installation check out our other guides.

Troubleshooting

The kOps documentation provides helpful troubleshooting steps for common issues: Troubleshooting kOps clusters.

If you need to start over you can delete your cluster with the following command:

kops delete cluster --name my-cluster.k8s.local --yes
- + \ No newline at end of file diff --git a/kubernetes-guides/installing-a-rancher-cluster-on-openstack/index.html b/kubernetes-guides/installing-a-rancher-cluster-on-openstack/index.html index 5bba28af..32746424 100644 --- a/kubernetes-guides/installing-a-rancher-cluster-on-openstack/index.html +++ b/kubernetes-guides/installing-a-rancher-cluster-on-openstack/index.html @@ -45,7 +45,7 @@ balancer and persistent volumes as well.

To do this, you'll need to setup OpenStack Cloud Provider resources. We're in the process of creating guides on how to do this.

You can find more information on the OpenStack Cloud Provider resources here: OpenStack Cloud Provider.

- + \ No newline at end of file diff --git a/kubernetes-guides/installing-an-openshift-cluster-on-openstack/index.html b/kubernetes-guides/installing-an-openshift-cluster-on-openstack/index.html index a78f2562..c41711bc 100644 --- a/kubernetes-guides/installing-an-openshift-cluster-on-openstack/index.html +++ b/kubernetes-guides/installing-an-openshift-cluster-on-openstack/index.html @@ -55,7 +55,7 @@ local machine.

Web Console Login

Navigate to https://console-openshift-console.apps.okd.testing-okd.com/dashboards

Use the username and password that were output after you completed the installation. If you forget to save the password, the password is stored in the auth folder in the current directory in the kubeadmin-password file.

OKD Web Console on OpenMetal

Test Kubectl

Install Kubectl

curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl

Load configuration

 export KUBECONFIG=/root/okd/install-directory/auth/kubeconfig

List Pods

[root@silly-quokka okd]# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
openshift-apiserver-operator openshift-apiserver-operator-5686496c75-b6ngv 1/1 Running 1 (84m ago) 88m
openshift-apiserver apiserver-55b764f57-cp7w9 2/2 Running 0 76m
openshift-apiserver apiserver-55b764f57-ktq9c 2/2 Running 0 77m
openshift-apiserver apiserver-55b764f57-tf96q 2/2 Running 0 75m
openshift-authentication-operator authentication-operator-79647548ff-9pv9j 1/1 Running 1 (84m ago) 88m
openshift-authentication oauth-openshift-75cb6567b6-6ltrr 1/1 Running 0 69m
openshift-authentication oauth-openshift-75cb6567b6-n845d 1/1 Running 0 68m
openshift-authentication oauth-openshift-75cb6567b6-qtfcf 1/1 Running 0 69m
openshift-cloud-controller-manager-operator cluster-cloud-controller-manager-operator-54f49d867f-gzjlh 2/2 Running 0 87m
openshift-cloud-credential-operator cloud-credential-operator-7c4bbc8654-c6ckq 2/2 Running 0 88m
openshift-cluster-csi-drivers manila-csi-driver-operator-b4dfc5874-frvlj 1/1 Running 0 84m
openshift-cluster-csi-drivers openstack-cinder-csi-driver-controller-6b56dfff86-f5zfb 10/10 Running 0 82m
openshift-cluster-csi-drivers openstack-cinder-csi-driver-controller-6b56dfff86-wdh98 10/10 Running 0 85m
openshift-cluster-csi-drivers openstack-cinder-csi-driver-node-7v6sj 3/3 Running 0 76m
...

OKD Client

Install OKD Client

To verify operation, download the OKD client from the OKD GitHub releases page:

curl -OL https://github.com/openshift/okd/releases/download/4.11.0-0.okd-2022-07-29-154152/openshift-client-linux-4.11.0-0.okd-2022-07-29-154152.tar.gz
tar -xvf openshift-client-linux-4.11.0-0.okd-2022-07-29-154152.tar.gz

Fetch Kubernetes resources using OKD Client

Run the following OKD client commands using the oc binary:

  • ./oc get nodes
  • ./oc get clusterversion
  • ./oc get clusteroperator
  • ./oc get pods -A

Output:

(venv) [root@silly-quokka test]# ./oc get nodes
NAME STATUS ROLES AGE VERSION
okd-dstmh-master-0 Ready master 3h35m v1.24.0+9546431
okd-dstmh-master-1 Ready master 3h35m v1.24.0+9546431
okd-dstmh-master-2 Ready master 3h35m v1.24.0+9546431
okd-dstmh-worker-0-gbr8f Ready worker 3h24m v1.24.0+9546431
okd-dstmh-worker-0-l868x Ready worker 3h24m v1.24.0+9546431
okd-dstmh-worker-0-mjzlr Ready worker 3h24m v1.24.0+9546431

Troubleshooting

Restart OKD Installation

Delete the cluster

./openshift-install destroy cluster --dir ~/okd/install-directory/ --log-level=info

Copy the config

cp install-config.yaml ~/okd/install-directory/

Create the manifests

./openshift-install --dir ~/okd/install-directory create manifests

Start Install

./openshift-install create cluster --dir ~/okd/install-directory/ --log-level=info
- + \ No newline at end of file diff --git a/kubernetes-guides/installing-the-kubernetes-openstack-cloud-controller-manager/index.html b/kubernetes-guides/installing-the-kubernetes-openstack-cloud-controller-manager/index.html index 3936f497..80dbafb6 100644 --- a/kubernetes-guides/installing-the-kubernetes-openstack-cloud-controller-manager/index.html +++ b/kubernetes-guides/installing-the-kubernetes-openstack-cloud-controller-manager/index.html @@ -39,7 +39,7 @@ set up other cloud provider features. Cinder, Barbican, and Octavia are all supported by the cloud provider.

We'll cover how to configure them in our next Kubernetes guides. For now, please see the Cloud Provider OpenStack.

- + \ No newline at end of file diff --git a/openmetal-central/creating-a-support-request/index.html b/openmetal-central/creating-a-support-request/index.html index abdc1ff0..05791c90 100644 --- a/openmetal-central/creating-a-support-request/index.html +++ b/openmetal-central/creating-a-support-request/index.html @@ -25,7 +25,7 @@ request detail. Here you can see any messages from your support agent, ask additional questions, or send a reply.

OpenMetal Central Request Detail

Need help or have questions that are not covered by support requests? You can also book meetings with our customer teams here.

- + \ No newline at end of file diff --git a/openmetal-central/creating-an-organization/index.html b/openmetal-central/creating-an-organization/index.html index 1e5ce069..98c86ee3 100644 --- a/openmetal-central/creating-an-organization/index.html +++ b/openmetal-central/creating-an-organization/index.html @@ -35,7 +35,7 @@ You can then change your own permissions or actually leave the organization completely.

Need help or have questions that are not covered here? You can also submit a support request or book meetings with our customer teams here.

- + \ No newline at end of file diff --git a/openmetal-central/emergency-node-power-management/index.html b/openmetal-central/emergency-node-power-management/index.html index 86df237e..f7ab0555 100644 --- a/openmetal-central/emergency-node-power-management/index.html +++ b/openmetal-central/emergency-node-power-management/index.html @@ -17,7 +17,7 @@ correct node has been selected. Click confirm.

Confirmation Prompt

  • You will now see the power indicator for the node turn yellow indicating a restart is in progress.

    Power State Indicator

  • When the indicator turns green the restart has completed and your node should be accessible.

  • - + \ No newline at end of file diff --git a/openmetal-central/enable-datadog-monitoring/index.html b/openmetal-central/enable-datadog-monitoring/index.html index 164c81cb..9c935126 100644 --- a/openmetal-central/enable-datadog-monitoring/index.html +++ b/openmetal-central/enable-datadog-monitoring/index.html @@ -13,7 +13,7 @@ deployment complete.

    Info banner

    Access Datadog

    Once deployment has completed and the emailed invitation accepted, you can use the "Single Sign On" option in OpenMetal Central to access your clouds' Datadog organization directly.

    Single Sign On

    Next Steps

    Datadog provides fairly comprehensive guides you will want to review.

    - + \ No newline at end of file diff --git a/openmetal-central/enable-two-factor-authentication/index.html b/openmetal-central/enable-two-factor-authentication/index.html index 58200a82..216f3dfa 100644 --- a/openmetal-central/enable-two-factor-authentication/index.html +++ b/openmetal-central/enable-two-factor-authentication/index.html @@ -15,7 +15,7 @@ the registered device.

    Recovery codes

  • On next login you will be presented with the two-factor challenge prompt, enter the code sent to the enabled device. Optionally, check or uncheck to remember the current device.

    Two-Factor Challenge

  • - + \ No newline at end of file diff --git a/openmetal-central/how-to-add-ssh-keys-in-openmetal-central/index.html b/openmetal-central/how-to-add-ssh-keys-in-openmetal-central/index.html index 37c5d4f2..9a0501fe 100644 --- a/openmetal-central/how-to-add-ssh-keys-in-openmetal-central/index.html +++ b/openmetal-central/how-to-add-ssh-keys-in-openmetal-central/index.html @@ -11,7 +11,7 @@

    Adding SSH Keys to your Private Cloud in OpenMetal Central

    In this article you will learn how to add new SSH keys to your private cloud from your OpenMetal Central portal.

    Access your OpenMetal Central account

    Adding your SSH key

    • Access your cloud settings.

      Cloud Settings

    • Paste the text contents of your SSH public key into the Add SSH Key field and click Add Key.

      Add SSH Key

    • You will see a pop-up at the bottom of the page indicating success.

      Note: The Add SSH Key field does not clear upon task completion.

    Access your hardware nodes

    • Access your servers with the new key:

      ssh -i ~/.ssh/your_key_name root@<server-ip>
    - + \ No newline at end of file diff --git a/openmetal-central/how-to-purchase-a-private-cloud/index.html b/openmetal-central/how-to-purchase-a-private-cloud/index.html index 107530b2..5be745ac 100644 --- a/openmetal-central/how-to-purchase-a-private-cloud/index.html +++ b/openmetal-central/how-to-purchase-a-private-cloud/index.html @@ -31,7 +31,7 @@ inside OpenMetal Central.

  • Submit a Support Request requesting your organizational trial be approved.

  • Need help or have questions that are not covered here? Submit a support request or book a meeting with one of our teams.

    - + \ No newline at end of file diff --git a/openstack-admin/access-swift-s3-api/index.html b/openstack-admin/access-swift-s3-api/index.html index d3a93584..13b2af5b 100644 --- a/openstack-admin/access-swift-s3-api/index.html +++ b/openstack-admin/access-swift-s3-api/index.html @@ -15,7 +15,7 @@ These credentials are stored securely in Keystone and can be managed with the OpenStack CLI client.

    Create credentials

    The command below generates access keys. It defaults to the currently authorized user and project as determined by clouds.yaml, <user>-openrc.sh, etc:

    $ openstack ec2 credentials create
    +------------+---------------------------------------------------------------------------------------------------------+
    | Field | Value |
    +------------+---------------------------------------------------------------------------------------------------------+
    | access | 9a5fc02e2ed64a7cad249a8477d79203 |
    | links | {'self': 'http://173.231.217.68:5000/v3/users/0aa830b5853d4d419cdcab81b9652bc5/credentials/OS- |
    | | EC2/9a5fc02e2ed64a7cad249a8477d79203'} |
    | project_id | fd14980cc0f24e829d51c68ef78ee530 |
    | secret | 3806359744e54aa3828285c3b56f8f83 |
    | trust_id | None |
    | user_id | 0aa830b5853d4d419cdcab81b9652bc5 |
    +------------+---------------------------------------------------------------------------------------------------------+

    Admin users can specify an optional --user and/or --project as required:

    $ openstack ec2 credentials create --user s3demo --project s3-demo --fit-width 
    +------------+-------------------------------------------------------------------------------------------------------+
    | Field | Value |
    +------------+-------------------------------------------------------------------------------------------------------+
    | access | 18b744a314fa4165960e55af2e0539b3 |
    | links | {'self': 'http://192.168.2.254:5000/v3/users/a19f86a6f1044c5bb28a508f8054257c/credentials/OS- |
    | | EC2/18b744a314fa4165960e55af2e0539b3'} |
    | project_id | 6016837b33f64cf38b5214a8aeb7fb8f |
    | secret | f98dfdda3e694360ab052592c9f44d43 |
    | trust_id | None |
    | user_id | a19f86a6f1044c5bb28a508f8054257c |
    +------------+-------------------------------------------------------------------------------------------------------+

    List existing credentials

    List all credentials for the current project:

    $ openstack ec2 credentials list 
    +----------------------------------+----------------------------------+----------------------------------+----------------------------------+
    | Access | Secret | Project ID | User ID |
    +----------------------------------+----------------------------------+----------------------------------+----------------------------------+
    | 981ff83b99024abfaefcbda63b5d48d1 | a12c509175d04962b81183755d1698e3 | fd14980cc0f24e829d51c68ef78ee530 | 0aa830b5853d4d419cdcab81b9652bc5 |
    | 6764bd24e4754b89b30c51c46b5b2311 | 76f09960a6b148acb906604dc269616b | fd14980cc0f24e829d51c68ef78ee530 | 0aa830b5853d4d419cdcab81b9652bc5 |
    | 9a5fc02e2ed64a7cad249a8477d79203 | 3806359744e54aa3828285c3b56f8f83 | fd14980cc0f24e829d51c68ef78ee530 | 0aa830b5853d4d419cdcab81b9652bc5 |
    | cf5ed0e704004991885358fc9f4b118e | cd898b7c8a0c4bad8dac1c3853075612 | fd14980cc0f24e829d51c68ef78ee530 | 0aa830b5853d4d419cdcab81b9652bc5 |
    +----------------------------------+----------------------------------+----------------------------------+----------------------------------+

    List credentials for a specific user:

    $ openstack ec2 credentials list --user s3demo
    +----------------------------------+----------------------------------+----------------------------------+----------------------------------+
    | Access | Secret | Project ID | User ID |
    +----------------------------------+----------------------------------+----------------------------------+----------------------------------+
    | 18b744a314fa4165960e55af2e0539b3 | f98dfdda3e694360ab052592c9f44d43 | 6016837b33f64cf38b5214a8aeb7fb8f | a19f86a6f1044c5bb28a508f8054257c |
    | e2053effd013413fac4a4e320007e827 | 61a632d402884e32b9371b8fb46a91a4 | 6016837b33f64cf38b5214a8aeb7fb8f | a19f86a6f1044c5bb28a508f8054257c |
    +----------------------------------+----------------------------------+----------------------------------+----------------------------------+

    Delete credentials

    Credentials are deleted by calling the access ID. Success returns no output:

    openstack ec2 credentials delete e2053effd013413fac4a4e320007e827

    What's Next?

    Look forward to future use case articles as we expand our documentation.

    Resources

    - + \ No newline at end of file diff --git a/openstack-admin/cloud-resource-usage-dashboard/index.html b/openstack-admin/cloud-resource-usage-dashboard/index.html index 570de44b..b8b6e809 100644 --- a/openstack-admin/cloud-resource-usage-dashboard/index.html +++ b/openstack-admin/cloud-resource-usage-dashboard/index.html @@ -17,7 +17,7 @@ drag and drop, browse for a file, or paste from your clipboard.

    See code block

    Input JSON

  • Confirm overwrite action

    Import confirmation

  • Cloud Physical Resource Usage dashboard

    If all went well you should now see your new dashboard.

    Imported dash

    To find out more about any of the widgets, mouse over its title bar and click the pencil (edit) icon.

    Edit icon

    This opens the editor for the selected widget.

    Traffic monitor edit

    On this page you can view and edit all aspects of the widget.

    Editable fields

    1. Visualization type
    2. Data sources
    3. Display preferences
    4. Graph title

    Here you can also view and/or edit your data source JSON directly.

    Data source JSON

    Dashboard JSON

    This JSON code creates a custom Datadog dashboard monitoring and visualizing the following values for each host:

    • CPU Idle
    • Percent Usable Memory
    • Network Traffic - Ingress + Egress
    • Percent Used Disk Space
    • Total Ceph Disk Usage

    Code block

    {"title":"Cloud Physical Resource Usage","description":"## Title\n\nDescribe this dashboard. Add links to other dashboards, monitors, wikis,  and docs to help your teammates. Markdown is supported.\n\n- [This might link to a dashboard](#)\n- [This might link to a wiki](#)","widgets":[{"id":8462073962564395,"definition":{"title":"CPU Idle for all Hosts","title_size":"16","title_align":"left","show_legend":false,"time":{"live_span":"4h"},"type":"timeseries","requests":[{"formulas":[{"formula":"query1"}],"response_format":"timeseries","queries":[{"query":"sum:system.cpu.idle{*} by {host}","data_source":"metrics","name":"query1"}],"style":{"palette":"dog_classic","line_type":"solid","line_width":"normal"},"display_type":"line"}],"markers":[{"value":"y = 0","display_type":"error dashed"},{"value":"y = 100","display_type":"ok dashed"}]},"layout":{"x":0,"y":0,"width":4,"height":2}},{"id":8190888575322488,"definition":{"title":"Percent Usable Memory per Host","type":"treemap","requests":[{"formulas":[{"formula":"query1"}],"response_format":"scalar","queries":[{"query":"avg:system.mem.pct_usable{*} by {host}","data_source":"metrics","name":"query1","aggregator":"avg"}]}]},"layout":{"x":4,"y":0,"width":4,"height":2}},{"id":6935696381171286,"definition":{"title":"Ingress + Egress Network Traffic for all Hosts","title_size":"16","title_align":"left","show_legend":false,"legend_layout":"auto","legend_columns":["avg","min","max","value","sum"],"type":"timeseries","requests":[{"formulas":[{"formula":"query1"}],"response_format":"timeseries","queries":[{"query":"sum:system.net.bytes_rcvd{*} by {host}","data_source":"metrics","name":"query1"}],"style":{"palette":"dog_classic","line_type":"solid","line_width":"normal"},"display_type":"line"},{"formulas":[{"formula":"0 - query1"}],"response_format":"timeseries","queries":[{"query":"sum:system.net.bytes_sent{*} by 
{host}","data_source":"metrics","name":"query1"}],"style":{"palette":"dog_classic","line_type":"solid","line_width":"normal"},"display_type":"line"}]},"layout":{"x":8,"y":0,"width":4,"height":2}},{"id":4847638474091271,"definition":{"title":"Total Ceph Disk Usage","title_size":"16","title_align":"left","type":"query_value","requests":[{"formulas":[{"formula":"query1"}],"conditional_formats":[{"comparator":"<=","palette":"white_on_green","value":70},{"comparator":">","palette":"white_on_yellow","value":70},{"comparator":">","palette":"white_on_red","value":80}],"response_format":"scalar","queries":[{"query":"avg:ceph.aggregate_pct_used{*}","data_source":"metrics","name":"query1","aggregator":"avg"}]}],"autoscale":true,"precision":2},"layout":{"x":0,"y":2,"width":4,"height":2}},{"id":4208572359463889,"definition":{"title":"Percent Used Disk Space for each Host","title_size":"16","title_align":"left","show_legend":false,"legend_layout":"auto","legend_columns":["avg","min","max","value","sum"],"type":"timeseries","requests":[{"formulas":[{"formula":"query1"},{"formula":"query1 * 100"}],"response_format":"timeseries","queries":[{"query":"avg:system.disk.in_use{host:*} by {host}","data_source":"metrics","name":"query1"}],"style":{"palette":"dog_classic","line_type":"solid","line_width":"normal"},"display_type":"line"}],"markers":[{"value":"y = 100","display_type":"error dashed"}]},"layout":{"x":4,"y":2,"width":4,"height":2}}],"template_variables":[],"layout_type":"ordered","is_read_only":false,"notify_list":[],"reflow_type":"fixed","id":"xsb-5ef-g4d"}

    Resources

    - + \ No newline at end of file diff --git a/openstack-admin/swift-s3-api-access-with-rclone/index.html b/openstack-admin/swift-s3-api-access-with-rclone/index.html index fa6b24bb..5b4041da 100644 --- a/openstack-admin/swift-s3-api-access-with-rclone/index.html +++ b/openstack-admin/swift-s3-api-access-with-rclone/index.html @@ -12,7 +12,7 @@ CLI tool to interact with your OpenMetal clouds' Ceph-backed Object store via the Swift S3 API.

    Prerequisites

    Install

    Script installation is provided for Linux and related systems.

    sudo -v ; curl https://rclone.org/install.sh | sudo bash

    Configure

    Quick Config

    The default location for the configuration file:

    • ~/.config/rclone/rclone.conf

    The minimum required values are provided in the following example:

    [backup-demo] # Name to be used when calling remote
    type = s3
    provider = Other
    access_key_id = <access-key>
    secret_access_key = <secret-key>
    endpoint = http(s)://<cloud-domain-or-ip>:8080/
    acl = private

    Guided config

    Guided configuration is provided with rclone config which will prompt for the following values:

    $ rclone config
    2022/12/12 19:47:27 NOTICE: Config file "/home/ubuntu/.config/rclone/rclone.conf" not found - using defaults
    No remotes found, make a new one?
    n) New remote
    s) Set configuration password
    q) Quit config
    n/s/q> n

    Enter name for new remote.
    name> backup-demo

    Option Storage.
    Type of storage to configure.
    Choose a number from below, or type in your own value.
    [...]
    Storage> 5

    Option provider.
    Choose your S3 provider.
    Choose a number from below, or type in your own value.
    Press Enter to leave empty.
    [...]
    provider> 24

    Option env_auth.
    [...]
    env_auth> false

    Option access_key_id.
    [...]
    access_key_id> <access-key>

    Option secret_access_key.
    [...]
    secret_access_key> <secret-key>

    Option endpoint.
    [...]
    endpoint> http(s)://<cloud-domain-or-ip>:8080/

    Option location_constraint.
    [...]
    Enter a value. Press Enter to leave empty.
    location_constraint>

    Option acl.
    [...]
    acl> 1

    Edit advanced config?
    y) Yes
    n) No (default)
    y/n> n

    Configuration complete.

    Basic usage

    • rclone lsd - List all directories at remote path:

      $ rclone lsd backup-demo:
      -1 2022-11-01 16:08:29 -1 openmetal-bucket
    • rclone mkdir - Make a new path if it doesn't already exist:

      rclone mkdir backup-demo:/test-bucket
    • rclone sync - Make source and dest identical, modifying destination only:

      rclone sync /local/path remote:path

    Reference

    - + \ No newline at end of file diff --git a/openstack-admin/swift-s3-api-access-with-s3cmd/index.html b/openstack-admin/swift-s3-api-access-with-s3cmd/index.html index 041e52dd..12e37747 100644 --- a/openstack-admin/swift-s3-api-access-with-s3cmd/index.html +++ b/openstack-admin/swift-s3-api-access-with-s3cmd/index.html @@ -12,7 +12,7 @@ with your OpenMetal clouds' Ceph-backed Object store via the Swift S3 API.

    Prerequisites

    Install

    • Create and/or activate your virtual environment:

      virtualenv .venv

      source .venv/bin/activate
    • Install s3cmd with pip:

      pip install s3cmd

    Configure

    • s3cmd --configure provides interactive configuration prompts. You will need to enter only Access Key, Secret Key, S3 Endpoint, and Use HTTPS protocol:

    • To gather your exact endpoint, run the following command:

    openstack endpoint list --interface public --service object-store
      $ s3cmd --configure
    [...]
    Access Key: <access_key>
    Secret Key: <secret_key>
    [...]
    S3 Endpoint: <cloud_ip_or_configured_domain:port>
    [...]
    DNS-style bucket+hostname:port template for accessing a bucket: <FQDN only (ie. mycloud.openmetal.cloud)>
    [...]
    Use HTTPS protocol: <yes/no determined by your cloud's TLS status>
    [...]
    Test access with supplied credentials? [Y/n] y
    [...]

    Basic Usage

    • List available buckets and bucket contents with s3cmd ls:

      $ s3cmd ls
      2022-11-01 16:08 s3://openmetal-bucket

      $ s3cmd ls s3://openmetal-bucket
      2022-11-01 16:12 37 s3://openmetal-bucket/important-file.txt
    • Create a new bucket with s3cmd mb:

      $ s3cmd mb s3://test-bucket
      Bucket 's3://test-bucket/' created
    • Upload a local file to your bucket with s3cmd put:

      $ s3cmd put important-archive.zip s3://test-bucket/important-archive.zip
      upload: 'important-archive.zip' -> 's3://test-bucket/important-archive.zip' [part 1 of 4, 15MB] [1 of 1]
      15728640 of 15728640 100% in 1s 9.45 MB/s done
      upload: 'important-archive.zip' -> 's3://test-bucket/important-archive.zip' [part 2 of 4, 15MB] [1 of 1]
      15728640 of 15728640 100% in 1s 11.71 MB/s done
      upload: 'important-archive.zip' -> 's3://test-bucket/important-archive.zip' [part 3 of 4, 15MB] [1 of 1]
      15728640 of 15728640 100% in 1s 13.89 MB/s done
      upload: 'important-archive.zip' -> 's3://test-bucket/important-archive.zip' [part 4 of 4, 122KB] [1 of 1]
      125565 of 125565 100% in 0s 170.41 KB/s done
    • Download a file from your bucket with s3cmd get:

      $ s3cmd get s3://test-bucket/important-file.txt
      download: 's3://test-bucket/important-file.txt' -> './important-file.txt' [1 of 1]
      37 of 37 100% in 0s 50.40 B/s done
    • Delete file from bucket with s3cmd rm:

      $ s3cmd rm s3://test-bucket/important-file.txt
      delete: 's3://test-bucket/important-file.txt'
    • Delete an empty bucket with s3cmd rb:

      $ s3cmd rb s3://test-bucket
      Bucket 's3://test-bucket/' removed

    Reference

    - + \ No newline at end of file diff --git a/openstack-admin/use-aws-client-to-access-swift-s3-api/index.html b/openstack-admin/use-aws-client-to-access-swift-s3-api/index.html index 95644774..68c10d7e 100644 --- a/openstack-admin/use-aws-client-to-access-swift-s3-api/index.html +++ b/openstack-admin/use-aws-client-to-access-swift-s3-api/index.html @@ -13,7 +13,7 @@ backed object storage using a software designed to interact with S3-compatible endpoints.

    Requirements

    Configure AWS client

    Install the AWS client:

    # Activate your virtual environment (optional but recommended)
    source .venv/bin/activate

    # Install the client packages with pip
    pip install awscli awscli-plugin-endpoint

    Configuring the installed client requires two files ~/.aws/credentials and ~/.aws/config. These are defined as follows:

    $ cat ~/.aws/credentials

    [default]
    aws_access_key_id = <access_key>
    aws_secret_access_key = <secret_key>

    $ cat ~/.aws/config

    [plugins]
    endpoint = awscli_plugin_endpoint

    [profile default]
    region = iad3
    s3 =
    endpoint_url = <cloud_ip_or_url>:6780
    signature_version = s3v4
    s3api =
    endpoint_url = <cloud_ip_or_url>:6780

    Basic AWS client usage

    List existing containers (buckets):

    aws --profile default s3 ls

    Create a new bucket:

    aws s3api create-bucket --bucket test-bucket

    Upload a local file to your cloud:

    $ aws --profile default s3 cp test-file-up.txt s3://test-bucket/
    upload: ./test-file-up.txt to s3://test-bucket/test-file-up.txt

    Download an object from your cloud:

    $ aws --profile default s3 cp s3://test-bucket/test-file-down.txt test-file-down.txt 
    download: s3://test-bucket/test-file-down.txt to ./test-file-down.txt

    Delete an object:

    $ aws --profile default s3 rm s3://test-bucket/test-file-down.txt 
    delete: s3://test-bucket/test-file-down.txt

    Delete an empty bucket:

    $ aws --profile default s3 rb s3://test-bucket
    remove_bucket: test-bucket

    Reference

    - + \ No newline at end of file diff --git a/operators-manual/day-1/command-line/create-ssh-key/index.html b/operators-manual/day-1/command-line/create-ssh-key/index.html index f0b14a7a..7d6924d7 100644 --- a/operators-manual/day-1/command-line/create-ssh-key/index.html +++ b/operators-manual/day-1/command-line/create-ssh-key/index.html @@ -22,7 +22,7 @@ guides, this public key is injected into the instance created within How to Create an Instance in OpenStack Horizon

    Conclusion

    This concludes the steps needed to create an SSH key pair for one of your cloud's control plane nodes.

    - + \ No newline at end of file diff --git a/operators-manual/day-1/command-line/openstackclient/index.html b/operators-manual/day-1/command-line/openstackclient/index.html index 0b3d1d7f..be7d2308 100644 --- a/operators-manual/day-1/command-line/openstackclient/index.html +++ b/operators-manual/day-1/command-line/openstackclient/index.html @@ -70,7 +70,7 @@ output to /etc/bash_completion.d/osc.bash_completion using tee:

    openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null

    Next, either log out and back in to your shell or use source to load the autocompletion script for your current shell.

    For example:

    source /etc/bash_completion.d/osc.bash_completion

    Reference

    OpenStack Victoria OpenStackClient Documentation

    - + \ No newline at end of file diff --git a/operators-manual/day-1/horizon/create-first-instance/index.html b/operators-manual/day-1/horizon/create-first-instance/index.html index 16434640..e589034b 100644 --- a/operators-manual/day-1/horizon/create-first-instance/index.html +++ b/operators-manual/day-1/horizon/create-first-instance/index.html @@ -131,7 +131,7 @@ so there's no need to change anything here.
  • Port to be associated: Select the instance created previously. In this case, we use the Jumpstation instance.
  • This concludes allocating the floating IP to the instance. This instance is now accessible over SSH from the first hardware node of your cloud.

    - + \ No newline at end of file diff --git a/operators-manual/day-1/horizon/create-user-project/index.html b/operators-manual/day-1/horizon/create-user-project/index.html index 0541d9e2..effdaba5 100644 --- a/operators-manual/day-1/horizon/create-user-project/index.html +++ b/operators-manual/day-1/horizon/create-user-project/index.html @@ -62,7 +62,7 @@ and your user can be seen at the top right of Horizon.

    image

    Figure 10: New User Login

    For the rest of this guide, we assume you are working out of the newly created project and using the user associated with it.

    Reference

    OpenStack Victoria Horizon Administrator Guide

    - + \ No newline at end of file diff --git a/operators-manual/day-1/horizon/getting-started-with-horizon/index.html b/operators-manual/day-1/horizon/getting-started-with-horizon/index.html index a66eb9d2..e53c5e3c 100644 --- a/operators-manual/day-1/horizon/getting-started-with-horizon/index.html +++ b/operators-manual/day-1/horizon/getting-started-with-horizon/index.html @@ -33,7 +33,7 @@ previous section. The username for the Horizon administrator account is admin.

    Horizon login

    Figure 5: Horizon Login Page

    When you log in to Horizon, your dashboard appears similar to the following:

    Successful login

    Figure 6: Horizon Dashboard

    - + \ No newline at end of file diff --git a/operators-manual/day-1/horizon/images/index.html b/operators-manual/day-1/horizon/images/index.html index 5d38da85..4f82fcd7 100644 --- a/operators-manual/day-1/horizon/images/index.html +++ b/operators-manual/day-1/horizon/images/index.html @@ -45,7 +45,7 @@ your snapshot.

    image

    Figure 4: Create Snapshot Button Within Horizon Dashboard

    Once your snapshot has been created, the snapshot name is added to your list of images. You can use this image to launch new instances.

    Reference

    OpenStack Victoria Glance Documentation

    - + \ No newline at end of file diff --git a/operators-manual/day-1/intro-to-openmetal-private-cloud/index.html b/operators-manual/day-1/intro-to-openmetal-private-cloud/index.html index dc55e7ab..117bba5e 100644 --- a/operators-manual/day-1/intro-to-openmetal-private-cloud/index.html +++ b/operators-manual/day-1/intro-to-openmetal-private-cloud/index.html @@ -51,7 +51,7 @@ to see all request tickets submitted under your user account. If you are the administrator of the Private Cloud, you will be able to see all support requests regarding your Private Cloud.

    - + \ No newline at end of file diff --git a/operators-manual/day-2/check-ceph-status-disk-usage/index.html b/operators-manual/day-2/check-ceph-status-disk-usage/index.html index 51711cd9..9e928a8a 100644 --- a/operators-manual/day-2/check-ceph-status-disk-usage/index.html +++ b/operators-manual/day-2/check-ceph-status-disk-usage/index.html @@ -16,7 +16,7 @@ administrator of this cloud, we aim to provide you information about how you can check the status of your Ceph cluster and see available disk usage using the command line.

    Prerequisites

    • Root access to your cloud's control plane nodes

    Check Ceph Status

    To check the status of your Ceph cluster, use ceph status.

    For example:

    # ceph status
    cluster:
    id: 34fa49b3-fff8-4702-8b17-4e8d873c845f
    health: HEALTH_OK

    services:
    mon: 3 daemons, quorum relaxed-flamingo,focused-capybara,lovely-ladybug (age 2w)
    mgr: relaxed-flamingo(active, since 2w), standbys: focused-capybara, lovely-ladybug
    osd: 4 osds: 4 up (since 3d), 4 in (since 3d)
    rgw: 3 daemons active (focused-capybara.rgw0, lovely-ladybug.rgw0, relaxed-flamingo.rgw0)

    task status:

    data:
    pools: 13 pools, 337 pgs
    objects: 69.28k objects, 250 GiB
    usage: 724 GiB used, 11 TiB / 12 TiB avail
    pgs: 337 active+clean

    io:
    client: 121 KiB/s rd, 1.2 MiB/s wr, 137 op/s rd, 232 op/s wr

    Check Ceph Disk Usage

    To check the available disk space in your Ceph cluster, use ceph df.

    For example:

    # ceph df
    --- RAW STORAGE ---
    CLASS SIZE AVAIL USED RAW USED %RAW USED
    ssd 12 TiB 11 TiB 720 GiB 724 GiB 6.08
    TOTAL 12 TiB 11 TiB 720 GiB 724 GiB 6.08

    --- POOLS ---
    POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
    device_health_metrics 1 1 286 KiB 4 858 KiB 0 3.4 TiB
    images 2 32 7.6 GiB 1.02k 23 GiB 0.22 3.4 TiB
    volumes 3 32 88 GiB 23.61k 264 GiB 2.45 3.4 TiB
    vms 4 32 144 GiB 39.92k 433 GiB 3.96 3.4 TiB
    backups 5 32 0 B 0 0 B 0 3.4 TiB
    metrics 6 32 25 MiB 4.49k 127 MiB 0 3.4 TiB
    manila_data 7 32 0 B 0 0 B 0 3.4 TiB
    manila_metadata 8 32 0 B 0 0 B 0 3.4 TiB
    .rgw.root 9 32 3.6 KiB 8 96 KiB 0 3.4 TiB
    default.rgw.log 10 32 3.4 KiB 207 384 KiB 0 3.4 TiB
    default.rgw.control 11 32 0 B 8 0 B 0 3.4 TiB
    default.rgw.meta 12 8 954 B 4 36 KiB 0 3.4 TiB
    default.rgw.buckets.index 13 8 2.2 MiB 11 6.6 MiB 0 3.4 TiB
    - + \ No newline at end of file diff --git a/operators-manual/day-2/introduction-to-ceph/index.html b/operators-manual/day-2/introduction-to-ceph/index.html index 61d9c523..e7105132 100644 --- a/operators-manual/day-2/introduction-to-ceph/index.html +++ b/operators-manual/day-2/introduction-to-ceph/index.html @@ -51,7 +51,7 @@ your Account Manager should this apply to you.

    Your Ceph cluster was deployed using Ceph Ansible. Any configuration changes must be made using Ceph Ansible. For more information, see How to Prepare and Use Ceph Ansible

    - + \ No newline at end of file diff --git a/operators-manual/day-2/live-migrate-instances/index.html b/operators-manual/day-2/live-migrate-instances/index.html index 1d305982..955b33f6 100644 --- a/operators-manual/day-2/live-migrate-instances/index.html +++ b/operators-manual/day-2/live-migrate-instances/index.html @@ -31,7 +31,7 @@ made your selection, click submit. During the live migration process, a status bar appears under tasks, upon completion, the task returns to none and your instance host changes to a new node.

    image

    Figure 5: Task list for migrating instances

    - + \ No newline at end of file diff --git a/operators-manual/day-2/maintenance/index.html b/operators-manual/day-2/maintenance/index.html index 7dd9b346..ca51a839 100644 --- a/operators-manual/day-2/maintenance/index.html +++ b/operators-manual/day-2/maintenance/index.html @@ -39,7 +39,7 @@ ceph status.

    For example:

    [root@smiling-pelican ~]# ceph status
    cluster:
    id: 06bf4555-7c0c-4b96-a3b7-502bf8f6f213
    health: HEALTH_OK
    [...output truncated...]

    The above output shows the status as HEALTH_OK, indicating the Ceph cluster is healthy. Ceph is naturally resilient and should recover from a node being rebooted.

    - + \ No newline at end of file diff --git a/operators-manual/day-2/manage-node-firewall-with-iptables/index.html b/operators-manual/day-2/manage-node-firewall-with-iptables/index.html index e4e19a9f..cf687828 100644 --- a/operators-manual/day-2/manage-node-firewall-with-iptables/index.html +++ b/operators-manual/day-2/manage-node-firewall-with-iptables/index.html @@ -34,7 +34,7 @@ by attempting to access services (SSH etc) from both a whitelisted IP and an undefined network.

  • Once confirmed, enable auto-start for both services.

    [root@exhilarated-firefly ~]# systemctl enable ipset
    Created symlink /etc/systemd/system/basic.target.wants/ipset.service → /usr/lib/systemd/system/ipset.service.
    [root@exhilarated-firefly ~]# systemctl enable iptables
    Created symlink /etc/systemd/system/multi-user.target.wants/iptables.service → /usr/lib/systemd/system/iptables.service.
    [root@exhilarated-firefly ~]#
  • Copy the newly created files to the root user's home folder across the rest of the nodes.

    [root@exhilarated-firefly ~]# for node in $(awk '!/localhost/&&/local/{print$NF}' /etc/hosts| grep -v $(hostname -s)); do echo "scp -i .ssh/fm-deploy /etc/sysconfig/ipset $node:~/new-ipset && scp -i .ssh/fm-deploy /etc/sysconfig/iptables $node:~/new-iptables"; done
    scp -i .ssh/fm-deploy /etc/sysconfig/ipset gifted-wildcat:~/new-ipset && scp -i .ssh/fm-deploy /etc/sysconfig/iptables gifted-wildcat:~/new-iptables
    scp -i .ssh/fm-deploy /etc/sysconfig/ipset upbeat-peacock:~/new-ipset && scp -i .ssh/fm-deploy /etc/sysconfig/iptables upbeat-peacock:~/new-iptables
    [root@exhilarated-firefly ~]#
  • Deploy to the remaining nodes.

  • - + \ No newline at end of file diff --git a/operators-manual/day-2/private-cloud-deployment-overview/index.html b/operators-manual/day-2/private-cloud-deployment-overview/index.html index dbeb8769..082d4d7e 100644 --- a/operators-manual/day-2/private-cloud-deployment-overview/index.html +++ b/operators-manual/day-2/private-cloud-deployment-overview/index.html @@ -41,7 +41,7 @@ recognized as blocks.

    Advantages of using Ceph

    • Data is self-healing and will redistribute data across your cluster in the event of power, hardware, or connectivity issues
    • Data is replicated and highly available
    • Ceph has the ability to run on commodity hardware and to mix hardware from different vendors
    - + \ No newline at end of file diff --git a/operators-manual/day-3/add-provider-ips/index.html b/operators-manual/day-3/add-provider-ips/index.html index e1b26eed..d4b5fef4 100644 --- a/operators-manual/day-3/add-provider-ips/index.html +++ b/operators-manual/day-3/add-provider-ips/index.html @@ -27,7 +27,7 @@ "External" network.

    image

    Figure 1: List of Networks

    Choose the Subnets tab and confirm that the new subnet has been added.

    image

    Figure 2: List of Subnets for the External network

    The new subnet will be prefixed with Internet_ and will have a series of hex values following it.

    How are the new Provider Block IPs Used?

    When creating a resource that requires an IP from this block, specify the name of the newly added provider block to add IPs from it.

    - + \ No newline at end of file diff --git a/operators-manual/day-3/add-remove-hardware-nodes/index.html b/operators-manual/day-3/add-remove-hardware-nodes/index.html index d7f4e57c..f62a5b92 100644 --- a/operators-manual/day-3/add-remove-hardware-nodes/index.html +++ b/operators-manual/day-3/add-remove-hardware-nodes/index.html @@ -78,7 +78,7 @@ the request through OpenMetal Central. Specify the hostname or IP address of the node you wish to be removed and an agent will review the request.

    - + \ No newline at end of file diff --git a/operators-manual/day-3/backup-restore-openstack-databases/index.html b/operators-manual/day-3/backup-restore-openstack-databases/index.html index 878be588..8d565581 100644 --- a/operators-manual/day-3/backup-restore-openstack-databases/index.html +++ b/operators-manual/day-3/backup-restore-openstack-databases/index.html @@ -72,7 +72,7 @@ place.

    In the Docker container, run:

    cd /backup/
    rm -rf /backup/restore
    mkdir -p /backup/restore/full
    mkdir -p /backup/restore/incremental
    gunzip mysqlbackup-10-12-2021-1639166052.qp.xbc.xbs.gz
    gunzip incremental-20-mysqlbackup-10-12-2021-1639169695.qp.xbc.xbs.gz
    mbstream -x -C /backup/restore/full/ < mysqlbackup-10-12-2021-1639166052.qp.xbc.xbs
    mbstream -x -C /backup/restore/incremental/ < incremental-20-mysqlbackup-10-12-2021-1639169695.qp.xbc.xbs
    mariabackup --prepare --target-dir=/backup/restore/full/
    mariabackup --prepare --target-dir=/backup/restore/full/ --incremental-dir=/backup/restore/incremental/

    Load another shell session for the node in which you are working and stop the MariaDB Docker container:

    docker stop mariadb

    Navigate back to the Docker container and run:

    rm -rf /var/lib/mysql/*
    rm -rf /var/lib/mysql/\.[^/.]*
    mariabackup --copy-back --target-dir /backup/restore/full/

    Next, navigate back to the other shell and start the MariaDB Docker container:

    docker start mariadb

    Examine MariaDB's logs to confirm the Galera cluster has synchronized:

    # tail -1 /var/log/kolla/mariadb/mariadb.log
    2021-12-08 22:27:39 2 [Note] WSREP: Synchronized with group, ready for
    connections

    References

    - + \ No newline at end of file diff --git a/operators-manual/day-3/create-openstack-service-backups/index.html b/operators-manual/day-3/create-openstack-service-backups/index.html index 53d5d282..402a08ce 100644 --- a/operators-manual/day-3/create-openstack-service-backups/index.html +++ b/operators-manual/day-3/create-openstack-service-backups/index.html @@ -51,7 +51,7 @@ first ensure you have prepared a Kolla Ansible environment

    Next, use Kolla Ansible's reconfigure function, targeting only the Neutron service by using the flag --tags neutron and limit the run to the host relaxed-flamingo by specifying the flag --limit control[0].

    For example:

    kolla-ansible \
    -i /etc/fm-deploy/kolla-ansible-inventory \
    reconfigure \
    --tags neutron \
    --limit control[0]
    - + \ No newline at end of file diff --git a/operators-manual/day-3/create-volume-backups/index.html b/operators-manual/day-3/create-volume-backups/index.html index 2d79b8d5..59682330 100644 --- a/operators-manual/day-3/create-volume-backups/index.html +++ b/operators-manual/day-3/create-volume-backups/index.html @@ -41,7 +41,7 @@ well as outside of it increases the failure domain.

    With Ceph you can use RBD mirroring, which effectively is a way to mirror your Ceph cluster's data to another Ceph cluster.

    - + \ No newline at end of file diff --git a/operators-manual/day-3/disaster-recovery/index.html b/operators-manual/day-3/disaster-recovery/index.html index 0efa56ca..25737d3d 100644 --- a/operators-manual/day-3/disaster-recovery/index.html +++ b/operators-manual/day-3/disaster-recovery/index.html @@ -69,7 +69,7 @@ contact your Account Manager or submit a support ticket in OpenMetal Central.

    Contact Support

    If you are experiencing hardware failure or any other issues with your OpenMetal Private Cloud, Contact Support.

    Additional Reading

    For more regarding OpenStack Disaster Recovery, see:

    - + \ No newline at end of file diff --git a/operators-manual/day-4/automation/heat/index.html b/operators-manual/day-4/automation/heat/index.html index 069ee166..06acd2a0 100644 --- a/operators-manual/day-4/automation/heat/index.html +++ b/operators-manual/day-4/automation/heat/index.html @@ -73,7 +73,7 @@ Additionally, OpenStack has a guide for creating your first stack through heat that can be found at Creating your first stack.

    - + \ No newline at end of file diff --git a/operators-manual/day-4/automation/terraform/index.html b/operators-manual/day-4/automation/terraform/index.html index 00fa5eb8..61a2c21d 100644 --- a/operators-manual/day-4/automation/terraform/index.html +++ b/operators-manual/day-4/automation/terraform/index.html @@ -66,7 +66,7 @@ Cloud.

    View Instance Created by Terraform

    To view your created instance, navigate in Horizon to Project -> Compute -> Instances, where you can view the instance created by Terraform.

    image

    Figure 2: Newly Created Terraform Instance

    - + \ No newline at end of file diff --git a/operators-manual/day-4/ceph-ansible/prepare-ceph-ansible/index.html b/operators-manual/day-4/ceph-ansible/prepare-ceph-ansible/index.html index f0364046..2fb15477 100644 --- a/operators-manual/day-4/ceph-ansible/prepare-ceph-ansible/index.html +++ b/operators-manual/day-4/ceph-ansible/prepare-ceph-ansible/index.html @@ -23,7 +23,7 @@ your Account Manager should this apply to you.

    Prerequisites

    Root Access to OpenStack Control Plane

    Root access to your cloud's control plane nodes is required.

    Path to the Ceph Configuration Files

    New Clouds

    On clouds provisioned after Dec 2022 you will need to open a support ticket to have the configuration saved to your nodes.

    • Ceph Ansible Inventory
      • /etc/fm-deploy/ceph-inventory.yml
    • Ceph Ansible Config
      • /opt/ceph-ansible/group_vars/all.yml

    Preparation

    To prepare Ceph Ansible:

    docker cp fm-deploy:/opt/ceph-ansible /opt/ceph-ansible
    chmod 700 /opt/ceph-ansible
    cd /opt/ceph-ansible
    virtualenv .venv
    source .venv/bin/activate
    pip install -r requirements.txt
    pip install six

    Deploy a Ceph Cluster:

    ansible-playbook \
    -i /etc/fm-deploy/ceph-inventory.yml \
    --private-key /root/.ssh/fm-deploy \
    /opt/ceph-ansible/site.yml

    Attempt to repair a broken Ceph cluster:

    ansible-playbook \
    -i /etc/fm-deploy/ceph-inventory.yml \
    --private-key /root/.ssh/fm-deploy \
    /opt/ceph-ansible/site.yml
    - + \ No newline at end of file diff --git a/operators-manual/day-4/kolla-ansible/enable-elk/index.html b/operators-manual/day-4/kolla-ansible/enable-elk/index.html index 4ec364cf..725c5344 100644 --- a/operators-manual/day-4/kolla-ansible/enable-elk/index.html +++ b/operators-manual/day-4/kolla-ansible/enable-elk/index.html @@ -30,7 +30,7 @@ to prevent your cloud's disks from filling up.

    Reference

    Kolla Ansible's Central Logging guide.

    - + \ No newline at end of file diff --git a/operators-manual/day-4/kolla-ansible/enable-tls/index.html b/operators-manual/day-4/kolla-ansible/enable-tls/index.html index a4b54888..096d10bc 100644 --- a/operators-manual/day-4/kolla-ansible/enable-tls/index.html +++ b/operators-manual/day-4/kolla-ansible/enable-tls/index.html @@ -80,7 +80,7 @@ for Swift needs to include the HTTPS protocol.

    In the file ./group_vars/all.yml, ensure the line with rgw keystone url: now specifies HTTPS instead of HTTP. For example:

    rgw keystone url: https://173.231.254.164:5000

    Next, run Ceph Ansible, using:

    ansible-playbook \
    -i /etc/fm-deploy/ceph-inventory.yml \
    --private-key /root/.ssh/fm-deploy \
    /opt/ceph-ansible/site.yml

    Reference

    TLS Documentation from Kolla Ansible for OpenStack Victoria

    - + \ No newline at end of file diff --git a/operators-manual/day-4/kolla-ansible/prepare-kolla-ansible/index.html b/operators-manual/day-4/kolla-ansible/prepare-kolla-ansible/index.html index 4c9af41a..3400c17f 100644 --- a/operators-manual/day-4/kolla-ansible/prepare-kolla-ansible/index.html +++ b/operators-manual/day-4/kolla-ansible/prepare-kolla-ansible/index.html @@ -79,7 +79,7 @@ Central Logging with an ELK stack:

    - + \ No newline at end of file diff --git a/operators-manual/day-4/security/security-best-practices/index.html b/operators-manual/day-4/security/security-best-practices/index.html index d0b4c62e..515be2dc 100644 --- a/operators-manual/day-4/security/security-best-practices/index.html +++ b/operators-manual/day-4/security/security-best-practices/index.html @@ -105,7 +105,7 @@ contact visit OpenStack Security.

    A list of current Security Advisories for OpenStack can be found Here.

    For a more in-depth look into current best practices with OpenStack see OpenStack Security Guide.

    - + \ No newline at end of file diff --git a/operators-manual/day-4/troubleshooting/ceph/index.html b/operators-manual/day-4/troubleshooting/ceph/index.html index 58fe250b..7c82e20b 100644 --- a/operators-manual/day-4/troubleshooting/ceph/index.html +++ b/operators-manual/day-4/troubleshooting/ceph/index.html @@ -41,7 +41,7 @@ skew issues.

    References

    - + \ No newline at end of file diff --git a/operators-manual/day-4/troubleshooting/log-filtering/index.html b/operators-manual/day-4/troubleshooting/log-filtering/index.html index fdc5dc00..49b176e6 100644 --- a/operators-manual/day-4/troubleshooting/log-filtering/index.html +++ b/operators-manual/day-4/troubleshooting/log-filtering/index.html @@ -41,7 +41,7 @@ example of a terminal multiplexer.

    Viewing Logs

    There are several native ways to view the contents of a log file.

    Applications like less, nano, and vim will suffice. For more colorful output, consider using an application like lnav, which has proven especially useful for examining unfamiliar logs.

    - + \ No newline at end of file diff --git a/operators-manual/day-4/troubleshooting/rabbitmq/index.html b/operators-manual/day-4/troubleshooting/rabbitmq/index.html index 63685f35..5e898e33 100644 --- a/operators-manual/day-4/troubleshooting/rabbitmq/index.html +++ b/operators-manual/day-4/troubleshooting/rabbitmq/index.html @@ -34,7 +34,7 @@ functioning again. Kolla Ansible is used to redeploy a cloud's RabbitMQ cluster. For more, see How to Redeploy RabbitMQ Cluster using Kolla Ansible.

    - + \ No newline at end of file diff --git a/operators-manual/day-4/troubleshooting/redeploy-rabbitmq/index.html b/operators-manual/day-4/troubleshooting/redeploy-rabbitmq/index.html index bff47d04..7ba1312f 100644 --- a/operators-manual/day-4/troubleshooting/redeploy-rabbitmq/index.html +++ b/operators-manual/day-4/troubleshooting/redeploy-rabbitmq/index.html @@ -17,7 +17,7 @@ time you work with Kolla Ansible, you must prepare a shell environment. For more, see How to Prepare and Use Kolla Ansible.

    All commands are to be executed from the control plane node in which Kolla Ansible has been prepared.

    Root Access to OpenStack Control Plane

    Root access to your cloud's control plane nodes is required.

    How to Redeploy RabbitMQ

    For each RabbitMQ cluster member, run:

    docker stop rabbitmq
    cp -Rv /var/lib/docker/volumes/rabbitmq/_data/mnesia{,.bk$(date +%F)}
    rm -rfv /var/lib/docker/volumes/rabbitmq/_data/mnesia/

    Then, use Kolla Ansible's deploy function, targeting RabbitMQ:

    kolla-ansible -i /etc/fm-deploy/kolla-ansible-inventory deploy --tags rabbitmq
    - + \ No newline at end of file diff --git a/operators-manual/day-4/watcher/watcher-demo/index.html b/operators-manual/day-4/watcher/watcher-demo/index.html index b3933788..db79a93c 100644 --- a/operators-manual/day-4/watcher/watcher-demo/index.html +++ b/operators-manual/day-4/watcher/watcher-demo/index.html @@ -44,7 +44,7 @@ with three hypervisors instead of four. Instances were live migrated to free up a single compute host. Then, Watcher disabled the compute service for the freed node.

    - + \ No newline at end of file diff --git a/operators-manual/index.html b/operators-manual/index.html index cf790530..70ed92cd 100644 --- a/operators-manual/index.html +++ b/operators-manual/index.html @@ -43,7 +43,7 @@ solutions. Finally we briefly cover automation techniques possible in your cloud through the use of OpenStack's Heat service and through HashiCorp's Terraform application.

    1. How to Prepare and Use Kolla Ansible
    2. Enable TLS Using Kolla Ansible
    3. Enable ElasticSearch and Kibana Logging using Kolla Ansible
    4. Security and Your OpenMetal Private Cloud
    5. How to Prepare and Use Ceph Ansible
    6. OpenStack Watcher Demonstration
    7. Troubleshooting RabbitMQ
    8. How to Redeploy RabbitMQ using Kolla Ansible
    9. Troubleshooting Ceph
    10. Guidelines for Searching through Logs
    11. OpenStack Automation through Heat
    12. Automate Infrastructure using Terraform
    - + \ No newline at end of file diff --git a/terraform/configure-terraform-to-automate-openstack-resources/index.html b/terraform/configure-terraform-to-automate-openstack-resources/index.html index 436e251e..09eb153c 100644 --- a/terraform/configure-terraform-to-automate-openstack-resources/index.html +++ b/terraform/configure-terraform-to-automate-openstack-resources/index.html @@ -27,7 +27,7 @@ Hosted Private Cloud. It is on-demand and billed by the hour. Combine the power of on demand OpenStack and Terraform to create massively scaled deployments with ease.

    - + \ No newline at end of file diff --git a/tutorials/backing-up-important-data/index.html b/tutorials/backing-up-important-data/index.html index 760ff86c..10591477 100644 --- a/tutorials/backing-up-important-data/index.html +++ b/tutorials/backing-up-important-data/index.html @@ -67,7 +67,7 @@ done by restoring the most recently archived backup into a development environment identical to production, allowing you to confirm everything will function as expected if disaster recovery is ever required.

    - + \ No newline at end of file diff --git a/tutorials/backup-vm-data-to-ceph-with-swift-s3api/index.html b/tutorials/backup-vm-data-to-ceph-with-swift-s3api/index.html index 691c6b4f..d97047db 100644 --- a/tutorials/backup-vm-data-to-ceph-with-swift-s3api/index.html +++ b/tutorials/backup-vm-data-to-ceph-with-swift-s3api/index.html @@ -29,7 +29,7 @@ subdirectory:

    rclone sync /home/ubuntu/ backup-demo:$(hostname)/${USER}/ \
    --exclude="/home/ubuntu/doc_root/wp-content/cache" \
    --backup-dir backup-demo:$(hostname)/$(date -u +%Y-%m-%dT%H:%MZ)/ \
    --update

    Conclusion

    While we have only touched on a very basic backup scenario, the tooling and concepts presented should provide a foundation for you to explore, plan and implement a backup strategy to fit your needs.

    Reference

    - + \ No newline at end of file diff --git a/tutorials/create-site-to-site-vpn/index.html b/tutorials/create-site-to-site-vpn/index.html index 08f290f5..d644f9ad 100644 --- a/tutorials/create-site-to-site-vpn/index.html +++ b/tutorials/create-site-to-site-vpn/index.html @@ -26,7 +26,7 @@ vpn-east) to site west (peer IP address 200.225.46.189) defining the local (subnet-ep-east) and peer (peer-ep-east) endpoint groups.

    $ openstack vpn ipsec site connection create conn-east \
    --vpnservice vpn-east \
    --ikepolicy ikepolicy1 \
    --ipsecpolicy ipsecpolicy1 \
    --peer-address 200.225.46.189 \
    --peer-id 200.225.46.189 \
    --local-endpoint-group subnet-ep-east \
    --peer-endpoint-group peer-ep-east \
    --psk secret 2>/dev/null
    +--------------------------+----------------------------------------------------+
    | Field | Value |
    +--------------------------+----------------------------------------------------+
    | Authentication Algorithm | psk |
    | Description | |
    | ID | 324290df-f959-4a52-945f-a3c77466fb01 |
    | IKE Policy | 2274a56d-c42f-4e60-bdab-a30e572a4baf |
    | IPSec Policy | 64eebf8d-7f79-4118-8b9f-0254a9e487d5 |
    | Initiator | bi-directional |
    | Local Endpoint Group ID | 1b0a3828-1086-411c-bbfc-96195e76b6ba |
    | Local ID | |
    | MTU | 1500 |
    | Name | conn-east |
    | Peer Address | 200.225.46.189 |
    | Peer CIDRs | |
    | Peer Endpoint Group ID | a9f46708-ed80-4a83-b4d4-f7d98bc897e7 |
    | Peer ID | 200.225.46.189 |
    | Pre-shared Key | secret |
    | Project | e6fa12aa82f942f199a8cd6f3ee183d1 |
    | Route Mode | static |
    | State | True |
    | Status | PENDING_CREATE |
    | VPN Service | 7df8bf8c-03e3-4885-89f3-e6cb3f5d16e4 |
    | dpd | {'action': 'hold', 'interval': 30, 'timeout': 120} |
    | project_id | e6fa12aa82f942f199a8cd6f3ee183d1 |
    +--------------------------+----------------------------------------------------+

    Step Seven: Test Site-to-Site Connection

    Create instances connected to subnet-west and subnet-east and confirm they can communicate via ICMP echo requests to local IPs.

    - + \ No newline at end of file diff --git a/tutorials/ephemeral-storage/index.html b/tutorials/ephemeral-storage/index.html index b13e3a35..71c22b63 100644 --- a/tutorials/ephemeral-storage/index.html +++ b/tutorials/ephemeral-storage/index.html @@ -53,7 +53,7 @@ ( / ).

    The application fio is used to test disk performance.

    The tests performed are:

    • Random disk reads and writes
    • Sequential disk reads and writes

    Results

    Above each table are the parameters passed to fio.

    These tests were performed within a Private Cloud Core - Small using an additional Compute - Large node.

    rw=randread bs=4k size=2g numjobs=1 iodepth=2 runtime=60 end_fsync=1 ioengine=posixaio

    Random ReadsBandwidth (MiB/s)IOPS (average)
    LVM Ephemeral34.48819
    Ceph8.52125

    rw=randwrite bs=4k size=2g numjobs=1 iodepth=2 runtime=60 end_fsync=1 ioengine=posixaio

    Random WritesBandwidth (MiB/s)IOPS (average)
    LVM Ephemeral28572998
    Ceph94.139305

    rw=read bs=4k size=2g numjobs=1 iodepth=2 runtime=60 end_fsync=1 ioengine=posixaio

    Sequential ReadsBandwidth (MiB/s)IOPS (average)
    LVM Ephemeral30283314
    Ceph19750210

    rw=write bs=4k size=2g numjobs=1 iodepth=2 runtime=60 end_fsync=1 ioengine=posixaio

    Sequential WritesBandwidth (MiB/s)IOPS (average)
    LVM Ephemeral29476620
    Ceph10440244
    - + \ No newline at end of file diff --git a/tutorials/getting-started-with-ceilometer-and-gnocchi/index.html b/tutorials/getting-started-with-ceilometer-and-gnocchi/index.html index dc446fc4..b11be269 100644 --- a/tutorials/getting-started-with-ceilometer-and-gnocchi/index.html +++ b/tutorials/getting-started-with-ceilometer-and-gnocchi/index.html @@ -48,7 +48,7 @@ Alarms, from the OpenStack documentation page. Setting up alarms or causing specific actions to occur is beyond the scope of this guide.

    - + \ No newline at end of file diff --git a/tutorials/glance-trim-support/index.html b/tutorials/glance-trim-support/index.html index a1ec05bb..c18badd6 100644 --- a/tutorials/glance-trim-support/index.html +++ b/tutorials/glance-trim-support/index.html @@ -32,7 +32,7 @@ “+” next to “Disk Bus” and “SCSI Model

    create-image-metadata.jpg

    Adding fstrim support to an existing image

    Via the OpenStack CLI

    openstack image set <IMAGE_NAME_OR_UUID> \
    --property hw_scsi_model=virtio-scsi \
    --property hw_disk_bus=scsi

    Using Horizon (Web GUI)

    First, select the drop down on the image you want to modify then select “Update Metadata

    select-update-metadata.jpg

    Next, on the left hand tab under “libvirt Driver Options for Images” and select the “+” next to “Disk Bus” and “SCSI Model

    update-image-metadata.jpg

    - + \ No newline at end of file diff --git a/tutorials/https-loadbalancer/index.html b/tutorials/https-loadbalancer/index.html index ca3e393b..497be996 100644 --- a/tutorials/https-loadbalancer/index.html +++ b/tutorials/https-loadbalancer/index.html @@ -16,7 +16,7 @@ directory like so:

    $ ls cert
    ca-certs.pem server.crt server.key

    Procedure

    Create a copy of the certificate in PKCS#12 format using openssl:

    openssl \
    pkcs12 -export \
    -inkey server.key \
    -in server.crt \
    -certfile ca-chain.crt \
    -passout pass: \
    -out server.p12

    Store the SSL certificate as a secret using Barbican:

    openstack secret store \
    --name='tls_secret1' \
    -t 'application/octet-stream' \
    -e 'base64' \
    --payload="$(base64 < server.p12)"

    Create the load balancer and ensure it is on the same network as your VMs:

    openstack loadbalancer create \
    --name lb1 \
    --vip-subnet-id private_net_1

    Create a listener with protocol TERMINATED_HTTPS, listening on port 443, using the certificate secret uploaded earlier:

    openstack loadbalancer listener create \
    lb1 \
    --protocol TERMINATED_HTTPS \
    --protocol-port 443 \
    --name listener1 \
    --default-tls-container-ref $(openstack secret list | awk '/ tls1 / {print $2}')

    Create a ROUND_ROBIN pool using the HTTP protocol:

    openstack loadbalancer pool create \
    --name pool1 \
    --lb-algorithm ROUND_ROBIN \
    --listener listener1 \
    --protocol HTTP

    Create 2 members using the appropriate IPs for your VMs:

    openstack loadbalancer member create \
    --subnet-id private_net_1 \
    --address 10.0.0.250 \
    --protocol-port 80 \
    pool1
    openstack loadbalancer member create \
    --subnet-id private_net_1 \
    --address 10.0.0.57 \
    --protocol-port 80 \
    pool1

    Associate a floating IP to the VIP address of the load balancer:

    openstack floating ip set --port d77f97aa-9d33-40c1-b191-1ca549a95075 173.231.202.91

    Show the details of the load balancer:

    $ openstack loadbalancer show lb1
    +---------------------+--------------------------------------+
    | Field | Value |
    +---------------------+--------------------------------------+
    | admin_state_up | True |
    | availability_zone | None |
    | created_at | 2023-10-10T14:41:53 |
    | description | |
    | flavor_id | None |
    | id | 5028cbc9-1c72-4873-b423-ffcb1c2f1887 |
    | listeners | 75e82ae6-f1a4-43e3-a7ea-9909afc5ae59 |
    | name | lb1 |
    | operating_status | ONLINE |
    | pools | 6ae46b29-0d27-45f5-9d8e-23e2ef82fe84 |
    | project_id | 4993ac59480646b6b15ae7727279ca11 |
    | provider | amphora |
    | provisioning_status | ACTIVE |
    | updated_at | 2023-10-10T18:22:46 |
    | vip_address | 10.0.0.132 |
    | vip_network_id | 0d23b204-993b-4876-8604-a4d0ec76e6ad |
    | vip_port_id | d77f97aa-9d33-40c1-b191-1ca549a95075 |
    | vip_qos_policy_id | None |
    | vip_subnet_id | ad266069-6fa0-4c4d-830d-5d5b913279e2 |
    | tags | |
    +---------------------+--------------------------------------+

    Conclusion

    Update the DNS for your FQDN to point to the floating IP address and verify the load balancer works as expected.

    - + \ No newline at end of file diff --git a/tutorials/index.html b/tutorials/index.html index 3704d6da..9dd7fef5 100644 --- a/tutorials/index.html +++ b/tutorials/index.html @@ -11,7 +11,7 @@
    - + \ No newline at end of file diff --git a/tutorials/install-barbican/index.html b/tutorials/install-barbican/index.html index 9e708355..3b853915 100644 --- a/tutorials/install-barbican/index.html +++ b/tutorials/install-barbican/index.html @@ -16,7 +16,7 @@ and we'll ensure it's copied to the control plane nodes.

    Step 3 - Verify that the Barbican endpoints are there

    (.venv) [root@fierce-possum kolla]# openstack endpoint list --service barbican
    +----------------------------------+--------+--------------+--------------+---------+-----------+---------------------------+
    | ID | Region | Service Name | Service Type | Enabled | Interface | URL |
    +----------------------------------+--------+--------------+--------------+---------+-----------+---------------------------+
    | bbebfdc0f6ce4f9895d536785fa3cfea | iad3 | barbican | key-manager | True | public | http://200.225.44.4:9311 |
    | c7c9861c34ed44de8a3f65b9d74f80fa | iad3 | barbican | key-manager | True | admin | http://192.168.2.254:9311 |
    | da1d9afd031f470d80866256884ef242 | iad3 | barbican | key-manager | True | internal | http://192.168.2.254:9311 |
    +----------------------------------+--------+--------------+--------------+---------+-----------+---------------------------+

    Step 4 - Install the Barbican OpenStack CLI

    While in your virtual environment, install the OpenStack CLI library for secret storage.

    (.venv) [root@fierce-possum kolla]# pip install python-barbicanclient

    Step 5 - Create a test secret

    To validate Barbican's functionality, run the following command to create your first test secret.

    (.venv) [root@fierce-possum kolla]# openstack secret store --name my_secret --payload 'This is a secure statement'
    +---------------+--------------------------------------------------------------------------+
    | Field | Value |
    +---------------+--------------------------------------------------------------------------+
    | Secret href | http://200.225.44.4:9311/v1/secrets/973cffdb-d4b1-418c-befa-9f67d77a982b |
    | Name | my_secret |
    | Created | None |
    | Status | None |
    | Content types | None |
    | Algorithm | aes |
    | Bit length | 256 |
    | Secret type | opaque |
    | Mode | cbc |
    | Expiration | None |
    +---------------+--------------------------------------------------------------------------+
    - + \ No newline at end of file diff --git a/tutorials/intro-to-barbican/index.html b/tutorials/intro-to-barbican/index.html index c398b411..5268b2a5 100644 --- a/tutorials/intro-to-barbican/index.html +++ b/tutorials/intro-to-barbican/index.html @@ -24,7 +24,7 @@ volume, a new volume type in Cinder must be created. Create a new Encrypted Volume type using:

    openstack volume type create --encryption-provider luks --encryption-cipher aes-xts-plain64 --encryption-key-size 256 --encryption-control-location front-end encrypted

    Output:

    +-------------+-----------------------------------------------------------------------------------------------------------------------------------------------+
    | Field | Value |
    +-------------+-----------------------------------------------------------------------------------------------------------------------------------------------+
    | description | None |
    | encryption | cipher='aes-xts-plain64', control_location='front-end', encryption_id='bd8cc91f-877a-4c13-a0b1-65b236f0c3c6', key_size='256', provider='luks' |
    | id | 6677cb0d-b548-4eb5-b789-aadacfd6ec94 |
    | is_public | True |
    | name | encrypted |
    +-------------+-----------------------------------------------------------------------------------------------------------------------------------------------+

    Encrypted Volume

    Create an encrypted volume using:

    openstack volume create --size 10 --type encrypted --image 'Ubuntu 22.04 (Jammy)' --bootable ubuntu22

    Output:

    +---------------------+--------------------------------------+
    | Field | Value |
    +---------------------+--------------------------------------+
    | attachments | [] |
    | availability_zone | nova |
    | bootable | false |
    | consistencygroup_id | None |
    | created_at | 2023-10-05T19:37:09.563568 |
    | description | None |
    | encrypted | True |
    | id | dd4fc56c-d1f1-4700-add2-d06c0183dbe3 |
    | migration_status | None |
    | multiattach | False |
    | name | ubuntu22 |
    | properties | |
    | replication_status | None |
    | size | 10 |
    | snapshot_id | None |
    | source_volid | None |
    | status | creating |
    | type | encrypted |
    | updated_at | None |
    | user_id | 2d1fd7d55b4f49f3b5bde488b6bc949f |
    +---------------------+--------------------------------------+

    Spin up a VM with Encrypted Volume

    Using the encrypted volume created in the previous step, spin up a VM with it using:

    vol=dd4fc56c-d1f1-4700-add2-d06c0183dbe3
    openstack server create --flavor gen.small --volume $vol --network External --security-group "SSH ingress" --key-name key ubuntu22_encrypted

    Output:

    +-------------------------------------+---------------------------------------------+
    | Field | Value |
    +-------------------------------------+---------------------------------------------+
    | OS-DCF:diskConfig | MANUAL |
    | OS-EXT-AZ:availability_zone | |
    | OS-EXT-SRV-ATTR:host | None |
    | OS-EXT-SRV-ATTR:hypervisor_hostname | None |
    | OS-EXT-SRV-ATTR:instance_name | |
    | OS-EXT-STS:power_state | NOSTATE |
    | OS-EXT-STS:task_state | scheduling |
    | OS-EXT-STS:vm_state | building |
    | OS-SRV-USG:launched_at | None |
    | OS-SRV-USG:terminated_at | None |
    | accessIPv4 | |
    | accessIPv6 | |
    | addresses | |
    | adminPass | NWqPR38Qj48L |
    | config_drive | |
    | created | 2023-10-05T19:45:51Z |
    | flavor | gen.small (gen.small) |
    | hostId | |
    | id | ea82e509-7efa-44a7-ba2b-e6364f458958 |
    | image | N/A (booted from volume) |
    | key_name | key |
    | name | ubuntu22_encrypted |
    | progress | 0 |
    | project_id | 4993ac59480646b6b15ae7727279ca11 |
    | properties | |
    | security_groups | name='6ffcdf91-d8dd-4499-98e1-0860267ea08e' |
    | status | BUILD |
    | updated | 2023-10-05T19:45:51Z |
    | user_id | 2d1fd7d55b4f49f3b5bde488b6bc949f |
    | volumes_attached | |
    +-------------------------------------+---------------------------------------------+
    - + \ No newline at end of file diff --git a/tutorials/lb-with-octavia/index.html b/tutorials/lb-with-octavia/index.html index e413be17..436b5b2c 100644 --- a/tutorials/lb-with-octavia/index.html +++ b/tutorials/lb-with-octavia/index.html @@ -59,7 +59,7 @@ hostname of each server oscillate upon each refresh.


    image


    image


    Conclusion

    This concludes the guide on how to create a load balancer in OpenStack. Note this is a rudimentary example, but should be enough to get you acquainted with the process.

    - + \ No newline at end of file diff --git a/tutorials/live-migrate-instance/index.html b/tutorials/live-migrate-instance/index.html index bb22db53..2e83705c 100644 --- a/tutorials/live-migrate-instance/index.html +++ b/tutorials/live-migrate-instance/index.html @@ -22,7 +22,7 @@ the hardware nodes.

    Migration Status

    Back in the instance listing, you will see an indicator that the instance is being migrated.

    If the migration succeeded you will see a different host under the Host column for the instance.

    - + \ No newline at end of file diff --git a/tutorials/magnum-and-kubernetes/index.html b/tutorials/magnum-and-kubernetes/index.html index 40821df0..418fde94 100644 --- a/tutorials/magnum-and-kubernetes/index.html +++ b/tutorials/magnum-and-kubernetes/index.html @@ -75,7 +75,7 @@ where <cluster-uuid> should be replaced with the actual UUID:

    openstack coe cluster resize <cluster-uuid> 3

    Troubleshooting

    This section will be filled as common failure scenarios and solutions are collected. For general troubleshooting see the Magnum Troubleshooting Guide from OpenStack's documentation.

    - + \ No newline at end of file diff --git a/tutorials/manage-flavors/index.html b/tutorials/manage-flavors/index.html index 9fc20de9..16e50389 100644 --- a/tutorials/manage-flavors/index.html +++ b/tutorials/manage-flavors/index.html @@ -25,7 +25,7 @@ down on the right, select the option Update Metadata.

    Figure 4

    Figure 4: Update Flavor Metadata

    Deleting a Flavor

    Flavors can also be deleted. This action is irreversible.

    How to Delete a Flavor

    To delete a flavor, first navigate to Admin -> Compute -> Flavors, and locate the flavor in the list. From the drop down on the right, select the option Delete Flavor.

    Figure 5

    Figure 5: Delete Flavor

    References

    - + \ No newline at end of file diff --git a/tutorials/openstack-consoles-explained/index.html b/tutorials/openstack-consoles-explained/index.html index 29c26bd2..1d5f3db4 100644 --- a/tutorials/openstack-consoles-explained/index.html +++ b/tutorials/openstack-consoles-explained/index.html @@ -13,10 +13,10 @@ Computing Environments). This document compares these two console access methods in the context of OpenStack.

    Overview

    NoVNC (HTML5-based VNC client)

    • Technology: NoVNC uses HTML5 and WebSockets to provide a VNC client that runs entirely in the browser.

    • Browser Compatibility: Works on most modern web browsers without requiring additional plugins.

    • Performance: Generally good performance for remote console access.

    • Configuration: Usually configured as the default console access method in OpenStack.

    NoVNC

    SPICE (Simple Protocol for Independent Computing Environments)

    • Technology: SPICE is a protocol for remote computing environments. SPICE clients are available as standalone applications.

    • Performance: Generally good performance for remote console access.

    • Configuration: Usually configured as the default console access method in OpenStack.

    NoVNC

    SPICE (Simple Protocol for Independent Computing Environments)

    • Technology: SPICE is a protocol for remote computing environments. SPICE clients are available as standalone applications.

    • Client Installation: Requires a separate SPICE client to be installed on the user's machine.

    • Performance: Generally known for high-performance remote display capabilities.

    • Integration: Integrated with QEMU/KVM hypervisors and provides additional features like audio and video streaming.

    Spice

    Features Comparison

    NoVNC

    Pros:

    • Platform Independence: Works on various platforms without requiring client installation.

    • Browser Compatibility: Runs in most modern web browsers.

    • Easy Integration: Default console access method in many OpenStack deployments.

    Cons:

    • Potential Performance: May experience lower performance compared to SPICE in certain scenarios.

    Spice

    Features Comparison

    NoVNC

    Pros:

    • Platform Independence: Works on various platforms without requiring client installation.

    • Browser Compatibility: Runs in most modern web browsers.

    • Easy Integration: Default console access method in many OpenStack deployments.

    Cons:

    • Potential Performance: May experience lower performance compared to SPICE in certain scenarios.

    SPICE

    Pros:

    • High Performance: Known for delivering high-performance remote display capabilities.

    • Multimedia Support: Supports audio and video streaming in addition to console access.

    • Integration: Integrated with QEMU/KVM hypervisors.

    Cons:

    • Client Installation: Requires users to install a separate SPICE client on their machines.

    • Limited Browser Support: Requires a standalone client and is not as browser-friendly as NoVNC.

    Scenarios

    NoVNC Usage

    Optionally, you can specify requirements, like minimum disk space and RAM, for the image under the Image Requirements heading.

    Click the ? icon for assistance with specifics.


    Next Steps

    With this guide complete, move on to the next guide, which explains how to create an Instance in Horizon.

    - + \ No newline at end of file