diff --git a/.github/workflows/docs-spelling-checks.yml b/.github/workflows/docs-spelling-checks.yml index 96bfa636f..d4f4f9da1 100644 --- a/.github/workflows/docs-spelling-checks.yml +++ b/.github/workflows/docs-spelling-checks.yml @@ -2,9 +2,9 @@ name: Documentation Spelling Check on: workflow_dispatch: - pull_request: - paths: - - 'docs/**' + # pull_request: + # paths: + # - 'docs/**' jobs: spell-check: @@ -16,15 +16,15 @@ jobs: - id: spell-check name: Spell Check run: make spelling - working-directory: docs/tools + working-directory: docs/canonicalk8s continue-on-error: true - - if: ${{ github.event_name == 'pull_request' && steps.spell-check.outcome == 'failure' }} - uses: actions/github-script@v6 - with: - script: | - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: 'Hi, looks like pyspelling job found some issues, you can check it [here](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})' - }) + # - if: ${{ github.event_name == 'pull_request' && steps.spell-check.outcome == 'failure' }} + # uses: actions/github-script@v6 + # with: + # script: | + # github.rest.issues.createComment({ + # issue_number: context.issue.number, + # owner: context.repo.owner, + # repo: context.repo.repo, + # body: 'Hi, looks like pyspelling job found some issues, you can check it [here](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})' + # }) diff --git a/docs/moonray/.custom_wordlist.txt b/docs/moonray/.custom_wordlist.txt index e69de29bb..40495c3f4 100644 --- a/docs/moonray/.custom_wordlist.txt +++ b/docs/moonray/.custom_wordlist.txt @@ -0,0 +1,266 @@ +adapter's +adapters +allocatable +allocator +AlwaysPullImages +api +apiserver +apparmor +AppArmor +args +ARP +asn +ASN +autostart +autosuspend +aws +backend +backported +balancers +benoitblanchon +bgp +BGP +bootloader +CABPCK +CACPCK +capi +CAPI +CAs +Center +ceph +Ceph +cephcsi +cephx +cgroup +cgroups +cidr +CIDR +cidrs +CIDRs +CK8sControlPlane +CLI +CLIs +CloudFormation +ClusterAPI +clusterctl +ClusterRole +ClusterRoleBinding +CMK +CNI +Commenter +config +configMap +ConfigMap +containerd +CoreDNS +Corosync +CPUs +cpuset +crt +csi +CSI +CSRs +cyclictest +daemonset +DaemonSet +datastore +datastores +dbus +de +deallocation +deployable +discoverable +DMA +dns +DNS +DPDK +DRBD +drv +dqlite +EAL +EasyRSA +enp +enum +etcd +EventRateLimit +failover +gapped +GCP +ghcr +Gi +github +GPLv +Graber +Graber's +grafana +haircommander +Harbor +hostname +hostpath +HPC +html +http +https +HugePage +HugePages +iavf +init +initialise +integrations +io +IOMMU +IOV +ip +IPv +IPv4 +IPv6 +IRQs +Jinja +jitter +juju +Juju's +KMS +kube +kube-apiserver +kube-controller-manager +kube-proxy +kube-scheduler +kube-system +kubeconfig +kubectl +kubelet +kubepods +kubernetes +latencies +Latencies +libcontainer +lifecycle +linux +Lite's +LoadBalancer +localhost +Lookaside +lookups +loopback +LPM +lxc +LxcSecurity +LXD +MAAS +macOS +Maskable +MCE +MetalLB +Microbot +MicroCluster +MicroK +MicroK8s +MinIO +modprobe +Moonray +mq +mtu +MTU +multicast +MULTICAST +Multipass +Multus +nameservers +Netplan +NetworkAttachmentDefinition +NFD +NFV +nginx +NGINX +NIC +NMI +nodeport +nohz +NUMA +numactl +OCI +OOM +OpenStack +OSDs +ParseDuration +passthrough +passwordless +pci +PEM +performant +PID +PMD +PMDs +PPA +proc +programmatically +provisioner +PRs +PV +qdisc +qlen +QoS +RADOS +rbac +RBAC +RBD +rc +RCU +README +regctl +regsync +roadmap 
+Rockcraft +rollout +runtimes +rw +sandboxed +SANs +scalable +SCHED +sControlPlane +sd +SELinux +ServiceAccount +Snapcraft +snapd +SR-IOV +stackexchange +stgraber +STONITH +StorageClass +sudo +sys +systemd +taskset +Telco +throughs +tickless +TLB +tls +TLS +toml +TSC +TTL +ttyS +ubuntu +unix +unschedulable +unsquashed +Velero +vf +VF +vfio +VFIO +VFs +virtualised +VLAN +VMs +VMware +VNFs +VPCs +VSphere +WIP +www +yaml +YAMLs diff --git a/docs/src/.custom_wordlist.txt b/docs/src/.custom_wordlist.txt index 7893c42d2..cde8f16bf 100644 --- a/docs/src/.custom_wordlist.txt +++ b/docs/src/.custom_wordlist.txt @@ -1,66 +1,280 @@ +adapter's +adapters +allocatable +allocator +AlwaysPullImages +api apiserver apparmor AppArmor +args +ARP +asn +ASN autostart +autosuspend aws +backend +backported +balancers +benoitblanchon +bgp +BGP +bootloader +BPF +CABPCK +CACPCK +capi CAPI +CAs +Center +ceph Ceph +cephcsi +cephx cgroup -CIDRS +cgroups +cidr +CIDR +cidrs CIDRs +CIS +CK8sControlPlane +CLI +CLIs +CloudFormation +ClusterAPI +clusterctl +ClusterRole +ClusterRoleBinding +CMK CNI +Commenter config +configMap +ConfigMap containerd CoreDNS +Corosync CPUs cpuset +crt +csi +CSI +CSRs +cyclictest daemonset +DaemonSet datastore +datastores dbus +de +deallocation +deployable +discoverable +DMA +dns DNS +DPDK +DRBD +drv dqlite +EAL EasyRSA +eBPF +enp +enum etcd +eth +EventRateLimit +ExternalIP +failover +gapped GCP ghcr +Gi +github +GPLv +Graber +Graber's grafana +haircommander +Harbor +hostname +hostpath +HPC html http https +HugePage +HugePages +iavf +init initialise +integrations +InternalIP +io +IOMMU +IOV +ip +IPIP +IPIPCrossSubnet +IPv +IPv4 +IPv6 +IRQs +Jinja +jitter juju +Juju's +KMS +kube +kube-apiserver +kube-controller-manager +kube-proxy +kube-scheduler +kube-system kubeconfig kubectl kubelet kubepods kubernetes +latencies +Latencies libcontainer lifecycle linux +Lite's +LoadBalancer localhost +Lookaside +lookups +loopback +LPM lxc +LxcSecurity LXD MAAS +macOS +Maskable +MCE +MetalLB +Microbot +MicroCluster +MicroK +MicroK8s +MinIO +modprobe +Moonray +mq +mtu +MTU +multicast +MULTICAST Multipass +Multus nameservers +Netplan +NetworkAttachmentDefinition +NFD +NFV +nginx NGINX +NIC +NMI +NodeInternalIP +nodeport +nohz +NUMA +numactl OCI +OOM OpenStack +OSDs +ParseDuration +passthrough +passwordless +pci +PEM +performant +PID +PMD +PMDs +PPA proc +programmatically +provisioner +PRs +PV +qdisc +qlen +QoS +RADOS +rbac RBAC +RBD +rc +RCU +README +regctl regsync roadmap Rockcraft +rollout +Runtime +runtimes rw +sandboxed +SANs +scalable +SCHED +sControlPlane +sd +SELinux +ServiceAccount +Snapcraft snapd +SR-IOV +src stackexchange +stgraber +STONITH +StorageClass +sudo sys systemd +taskset +Telco +throughs +tickless +TLB +tls TLS +toml +TSC +TTL +ttyS ubuntu unix +unschedulable +unsquashed +Velero +vf +VF +vfio +VFIO +VFs +virtualised +VLAN +VLANs VMs -VMWare +VMware +VNFs +VPCs VSphere +VXLAN +VXLANCrossSubnet +WIP www yaml +YAMLs diff --git a/docs/src/_parts/bootstrap_config.md b/docs/src/_parts/bootstrap_config.md index b1ed6f477..6594f10ae 100644 --- a/docs/src/_parts/bootstrap_config.md +++ b/docs/src/_parts/bootstrap_config.md @@ -189,7 +189,7 @@ If omitted defaults to `true`. Sets the cloud provider to be used by the cluster. When this is set as `external`, node will wait for an external cloud provider to -do cloud specific setup and finish node initialization. +do cloud specific setup and finish node initialisation. Possible values: `external`. 
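For context, the `cloud-provider` option documented in the hunk above is set in the bootstrap configuration file passed to `k8s bootstrap --file`. A minimal sketch, assuming the `cluster-config.cloud-provider` key path used elsewhere in that reference (verify against the authoritative schema):

```yaml
# bootstrap.yaml -- illustrative sketch only; the cluster-config.cloud-provider
# key path is assumed from the bootstrap configuration reference.
cluster-config:
  # Defer cloud-specific setup and node initialisation to an external
  # cloud provider; `external` is the only documented value.
  cloud-provider: external
```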
diff --git a/docs/src/capi/explanation/capi-ck8s.md b/docs/src/capi/explanation/capi-ck8s.md index 10b0e0674..5ba3487b3 100644 --- a/docs/src/capi/explanation/capi-ck8s.md +++ b/docs/src/capi/explanation/capi-ck8s.md @@ -3,7 +3,7 @@ ClusterAPI (CAPI) is an open-source Kubernetes project that provides a declarative API for cluster creation, configuration, and management. It is designed to automate the creation and management of Kubernetes clusters in -various environments, including on-premises data centers, public clouds, and +various environments, including on-premises data centres, public clouds, and edge devices. CAPI abstracts away the details of infrastructure provisioning, networking, and @@ -60,8 +60,8 @@ clusters. As a result, the provisioned clusters are referred to as workload clusters. Typically, the management cluster runs in a separate environment from the -clusters it manages, such as a public cloud or an on-premises data center. It -serves as a centralized location for managing the configuration, policies, and +clusters it manages, such as a public cloud or an on-premises data centre. It +serves as a centralised location for managing the configuration, policies, and security of multiple managed clusters. By leveraging the management cluster, users can easily create and manage a fleet of Kubernetes clusters in a consistent and repeatable manner. diff --git a/docs/src/capi/howto/custom-ck8s.md b/docs/src/capi/howto/custom-ck8s.md index d81191980..ba3a1d0fe 100644 --- a/docs/src/capi/howto/custom-ck8s.md +++ b/docs/src/capi/howto/custom-ck8s.md @@ -1,6 +1,6 @@ # Install custom {{product}} on machines -By default, the `version` field in the machine specifications will determine which {{product}} is downloaded from the `stable` rist level. While you can install different versions of the `stable` risk level by changing the `version` field, extra steps should be taken if you're willing to install a specific risk level. +By default, the `version` field in the machine specifications determines which {{product}} version is downloaded from the `stable` risk level. While you can install different versions from the `stable` risk level by changing the `version` field, extra steps are needed if you want to install from a different risk level. This guide walks you through the process of installing custom {{product}} on workload cluster machines. ## Prerequisites @@ -13,7 +13,7 @@ To follow this guide, you will need: Please refer to the [getting-started guide][getting-started] for further details on the required setup. -In this guide we call the generated cluster spec manifrst `cluster.yaml`. +In this guide, we call the generated cluster spec manifest `cluster.yaml`. ## Overwrite the existing `install.sh` script diff --git a/docs/src/capi/howto/external-etcd.md b/docs/src/capi/howto/external-etcd.md index f6509fb22..a77600c68 100644 --- a/docs/src/capi/howto/external-etcd.md +++ b/docs/src/capi/howto/external-etcd.md @@ -9,7 +9,7 @@ with an external etcd. To follow this guide, you will need: -- [Clusterctl][clusterctl] installed +- [clusterctl][clusterctl] installed - A CAPI management cluster initialised with the infrastructure, bootstrap and control plane providers of your choice. Please refer to the [getting-started guide][getting-started] for instructions. @@ -78,7 +78,7 @@ kubectl get secrets ## Update etcd cluster template -Please refer to [capi-templates][capi-templates] for the latest templates. +Please refer to [CAPI-templates][CAPI-templates] for the latest templates.
Update the control plane resource `CK8sControlPlane` so that it is configured to store the Kubernetes state in etcd. Add the following additional configuration to the cluster template `cluster-template.yaml`: @@ -120,5 +120,5 @@ clusterctl describe cluster peaches ``` [getting-started]: ../tutorial/getting-started.md -[capi-templates]: https://github.com/canonical/cluster-api-k8s/tree/main/templates +[CAPI-templates]: https://github.com/canonical/cluster-api-k8s/tree/main/templates [clusterctl]: https://cluster-api.sigs.k8s.io/clusterctl/overview diff --git a/docs/src/capi/howto/migrate-management.md b/docs/src/capi/howto/migrate-management.md index 11a1474f3..f902a0731 100644 --- a/docs/src/capi/howto/migrate-management.md +++ b/docs/src/capi/howto/migrate-management.md @@ -1,4 +1,4 @@ -# Migrate the managment cluster +# Migrate the management cluster Management cluster migration is a really powerful operation in the cluster’s lifecycle as it allows admins to move the management cluster in a more reliable substrate or perform maintenance tasks without disruptions. diff --git a/docs/src/capi/index.md b/docs/src/capi/index.md index 4190229b6..87b56a3ab 100644 --- a/docs/src/capi/index.md +++ b/docs/src/capi/index.md @@ -10,7 +10,6 @@ Overview :titlesonly: :glob: :caption: Deploy with Cluster API -Overview tutorial/index.md howto/index.md explanation/index.md diff --git a/docs/src/capi/reference/configs.md b/docs/src/capi/reference/configs.md index a304f68c1..870d240f9 100644 --- a/docs/src/capi/reference/configs.md +++ b/docs/src/capi/reference/configs.md @@ -108,7 +108,7 @@ spec: **Required:** no -`airGapped` is used to signal that we are deploying to an airgap environment. In this case, the provider will not attempt to install k8s-snap on the machine. The user is expected to install k8s-snap manually with [`preRunCommands`](#preruncommands), or provide an image with k8s-snap pre-installed. +`airGapped` is used to signal that we are deploying to an air-gapped environment. In this case, the provider will not attempt to install k8s-snap on the machine. The user is expected to install k8s-snap manually with [`preRunCommands`](#preruncommands), or provide an image with k8s-snap pre-installed. **Example Usage:** ```yaml @@ -121,7 +121,7 @@ spec: **Required:** no -`initConfig` is configuration for the initializing the cluster features +`initConfig` is the configuration for initialising the cluster features **Fields:** @@ -193,8 +193,8 @@ spec: | `datastoreType` | `string` | The type of datastore to use for the control plane. | `""` | | `datastoreServersSecretRef` | `struct{name:str, key:str}` | A reference to a secret containing the datastore servers. | `{}` | | `k8sDqlitePort` | `int` | The port to use for k8s-dqlite. If unset, 2379 (etcd) will be used. | `2379` | -| `microclusterAddress` | `string` | The address (or CIDR) to use for microcluster. If unset, the default node interface is chosen. | `""` | -| `microclusterPort` | `int` | The port to use for microcluster. If unset, ":2380" (etcd peer) will be used. | `":2380"` | +| `microclusterAddress` | `string` | The address (or CIDR) to use for MicroCluster. If unset, the default node interface is chosen. | `""` | +| `microclusterPort` | `int` | The port to use for MicroCluster. If unset, ":2380" (etcd peer) will be used. | `":2380"` | | `extraKubeAPIServerArgs` | `map[string]string` | Extra arguments to add to kube-apiserver.
| `map[]` | **Example Usage:** diff --git a/docs/src/capi/tutorial/getting-started.md b/docs/src/capi/tutorial/getting-started.md index 8f5554b19..71a8f823a 100644 --- a/docs/src/capi/tutorial/getting-started.md +++ b/docs/src/capi/tutorial/getting-started.md @@ -170,7 +170,7 @@ provision. You can generate a cluster manifest for a selected set of commonly used infrastructures via templates provided by the {{product}} team. -Ensure you have initialized the desired infrastructure provider and fetch +Ensure you have initialised the desired infrastructure provider and fetch the {{product}} provider repository: ``` diff --git a/docs/src/charm/howto/charm.md b/docs/src/charm/howto/charm.md index 5b85f6b62..06c609c9e 100644 --- a/docs/src/charm/howto/charm.md +++ b/docs/src/charm/howto/charm.md @@ -9,7 +9,7 @@ This guide assumes the following: - The rest of this page assumes you already have Juju installed and have added [credentials] for a cloud and bootstrapped a controller. -- If you still need to do this, please take a look at the quickstart +- If you still need to do this, please take a look at the quick-start instructions, or, for custom clouds (OpenStack, MAAS), please consult the [Juju documentation][juju]. - You are not using the Juju 'localhost' cloud (see [localhost diff --git a/docs/src/charm/howto/contribute.md b/docs/src/charm/howto/contribute.md index eda251301..dff142dca 100644 --- a/docs/src/charm/howto/contribute.md +++ b/docs/src/charm/howto/contribute.md @@ -88,7 +88,7 @@ it on the [Diátaxis website]. In essence though, this guides the way we categorise and write our documentation. You can see there are four main categories of documentation: -- **Tutorials** for guided walkthroughs +- **Tutorials** for guided walk-throughs - **How to** pages for specific tasks and goals - **Explanation** pages which give background reasons and, well, explanations - **Reference**, where you will find the commands, the roadmap, etc. diff --git a/docs/src/charm/howto/cos-lite.md b/docs/src/charm/howto/cos-lite.md index 80efeebe3..97838e42a 100644 --- a/docs/src/charm/howto/cos-lite.md +++ b/docs/src/charm/howto/cos-lite.md @@ -28,7 +28,7 @@ juju add-model --config logging-config='=DEBUG' microk8s-ubuntu We also set the logging level to DEBUG so that helpful debug information is shown when you use `juju debug-log` (see [juju debug-log][juju-debug-log]). -Use the Ubuntu charm to deploy an application named “microk8s”: +Use the Ubuntu charm to deploy an application named `microk8s`: ``` juju deploy ubuntu microk8s --series=focal --constraints="mem=8G cores=4 root-disk=30G" @@ -36,13 +36,13 @@ juju deploy ubuntu microk8s --series=focal --constraints="mem=8G cores=4 root-di Deploy MicroK8s on Ubuntu by accessing the unit you created at the last step with `juju ssh microk8s/0` and following the -[Install Microk8s][how-to-install-microk8s] guide for configuration. +[Install MicroK8s][how-to-install-MicroK8s] guide for configuration. ```{note} Make sure to enable the hostpath-storage and MetalLB addons for -Microk8s. +MicroK8s. 
``` -Export the Microk8s kubeconfig file to your current directory after +Export the MicroK8s kubeconfig file to your current directory after configuration: ``` @@ -57,9 +57,9 @@ command): KUBECONFIG=microk8s-config.yaml juju add-k8s microk8s-cloud ``` -## Deploying COS Lite on the Microk8s cloud +## Deploying COS Lite on the MicroK8s cloud -On the Microk8s cloud, create a new model and deploy the `cos-lite` bundle: +On the MicroK8s cloud, create a new model and deploy the `cos-lite` bundle: ``` juju add-model cos-lite microk8s-cloud @@ -145,4 +145,4 @@ you can head over to the [COS Lite documentation][cos-lite-docs]. [juju-models]: https://juju.is/docs/juju/model [juju-debug-log]: https://juju.is/docs/juju/juju-debug-log [cross-model-integration]: https://juju.is/docs/juju/relation#heading--cross-model -[how-to-install-microk8s]: https://microk8s.io/docs/getting-started \ No newline at end of file +[how-to-install-MicroK8s]: https://microk8s.io/docs/getting-started \ No newline at end of file diff --git a/docs/src/charm/howto/install-lxd.md b/docs/src/charm/howto/install-lxd.md index 9d3b96605..99a41b7e1 100644 --- a/docs/src/charm/howto/install-lxd.md +++ b/docs/src/charm/howto/install-lxd.md @@ -24,7 +24,7 @@ profiles by running the command: lxc profile list ``` -For example, suppose we have created a model called 'myk8s'. This will +For example, suppose we have created a model called `myk8s`. This will output a table like this: ``` diff --git a/docs/src/charm/howto/proxy.md b/docs/src/charm/howto/proxy.md index 7a514dee9..8a57c4fc7 100644 --- a/docs/src/charm/howto/proxy.md +++ b/docs/src/charm/howto/proxy.md @@ -1,6 +1,6 @@ # Configuring proxy settings for K8s -{{product}} packages a number of utilities (eg curl, helm) which need +{{product}} packages a number of utilities (for example curl, helm) which need to fetch resources they expect to find on the internet. In a constrained network environment, such access is usually controlled through proxies. diff --git a/docs/src/charm/index.md b/docs/src/charm/index.md index bca4b8833..83f34fe72 100644 --- a/docs/src/charm/index.md +++ b/docs/src/charm/index.md @@ -9,8 +9,6 @@ Overview :hidden: :titlesonly: :caption: Deploy with Juju - -Overview tutorial/index.md howto/index.md explanation/index.md diff --git a/docs/src/charm/reference/charms.md b/docs/src/charm/reference/charms.md index cac6f1949..eb889da5e 100644 --- a/docs/src/charm/reference/charms.md +++ b/docs/src/charm/reference/charms.md @@ -24,7 +24,7 @@ The source code for both charms is contained in a single repository: [https://github.com/canonical/k8s-operator][repo] -Please see the [readme file][] there for further specifics of the charm +Please see the [README file][] there for further specifics of the charm implementation. @@ -32,5 +32,5 @@ implementation. [explaining channels]: ../explanation/channels [cs-k8s]: https://charmhub.io/k8s [cs-k8s-worker]: https://charmhub.io/k8s-worker -[readme file]: https://github.com/canonical/k8s-operator#readme +[README file]: https://github.com/canonical/k8s-operator#readme [repo]: https://github.com/canonical/k8s-operator \ No newline at end of file diff --git a/docs/src/snap/explanation/certificates.md b/docs/src/snap/explanation/certificates.md index 5417bb13b..63ee334b6 100644 --- a/docs/src/snap/explanation/certificates.md +++ b/docs/src/snap/explanation/certificates.md @@ -3,7 +3,7 @@ Certificates are a crucial part of Kubernetes' security infrastructure, serving to authenticate and secure communication within the cluster. 
They play a key role in ensuring that communication between various components (such as the -API server, kubelets, and the datastore) is both encrypted and restricted to +API server, kubelet, and the datastore) is both encrypted and restricted to authorised components only. In Kubernetes, [X.509][] certificates are primarily used for diff --git a/docs/src/snap/explanation/clustering.md b/docs/src/snap/explanation/clustering.md index caa2539e8..374c46d66 100644 --- a/docs/src/snap/explanation/clustering.md +++ b/docs/src/snap/explanation/clustering.md @@ -24,7 +24,7 @@ This is the overview of a {{product}} cluster: `k8sd` plays a vital role in the {{product}} architecture, enhancing the functionality of both the Control Plane and Worker nodes through the use -of [microcluster]. This component simplifies cluster management tasks, such as +of [MicroCluster]. This component simplifies cluster management tasks, such as adding or removing nodes and integrating them into the cluster. It also manages essential features like DNS and networking within the cluster, streamlining the entire process for a more efficient operation. @@ -75,4 +75,4 @@ entire life-cycle. Their components include: [Kubernetes Components]: https://kubernetes.io/docs/concepts/overview/components/ -[microcluster]: https://github.com/canonical/microcluster +[MicroCluster]: https://github.com/canonical/microcluster diff --git a/docs/src/snap/explanation/epa.md b/docs/src/snap/explanation/epa.md index 616da784f..8d3786991 100644 --- a/docs/src/snap/explanation/epa.md +++ b/docs/src/snap/explanation/epa.md @@ -19,7 +19,7 @@ capabilities. This document provides a detailed guide of how EPA applies to - **NUMA topology awareness**: Ensures that CPU and memory allocation are aligned according to the NUMA architecture, reducing memory latency and increasing performance for memory-intensive applications. -- **Single Root I/O Virtualization (SR-IOV)**: Enhances networking by enabling +- **Single Root I/O Virtualisation (SR-IOV)**: Enhances networking by enabling virtualisation of a single physical network device into multiple virtual devices. - **DPDK (Data Plane Development Kit)**: A set of libraries and drivers for @@ -92,19 +92,19 @@ are the key architectural components and their roles: (e.g., Prometheus, Grafana) to monitor and visualise HugePages usage across the cluster. This helps in tracking resource utilisation and performance. Metrics can include HugePages allocation, usage and availability on each - node, aiding in capacity planning and optimization. + node, aiding in capacity planning and optimisation. ## Real-time kernel A real-time kernel ensures that high-priority tasks are run within a -predictable timeframe, crucial for applications requiring low latency and high +predictable time frame, crucial for applications requiring low latency and high determinism. Note that this can also impede applications which were not designed with these considerations. ### Key features - **Predictable task execution**: A real-time kernel ensures that - high-priority tasks are run within a predictable and bounded timeframe, + high-priority tasks are run within a predictable and bounded time frame, reducing the variability in task execution time. - **Low latency**: The kernel is optimised to minimise the time it takes to respond to high-priority tasks, which is crucial for applications that @@ -115,7 +115,7 @@ designed with these considerations. 
- **Deterministic behaviour**: The kernel guarantees deterministic behaviour, meaning the same task will have the same response time every time it is run, essential for time-sensitive applications. -- **Pre-emption:** The real-time kernel supports preemptive multitasking, +- **Preemption:** The real-time kernel supports preemptive multitasking, allowing high-priority tasks to interrupt lower-priority tasks to ensure critical tasks are run without delay. - **Resource reservation**: System resources (such as CPU and memory) can be @@ -361,7 +361,7 @@ architectural components and their roles: configuring kernel parameters and using tools like `numactl` to bind processes to specific NUMA nodes. -## SR-IOV (Single Root I/O Virtualization) +## SR-IOV (Single Root I/O Virtualisation) SR-IOV enables a single physical network device to appear as multiple separate virtual devices. This can be beneficial for network-intensive applications that @@ -394,7 +394,7 @@ require direct access to the network hardware. - **Kubernetes integration**: Kubernetes supports SR-IOV through the use of network device plugins, enabling the automatic discovery, allocation, and management of virtual functions. -- **Compatibility with Network Functions Virtualization (NFV)**: SR-IOV is +- **Compatibility with Network Functions Virtualisation (NFV)**: SR-IOV is widely used in NFV deployments to meet the high-performance networking requirements of virtual network functions (VNFs), such as firewalls, routers and load balancers. @@ -404,7 +404,7 @@ require direct access to the network hardware. ### Application to Kubernetes -The architecture for SR-IOV (Single Root I/O Virtualization) in Kubernetes +The architecture for SR-IOV (Single Root I/O Virtualisation) in Kubernetes involves several components and configurations to ensure that virtual functions (VFs) from a single physical network device can be managed and allocated efficiently. This setup enhances network performance and provides direct access diff --git a/docs/src/snap/explanation/ingress.md b/docs/src/snap/explanation/ingress.md index a915d701e..09ebf334b 100644 --- a/docs/src/snap/explanation/ingress.md +++ b/docs/src/snap/explanation/ingress.md @@ -19,7 +19,7 @@ CNI (Container Network Interface) called [Cilium][Cilium]. If you wish to use a different network plugin the implementation and configuration falls under your responsibility. -Learn how to use the {{product}} default network in the [networking HowTo guide][Network]. +Learn how to use the {{product}} default network in the [networking how-to guide][Network]. ## Kubernetes Pods and Services diff --git a/docs/src/snap/explanation/security.md b/docs/src/snap/explanation/security.md index c0a7b1dcc..8daeb368f 100644 --- a/docs/src/snap/explanation/security.md +++ b/docs/src/snap/explanation/security.md @@ -44,11 +44,11 @@ have access to your cluster. Describing the security mechanisms of these clouds is out of the scope of this documentation, but you may find the following links useful. -- Amazon Web Services -- Google Cloud Platform -- Metal As A Service(MAAS) -- Microsoft Azure -- VMWare VSphere +- [Amazon Web Services security][] +- [Google Cloud Platform security][] +- [Metal As A Service(MAAS) hardening][] +- [Microsoft Azure security][] +- [VMware VSphere hardening guides][] ## Security Compliance @@ -63,3 +63,9 @@ check the [roadmap][] for current areas of work. 
[snap documentation]: https://snapcraft.io/docs/security-sandboxing [rocks-security]: https://canonical-rockcraft.readthedocs-hosted.com/en/latest/explanation/rockcraft/ [roadmap]: ../reference/roadmap +[Amazon Web Services security]: https://aws.amazon.com/security/ +[Google Cloud Platform security]: https://cloud.google.com/security/ +[Metal As A Service(MAAS) hardening]: https://maas.io/docs/snap/3.0/ui/hardening-your-maas-installation +[Microsoft Azure security]: https://docs.microsoft.com/en-us/azure/security/azure-security +[VMware VSphere hardening guides]: https://www.vmware.com/security/hardening-guides.html + diff --git a/docs/src/snap/howto/backup-restore.md b/docs/src/snap/howto/backup-restore.md index cb5345ac4..dc54a9cab 100644 --- a/docs/src/snap/howto/backup-restore.md +++ b/docs/src/snap/howto/backup-restore.md @@ -64,7 +64,7 @@ sudo k8s kubectl expose deployment nginx -n workloads --port 80 ## Install Velero Download the Velero binary from the -[releases page on github][releases] and place it in our `PATH`. In this case we +[releases page on GitHub][releases] and place it in our `PATH`. In this case we install the v1.14.1 Linux binary for AMD64 under `/usr/local/bin`: ```bash @@ -100,7 +100,7 @@ EOF ``` We are now ready to install Velero into the cluster, with an aws plugin that -[matches][aws-plugin-matching] the velero release: +[matches][aws-plugin-matching] the Velero release: ```bash SERVICE_URL="http://${SERVICE}.velero.svc:9000" diff --git a/docs/src/snap/howto/cis-hardening.md b/docs/src/snap/howto/cis-hardening.md index dcc476fcd..f44c65cb1 100644 --- a/docs/src/snap/howto/cis-hardening.md +++ b/docs/src/snap/howto/cis-hardening.md @@ -73,7 +73,7 @@ sudo -E kube-bench --version ck8s-dqlite-cis-1.24 --config-dir ./kube-bench-ck8s ## Harden your deployments Before running a CIS Kubernetes audit, it is essential to first harden your -{{product}} deployment to minimize vulnerabilities and ensure +{{product}} deployment to minimise vulnerabilities and ensure compliance with industry best practices, as defined by the CIS Kubernetes Benchmark. diff --git a/docs/src/snap/howto/contribute.md b/docs/src/snap/howto/contribute.md index 05e08f1d2..67f1372b9 100644 --- a/docs/src/snap/howto/contribute.md +++ b/docs/src/snap/howto/contribute.md @@ -88,7 +88,7 @@ it on the [Diátaxis website]. In essence though, this guides the way we categorise and write our documentation. You can see there are four main categories of documentation: -- **Tutorials** for guided walkthroughs +- **Tutorials** for guided walk-throughs - **How to** pages for specific tasks and goals - **Explanation** pages which give background reasons and, well, explanations - **Reference**, where you will find the commands, the roadmap, etc. diff --git a/docs/src/snap/howto/epa.md b/docs/src/snap/howto/epa.md index b030a0585..278cb3420 100644 --- a/docs/src/snap/howto/epa.md +++ b/docs/src/snap/howto/epa.md @@ -7,7 +7,7 @@ page][explain-epa] for details about how EPA applies to {{product}}. The content starts with the setup of the environment (including steps for using [MAAS][MAAS]). Then the setup of {{product}}, including the Multus & SR-IOV/DPDK networking components. Finally, the steps needed to test every EPA feature: -HugePages, Real-time Kernel, CPU Pinning / Numa Topology Awareness and +HugePages, Real-time Kernel, CPU Pinning / NUMA Topology Awareness and SR-IOV/DPDK.
## What you'll need @@ -106,7 +106,7 @@ reboot ```{dropdown} Explanation of boot options -- `intel_iommu=on`: Enables Intel's Input-Output Memory Management Unit (IOMMU), which is used for device virtualization and Direct Memory Access (DMA) remapping. +- `intel_iommu=on`: Enables Intel's Input-Output Memory Management Unit (IOMMU), which is used for device virtualisation and Direct Memory Access (DMA) remapping. - `iommu=pt`: Sets the IOMMU to passthrough mode, allowing devices to directly access physical memory without translation. - `usbcore.autosuspend=-1`: Disables USB autosuspend, preventing USB devices from being automatically suspended to save power. - `selinux=0`: Disables Security-Enhanced Linux (SELinux), a security module that provides mandatory access control. @@ -183,8 +183,8 @@ virtual functions. ``` ```{dropdown} Explanation of steps - * Breakdown of the content of the file /etc/netplan/99-sriov\_vfs.yaml : - * path: /etc/netplan/99-sriov\_vfs.yaml: This specifies the location of the configuration file. The "99" prefix in the filename usually indicates that it will be processed last, potentially overriding other configurations. + * Breakdown of the content of the file `/etc/netplan/99-sriov_vfs.yaml`: + * path: `/etc/netplan/99-sriov_vfs.yaml`: This specifies the location of the configuration file. The "99" prefix in the filename usually indicates that it will be processed last, potentially overriding other configurations. * enp152s0f1: This is the name of the physical network interface you want to create VFs on. This name may vary depending on your system. * virtual-function-count: 128: This is the key line that instructs Netplan to create 128 virtual functions on the specified physical interface. Each of these VFs can be assigned to a different virtual machine or container, effectively allowing them to share the physical adapter's bandwidth. * permissions: "0600": This is an optional line that sets the file permissions to 600 (read and write access only for the owner). @@ -212,7 +212,7 @@ virtual functions. ``` -Now enable DPDK, first by cloning the DPDK repo, and then placing the script which +Now enable DPDK, first by cloning the DPDK repository, and then placing the script which will bind the VFs to the VFIO-PCI driver in the location that will run automatically each time the system boots up, so the VFIO (Virtual Function I/O) bindings are applied consistently: ``` sudo chmod 0755 /var/lib/cloud/scripts/per-boot/dpdk_bind.sh ``` ```{dropdown} Explanation * Load VFIO Module (modprobe vfio-pci): If the DPDK directory exists, the script loads the VFIO-PCI kernel module. This module is necessary for the VFIO driver to function. - * The script uses the dpdk-devbind.py tool (included with DPDK) to list the available network devices and their drivers. + * The script uses the `dpdk-devbind.py` tool (included with DPDK) to list the available network devices and their drivers. * It filters this output using grep drv=iavf to find devices that are currently using the iavf driver (a common driver for Intel network adapters), excluding the physical network interface itself and just focusing on the virtual functions (VFs). - * Bind VFs to VFIO: The script uses dpdk-devbind.py again, this time with the \--bind=vfio-pci option, to bind the identified VFs to the VFIO-PCI driver. This step essentially tells the kernel to relinquish control of these devices to DPDK.
+ * Bind VFs to VFIO: The script uses `dpdk-devbind.py` again, this time with the `--bind=vfio-pci` option, to bind the identified VFs to the VFIO-PCI driver. This step essentially tells the kernel to relinquish control of these devices to DPDK. ``` To test that the VFIO Kernel Module and DPDK are enabled: @@ -276,7 +276,7 @@ With these preparation steps we have enabled the features of EPA: - NUMA and CPU Pinning are available to the first 32 CPUs - Real-Time Kernel is enabled - HugePages are enabled and 1000 1G huge pages are available -- SRIOV is enabled in the enp152s0f1 interface, with 128 virtual +- SR-IOV is enabled in the enp152s0f1 interface, with 128 virtual function interfaces bound to the vfio-pci driver (that could also use the iavf driver) - DPDK is enabled in all the 128 virtual function interfaces @@ -284,8 +284,8 @@ With these preparation steps we have enabled the features of EPA: ````{group-tab} MAAS -To prepare a machine for CPU isolation, Hugepages, real-time kernel, -SRIOV and DPDK we leverage cloud-init through MAAS. +To prepare a machine for CPU isolation, HugePages, real-time kernel, +SR-IOV and DPDK, we leverage cloud-init through MAAS. ``` #cloud-config @@ -391,10 +391,10 @@ power_state: ```{note} -In the above file, the realtime kernel 6.8 is installed from a private PPA. +In the above file, the real-time kernel 6.8 is installed from a private PPA. It was recently backported from 24.04 to 22.04 and is still going through some validation stages. Once it is officially released, it will be -installable via the Ubuntu Pro cli. +installable via the Ubuntu Pro CLI. ``` -* `datastore-url` expects a comma seperated list of addresses +* `datastore-url` expects a comma-separated list of addresses (e.g. `https://10.42.254.192:2379,https://10.42.254.193:2379,https://10.42.254.194:2379`) * `datastore-ca-crt` expects a certificate for the CA in PEM format diff --git a/docs/src/snap/howto/install/lxd.md b/docs/src/snap/howto/install/lxd.md index f72afaa4f..60f8df590 100644 --- a/docs/src/snap/howto/install/lxd.md +++ b/docs/src/snap/howto/install/lxd.md @@ -109,7 +109,7 @@ port assigned by Kubernetes. In this example, we will use [Microbot] as it provides a simple HTTP endpoint to expose. These steps can be applied to any other deployment. -First, initialize the k8s cluster with +First, initialise the k8s cluster with ``` lxc exec k8s -- sudo k8s bootstrap diff --git a/docs/src/snap/howto/install/multipass.md b/docs/src/snap/howto/install/multipass.md index 7c15847c5..d008b9815 100644 --- a/docs/src/snap/howto/install/multipass.md +++ b/docs/src/snap/howto/install/multipass.md @@ -1,6 +1,6 @@ # Install with Multipass (Ubuntu/Mac/Windows) -**Multipass** is a simple way to run Ubuntu in a +[Multipass][] is a simple way to run Ubuntu in a virtual machine, no matter what your underlying OS. It is the recommended way to run {{product}} on Windows and macOS systems, and is equally useful for running multiple instances of the `k8s` snap on Ubuntu too. @@ -26,7 +26,7 @@ Multipass is shipped as a snap for Ubuntu and other OSes which support the Windows users should download and install the Multipass installer from the website. -The latest version is available here , +The [latest Windows version][] is available to download, though you may wish to visit the [Multipass website][] for more details. @@ -37,7 +37,7 @@ though you may wish to visit the [Multipass website][] for more details. Users running macOS should download and install the Multipass installer from the website.
-The latest version is available here , +The [latest macOS version] is available to download, though you may wish to visit the [Multipass website][] for more details, including an alternate install method using `brew`. @@ -60,14 +60,14 @@ multipass launch 22.04 --name k8s-node --memory 4G --disk 20G --cpus 2 This command specifies: -- **22.04**: The Ubuntu image used as the basis for the instance -- **--name**: The name by which you will refer to the instance -- **--memory**: The memory to allocate -- **--disk**: The disk space to allocate -- **--cpus**: The number of CPU cores to reserve for this instance +- `22.04`: The Ubuntu image used as the basis for the instance +- `--name`: The name by which you will refer to the instance +- `--memory`: The memory to allocate +- `--disk`: The disk space to allocate +- `--cpus`: The number of CPU cores to reserve for this instance For more details of creating instances with Multipass, please see the -[Multipass documentation][multipass-options] about instance creation. +[Multipass documentation][Multipass-options] about instance creation. ## Access the created instance @@ -111,8 +111,11 @@ multipass purge +[Multipass]: https://multipass.run/ [snap-support]: https://snapcraft.io/docs/installing-snapd -[multipass-options]: https://multipass.run/docs/get-started-with-multipass-linux#heading--create-a-customised-instance +[Multipass-options]: https://multipass.run/docs/get-started-with-multipass-linux#heading--create-a-customised-instance [install instructions]: ./snap [Getting started]: ../../tutorial/getting-started [Multipass website]: https://multipass.run/docs +[latest Windows version]: https://multipass.run/download/windows +[latest macOS version]: https://multipass.run/download/macos diff --git a/docs/src/snap/howto/install/offline.md b/docs/src/snap/howto/install/offline.md index d44fb1642..e522efa50 100644 --- a/docs/src/snap/howto/install/offline.md +++ b/docs/src/snap/howto/install/offline.md @@ -91,7 +91,7 @@ All workloads in a Kubernetes cluster are run as an OCI image. Kubernetes needs to be able to fetch these images and load them into the container runtime. For {{product}}, it is also necessary to fetch the images used -by its features (network, dns, etc.) as well as any images that are +by its features (network, DNS, etc.) as well as any images that are needed to run specific workloads. ```{note} @@ -124,8 +124,8 @@ ghcr.io/canonical/metrics-server:0.7.0-ck2 ghcr.io/canonical/rawfile-localpv:0.8.0-ck4 ``` -A list of images can also be found in the `images.txt` file when unsquashing the -downloaded k8s snap. +A list of images can also be found in the `images.txt` file when the +downloaded k8s snap is unsquashed. Please ensure that the images used by workloads are tracked as well. diff --git a/docs/src/snap/howto/networking/default-ingress.md b/docs/src/snap/howto/networking/default-ingress.md index 2340c806d..e70d66157 100644 --- a/docs/src/snap/howto/networking/default-ingress.md +++ b/docs/src/snap/howto/networking/default-ingress.md @@ -55,7 +55,7 @@ You should see three options: ### TLS Secret You can create a TLS secret by following the official -[Kubernetes documentation][kubectl-create-secret-tls/]. +[Kubernetes documentation][kubectl-create-secret-TLS/]. Please remember to use `sudo k8s kubectl` (See the [kubectl-guide]).
Tell Ingress to use your new Ingress certificate: @@ -105,7 +105,7 @@ sudo k8s help disable -[kubectl-create-secret-tls/]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_create/kubectl_create_secret_tls/ +[kubectl-create-secret-TLS/]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_create/kubectl_create_secret_tls/ [proxy-protocol]: https://kubernetes.io/docs/reference/networking/service-protocols/#protocol-proxy-special [getting-started-guide]: ../../tutorial/getting-started [kubectl-guide]: ../../tutorial/kubectl diff --git a/docs/src/snap/howto/networking/default-loadbalancer.md b/docs/src/snap/howto/networking/default-loadbalancer.md index 4ca55115a..6552b87a0 100644 --- a/docs/src/snap/howto/networking/default-loadbalancer.md +++ b/docs/src/snap/howto/networking/default-loadbalancer.md @@ -9,7 +9,7 @@ explains how to configure and enable the load-balancer. This guide assumes the following: - You have root or sudo access to the machine. -- You have a bootstraped {{product}} cluster (see the [Getting +- You have a bootstrapped {{product}} cluster (see the [Getting Started][getting-started-guide] guide). ## Check the status and configuration @@ -32,7 +32,7 @@ sudo k8s get load-balancer This should output a list of values like this: -- `cidrs` - a list containing [cidr] or IP address range definitions of the +- `cidrs` - a list containing [CIDR] or IP address range definitions of the pool of IP addresses to use - `l2-mode` - whether L2 mode (failover) is turned on - `l2-interfaces` - optional list of interfaces to announce services over @@ -80,6 +80,5 @@ sudo k8s disable load-balancer - -[cidr]: https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing +[CIDR]: https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing [getting-started-guide]: ../../tutorial/getting-started diff --git a/docs/src/snap/howto/networking/dualstack.md b/docs/src/snap/howto/networking/dualstack.md index 406245ff5..efa1b0c19 100644 --- a/docs/src/snap/howto/networking/dualstack.md +++ b/docs/src/snap/howto/networking/dualstack.md @@ -140,7 +140,7 @@ limitations regarding CIDR size: - **/108 is the maximum size for the Service CIDR** Using a smaller value than `/108` for service CIDRs -may cause issues like failure to initialize the IPv6 allocator. This is due +may cause issues like failure to initialise the IPv6 allocator. This is due to the CIDR size being too large for Kubernetes to handle efficiently. See upstream reference: [kube-apiserver validation][kube-apiserver-test] diff --git a/docs/src/snap/howto/networking/ipv6.md b/docs/src/snap/howto/networking/ipv6.md index da88ef9a5..65bd5cc99 100644 --- a/docs/src/snap/howto/networking/ipv6.md +++ b/docs/src/snap/howto/networking/ipv6.md @@ -138,5 +138,5 @@ connectivity is set up correctly. **Service and Pod CIDR Sizing** Use `/108` as the maximum size for Service CIDRs. Larger ranges (e.g., `/64`) -may lead to allocation errors or Kubernetes failing to initialize the IPv6 +may lead to allocation errors or Kubernetes failing to initialise the IPv6 address allocator. 
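To make the CIDR-sizing limits in the hunks above concrete, a dual-stack bootstrap configuration could look like the sketch below. The `pod-cidr` and `service-cidr` key names are assumed from the bootstrap configuration reference, and the address ranges are illustrative only:

```yaml
# bootstrap.yaml -- illustrative dual-stack sketch; key names assumed,
# verify against the bootstrap configuration reference before use.
# The IPv6 service range is kept at /108, the documented maximum size.
pod-cidr: "10.1.0.0/16,fd01::/108"
service-cidr: "10.152.183.0/24,fd98::/108"
```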
diff --git a/docs/src/snap/howto/networking/proxy.md b/docs/src/snap/howto/networking/proxy.md index c6f78b674..d0f64414d 100644 --- a/docs/src/snap/howto/networking/proxy.md +++ b/docs/src/snap/howto/networking/proxy.md @@ -1,6 +1,6 @@ # Configure proxy settings for K8s -{{product}} packages a number of utilities (eg curl, helm) which need +{{product}} packages a number of utilities (for example curl, helm) which need to fetch resources they expect to find on the internet. In a constrained network environment, such access is usually controlled through proxies. diff --git a/docs/src/snap/howto/restore-quorum.md b/docs/src/snap/howto/restore-quorum.md index 99a8c4e8b..9050797c7 100755 --- a/docs/src/snap/howto/restore-quorum.md +++ b/docs/src/snap/howto/restore-quorum.md @@ -144,7 +144,7 @@ sudo tar xf recovery-k8s-dqlite-$timestamp-post-recovery.tar.gz \ -C /var/snap/k8s/common/var/lib/k8s-dqlite ``` -Node specific files need to be copied back to the k8s-dqlite state dir: +Node-specific files need to be copied back to the k8s-dqlite state directory: ``` sudo cp /var/snap/k8s/common/var/lib/k8s-dqlite.bkp/cluster.crt \ diff --git a/docs/src/snap/howto/storage/ceph.md b/docs/src/snap/howto/storage/ceph.md index e318ea8f1..b379d2e5f 100644 --- a/docs/src/snap/howto/storage/ceph.md +++ b/docs/src/snap/howto/storage/ceph.md @@ -29,7 +29,7 @@ this demonstration will have less than 5 OSDs. (See [placement groups]) ceph osd pool create kubernetes 128 ``` -Initialize the pool as a Ceph block device pool. +Initialise the pool as a Ceph block device pool. ``` rbd pool init kubernetes @@ -48,8 +48,7 @@ capabilities to administer your Ceph cluster: ceph auth get-or-create client.kubernetes mon 'profile rbd' osd 'profile rbd pool=kubernetes' mgr 'profile rbd pool=kubernetes' ``` -For more information on user capabilities in Ceph, see -[https://docs.ceph.com/en/latest/rados/operations/user-management/#authorization-capabilities] +For more information on user capabilities in Ceph, see the [authorisation capabilities page][]. ``` [client.kubernetes] @@ -60,7 +59,7 @@ Note the generated key, you will need it at a later step. ## Generate csi-config-map.yaml -First, get the fsid and the monitor addresses of your cluster. +First, get the `fsid` and the monitor addresses of your cluster. ``` sudo ceph mon dump @@ -79,7 +78,7 @@ election_strategy: 1 dumped monmap epoch 2 ``` -Keep note of the v1 IP (`10.0.0.136:6789`) and the fsid +Keep note of the v1 IP (`10.0.0.136:6789`) and the `fsid` (`6d5c12c9-6dfb-445a-940f-301aa7de0f29`) as you will need to refer to them soon. ``` @@ -131,11 +130,10 @@ Then apply: kubectl apply -f csi-kms-config-map.yaml ``` -If you do need to configure a KMS provider, an example ConfigMap is available in -the Ceph repository: -[https://github.com/ceph/ceph-csi/blob/devel/examples/kms/vault/kms-config.yaml] +If you do need to configure a KMS provider, an [example ConfigMap][] is available +in the Ceph repository. -Create the `ceph-config-map.yaml` which will be stored inside a ceph.conf file +Create the `ceph-config-map.yaml` which will be stored inside a `ceph.conf` file in the CSI containers. This `ceph.conf` file will be used by Ceph daemons on each container to authenticate with the Ceph cluster.
@@ -188,7 +186,7 @@ Then apply: kubectl apply -f csi-rbd-secret.yaml ``` -## Create ceph-csi's custom Kubernetes objects +## Create ceph-csi custom Kubernetes objects Create the ServiceAccount and RBAC ClusterRole/ClusterRoleBinding objects: @@ -251,7 +249,7 @@ Then apply: kubectl apply -f csi-rbd-sc.yaml ``` -## Create a Persistant Volume Claim (PVC) for a RBD-backed file-system +## Create a Persistent Volume Claim (PVC) for an RBD-backed file-system This PVC will allow users to request RBD-backed storage. @@ -279,7 +277,7 @@ Then apply: kubectl apply -f pvc.yaml ``` -## Create a pod that binds to the Rados Block Device PVC +## Create a pod that binds to the RADOS Block Device PVC Finally, create a pod configuration that uses the RBD-backed PVC. @@ -313,7 +311,7 @@ kubectl apply -f pod.yaml ## Verify that the pod is using the RBD PV -To verify that the csi-rbd-demo-pod is indeed using a RBD Persistant Volume, run +To verify that the `csi-rbd-demo-pod` is indeed using an RBD Persistent Volume, run the following commands, you should see information related to attached volumes in both of their outputs: @@ -335,3 +333,5 @@ Ceph documentation: [Intro to Ceph]. [block-devices-and-kubernetes]: https://docs.ceph.com/en/latest/rbd/rbd-kubernetes/ [placement groups]: https://docs.ceph.com/en/mimic/rados/operations/placement-groups/ [Intro to Ceph]: https://docs.ceph.com/en/latest/start/intro/ +[authorisation capabilities page]: https://docs.ceph.com/en/latest/rados/operations/user-management/#authorization-capabilities +[example ConfigMap]: https://github.com/ceph/ceph-csi/blob/devel/examples/kms/vault/kms-config.yaml diff --git a/docs/src/snap/howto/two-node-ha.md b/docs/src/snap/howto/two-node-ha.md index cdd894a79..e7b1cdc99 100644 --- a/docs/src/snap/howto/two-node-ha.md +++ b/docs/src/snap/howto/two-node-ha.md @@ -134,7 +134,7 @@ DRBD_MOUNT_DIR=/mnt/drbd0 sudo mkdir -p $DRBD_MOUNT_DIR ``` -Run the following *once* to initialize the filesystem. +Run the following *once* to initialise the file system. ``` sudo drbdadm up r0 @@ -145,7 +145,7 @@ sudo mkfs.ext4 /dev/drbd0 sudo drbdadm down r0 ``` -Add the DRBD device to the ``multipathd`` blacklist, ensuring that the multipath +Add the DRBD device to the ``multipathd`` blacklist, ensuring that the `multipath` service will not attempt to manage this device: ``` @@ -304,7 +304,7 @@ WantedBy=multi-user.target The ``two-node-ha.sh start_service`` command used by the service wrapper automatically detects the expected Dqlite role based on the DRBD state. It then takes the necessary steps to bootstrap the Dqlite state directories, -synchronize with the peer node (if available) and recover the database. +synchronise with the peer node (if available) and recover the database. ``` When a DRBD failover occurs, the ``two-node-ha-k8s`` service needs to be @@ -385,8 +385,8 @@ Remove the offending segments and restart the ``two-node-ha-k8s`` service. ### DRBD split brain -The DRBD cluster may enter a [split brain] state and stop synchronizing. The -chances increase if fencing (stonith) is not enabled. +The DRBD cluster may enter a [split brain] state and stop synchronising. The +chances increase if fencing (STONITH) is not enabled.
``` ubuntu@hatwo:~$ sudo drbdadm status diff --git a/docs/src/snap/index.md b/docs/src/snap/index.md index 10ec043e9..d4b1e4f92 100644 --- a/docs/src/snap/index.md +++ b/docs/src/snap/index.md @@ -9,7 +9,6 @@ Overview :hidden: :titlesonly: :maxdepth: 6 -Overview tutorial/index.md howto/index.md explanation/index.md diff --git a/docs/src/snap/reference/annotations.md b/docs/src/snap/reference/annotations.md index 5a8ebf2b1..0868cda20 100644 --- a/docs/src/snap/reference/annotations.md +++ b/docs/src/snap/reference/annotations.md @@ -6,7 +6,7 @@ the bootstrap configuration. | Name | Description | Values | |---------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------| -| `k8sd/v1alpha/lifecycle/skip-cleanup-kubernetes-node-on-remove` | If set, only microcluster and file cleanup are performed. This is helpful when an external controller (e.g., CAPI) manages the Kubernetes node lifecycle. By default, k8sd will remove the Kubernetes node when it is removed from the cluster. | "true"\|"false" | +| `k8sd/v1alpha/lifecycle/skip-cleanup-kubernetes-node-on-remove` | If set, only MicroCluster and file cleanup are performed. This is helpful when an external controller (e.g., CAPI) manages the Kubernetes node lifecycle. By default, k8sd will remove the Kubernetes node when it is removed from the cluster. | "true"\|"false" | | `k8sd/v1alpha/lifecycle/skip-stop-services-on-remove` | If set, the k8s services will not be stopped on the leaving node when removing the node. This is helpful when an external controller (e.g., CAPI) manages the Kubernetes node lifecycle. By default, all services are stopped on leaving nodes. | "true"\|"false" | | `k8sd/v1alpha1/csrsigning/auto-approve` | If set, certificate signing requests created by worker nodes are auto approved. | "true"\|"false" | | `k8sd/v1alpha1/calico/apiserver-enabled` | Enable the installation of the Calico API server to enable management of Calico APIs using kubectl. | "true"\|"false" | @@ -26,7 +26,7 @@ the bootstrap configuration. | `k8sd/v1alpha1/calico/autodetection-v6/cidrs` | Enable IP auto-detection based on which addresses on the nodes are within one of the provided CIDRs. | []string (comma separated) | | `k8sd/v1alpha1/cilium/devices` | List of devices facing cluster/external network (used for BPF NodePort, BPF masquerading and host firewall); supports `+` as wildcard in device name, e.g. `eth+,ens+` | string | | `k8sd/v1alpha1/cilium/direct-routing-device` | Device name used to connect nodes in direct routing mode (used by BPF NodePort, BPF host routing); if empty, automatically set to a device with k8s InternalIP/ExternalIP or with a default route. Bridge type devices are ignored in automatic selection | string | -| `k8sd/v1alpha1/cilium/vlan-bpf-bypass` | Comma separated list of VLAN tags to bypass eBPF filtering on native devices. Cilium enables firewalling on native devices and filters all unknown traffic, including VLAN 802.1q packets, which pass through the main device with the associated tag (e.g., VLAN device eth0.4000 and its main interface eth0). Supports `0` as wildcard for bypassing all VLANs. e.g. `4001,4002` | []string | +| `k8sd/v1alpha1/cilium/vlan-bpf-bypass` | Comma separated list of VLAN tags to bypass eBPF filtering on native devices. 
Cilium enables a firewall on native devices and filters all unknown traffic, including VLAN 802.1q packets, which pass through the main device with the associated tag (e.g., VLAN device eth0.4000 and its main interface eth0). Supports `0` as wildcard for bypassing all VLANs. e.g. `4001,4002` | []string | | `k8sd/v1alpha1/metrics-server/image-repo` | Override the default image repository for the metrics-server. | string | | `k8sd/v1alpha1/metrics-server/image-tag` | Override the default image tag for the metrics-server. | string | diff --git a/docs/src/snap/reference/architecture.md b/docs/src/snap/reference/architecture.md index 44981a45b..5cb387843 100644 --- a/docs/src/snap/reference/architecture.md +++ b/docs/src/snap/reference/architecture.md @@ -115,7 +115,7 @@ determines the node's role in the Kubernetes cluster. The `k8s` charm manages directing the `juju` controller to reach the model's eventually consistent state. For more detail on Juju's concepts, see the [Juju docs][]. -The administrator may choose any supported cloud-types (Openstack, MAAS, AWS, +The administrator may choose any supported cloud-types (OpenStack, MAAS, AWS, GCP, Azure...) on which to manage the machines making up the Kubernetes cluster. Juju selects a single leader unit per application to act as a centralised figure with the model. The `k8s` leader oversees Kubernetes diff --git a/docs/src/snap/reference/certificates.md b/docs/src/snap/reference/certificates.md index 29df8bdb0..996d80f6c 100644 --- a/docs/src/snap/reference/certificates.md +++ b/docs/src/snap/reference/certificates.md @@ -26,13 +26,13 @@ their issuance. | **Common Name** | **Purpose** | **File Location** | **Primary Function** | **Signed By** | |--------------------------------------------|-----------|------------------------------------------------------|------------------------------------------------------------------|-----------------------------| | `kube-apiserver` | Server | `/etc/kubernetes/pki/apiserver.crt` | Securing the API server endpoint | `kubernetes-ca` | -| `apiserver-kubelet-client` | Client | `/etc/kubernetes/pki/apiserver-kubelet-client.crt` | API server communication with kubelets | `kubernetes-ca-client` | +| `apiserver-kubelet-client` | Client | `/etc/kubernetes/pki/apiserver-kubelet-client.crt` | API server communication with kubelet | `kubernetes-ca-client` | | `kube-apiserver-etcd-client` | Client | `/etc/kubernetes/pki/apiserver-etcd-client.crt` | API server communication with etcd | `kubernetes-ca-client` | | `front-proxy-client` | Client | `/etc/kubernetes/pki/front-proxy-client.crt` | API server communication with the front-proxy | `kubernetes-front-proxy-ca` | | `system:kube-controller-manager` | Client | `/etc/kubernetes/pki/controller-manager.crt` | Communication between the controller manager and the API server | `kubernetes-ca-client` | | `system:kube-scheduler` | Client | `/etc/kubernetes/pki/scheduler.crt` | Communication between the scheduler and the API server | `kubernetes-ca-client` | | `system:kube-proxy` | Client | `/etc/kubernetes/pki/proxy.crt` | Communication between kube-proxy and the API server | `kubernetes-ca-client` | -| `system:node:$hostname` | Client | `/etc/kubernetes/pki/kubelet-client.crt` | Authentication of kubelets to the API server | `kubernetes-ca-client` | +| `system:node:$hostname` | Client | `/etc/kubernetes/pki/kubelet-client.crt` | Authentication of kubelet to the API server | `kubernetes-ca-client` | | `k8s-dqlite` | Client | 
`/var/snap/k8s/common/var/lib/k8s-dqlite/cluster.crt`| Communication between k8s-dqlite nodes and API server | `self-signed` | | `root@$hostname` | Client | `/var/snap/k8s/common/var/lib/k8s-dqlite/cluster.crt` | Communication between k8sd nodes | `self-signed` | diff --git a/docs/src/snap/reference/control-plane-join-config-reference.md b/docs/src/snap/reference/control-plane-join-config-reference.md index 855b816a4..06875521a 100755 --- a/docs/src/snap/reference/control-plane-join-config-reference.md +++ b/docs/src/snap/reference/control-plane-join-config-reference.md @@ -1,7 +1,7 @@ # Control plane node join configuration file reference A YAML file can be supplied to the `k8s join-cluster ` command to configure and -customize new nodes. +customise new nodes. This reference section provides all available options for control plane nodes. diff --git a/docs/src/snap/reference/releases.md b/docs/src/snap/reference/releases.md index 2e73630c9..b6035eb58 100644 --- a/docs/src/snap/reference/releases.md +++ b/docs/src/snap/reference/releases.md @@ -18,7 +18,7 @@ Currently {{product}} is working towards general availability, but you can install it now to try: - **Clustering** - need high availability or just an army of worker nodes? - {{product}} is emminently scaleable, see the [tutorial on adding + {{product}} is eminently scalable, see the [tutorial on adding more nodes][nodes]. - **Networking** - Our built-in network component allows cluster administrators to automatically scale and secure network policies across the cluster. Find diff --git a/docs/src/snap/reference/roadmap.md b/docs/src/snap/reference/roadmap.md index 63550f85c..a97ae5657 100644 --- a/docs/src/snap/reference/roadmap.md +++ b/docs/src/snap/reference/roadmap.md @@ -7,7 +7,7 @@ future direction and priorities of the project. Our roadmap matches the cadence of the Ubuntu release cycle, so `24.10` is the same as the release date for Ubuntu 24.10. This does not precisely map to the release cycle of Kubernetes versions, so please consult the [release notes] for -specifics of whatfeatures have been delivered. +specifics of what features have been delivered. ``` {csv-table} {{product}} public roadmap diff --git a/docs/src/snap/reference/troubleshooting.md b/docs/src/snap/reference/troubleshooting.md index f6b44a3f1..a4edf0d94 100644 --- a/docs/src/snap/reference/troubleshooting.md +++ b/docs/src/snap/reference/troubleshooting.md @@ -44,7 +44,7 @@ the kubelet. kubelet needs a feature from cgroup and the kernel may not be set up appropriately to provide the cpuset feature. ``` -E0125 00:20:56.003890 2172 kubelet.go:1466] "Failed to start ContainerManager" err="failed to initialize top level QOS containers: root container [kubepods] doesn't exist" +E0125 00:20:56.003890 2172 kubelet.go:1466] "Failed to start ContainerManager" err="failed to initialise top level QOS containers: root container [kubepods] doesn't exist" ``` ### Explanation @@ -54,7 +54,7 @@ An excellent deep-dive of the issue exists at Commenter [@haircommander][] [states][kubernetes-122955-2020403422] > basically: we've figured out that this issue happens because libcontainer -> doesn't initialize the cpuset cgroup for the kubepods slice when the kubelet +> doesn't initialise the cpuset cgroup for the kubepods slice when the kubelet > initially calls into it to do so. This happens because there isn't a cpuset > defined on the top level of the cgroup. however, we fail to validate all of > the cgroup controllers we need are present. 
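A quick way to confirm this failure mode on an affected node is to compare the controllers available at the root of the cgroup v2 hierarchy with those actually delegated to children (a sketch, assuming the unified hierarchy is mounted at `/sys/fs/cgroup`):

```
# If "cpuset" appears in cgroup.controllers but not in cgroup.subtree_control,
# the kubelet cannot create the kubepods cpuset hierarchy and fails as above.
cat /sys/fs/cgroup/cgroup.controllers
cat /sys/fs/cgroup/cgroup.subtree_control
```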
It's possible this is a @@ -68,7 +68,7 @@ Commenter [@haircommander][] [states][kubernetes-122955-2020403422] ### Solution This is in the process of being fixed upstream via -[kubernetes/kuberetes #125923][kubernetes-125923]. +[kubernetes/kubernetes #125923][kubernetes-125923]. In the meantime, the best solution is to create a `Delegate=yes` configuration in systemd. diff --git a/docs/src/snap/reference/worker-join-config-reference.md b/docs/src/snap/reference/worker-join-config-reference.md index 5f170f484..d10ea5ba2 100755 --- a/docs/src/snap/reference/worker-join-config-reference.md +++ b/docs/src/snap/reference/worker-join-config-reference.md @@ -1,7 +1,7 @@ # Worker node join configuration file reference A YAML file can be supplied to the `k8s join-cluster ` command to configure and -customize new worker nodes. +customise new worker nodes. This reference section provides all available options for worker nodes. diff --git a/docs/src/snap/tutorial/add-remove-nodes.md b/docs/src/snap/tutorial/add-remove-nodes.md index 24b2bb88a..72ff32988 100644 --- a/docs/src/snap/tutorial/add-remove-nodes.md +++ b/docs/src/snap/tutorial/add-remove-nodes.md @@ -8,7 +8,7 @@ This tutorial simplifies the concept by creating a cluster within a controlled environment using two Multipass VMs. The approach here allows us to focus on the foundational aspects of clustering using {{product}} without the complexities of a full-scale, production setup. If your nodes are already -installed, you can skip the multipass setup and go to [step 2](step2). +installed, you can skip the Multipass setup and go to [step 2](step2). ## Before starting @@ -76,7 +76,7 @@ A base64 token will be printed to your terminal. Keep it handy as you will need it for the next step. ```{note} It's advisable to name the new node after the hostname of the - worker node (in this case, the VM's hostname is worker). + worker node (in this case, the VM hostname is worker). ``` ### 3. 
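Returning to the troubleshooting fix above: a sketch of that interim `Delegate=yes` configuration as a systemd drop-in. The unit name `snap.k8s.kubelet.service` is an assumption; substitute whichever unit runs the kubelet on your system.

```
# Grant cgroup delegation to the kubelet's unit (unit name assumed, not
# verified) and restart it so the drop-in takes effect.
sudo mkdir -p /etc/systemd/system/snap.k8s.kubelet.service.d
sudo tee /etc/systemd/system/snap.k8s.kubelet.service.d/10-delegate.conf <<EOF
[Service]
Delegate=yes
EOF
sudo systemctl daemon-reload
sudo systemctl restart snap.k8s.kubelet.service
```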
Join the cluster on the worker node diff --git a/docs/tools/.custom_wordlist.txt b/docs/tools/.custom_wordlist.txt index 7893c42d2..40495c3f4 100644 --- a/docs/tools/.custom_wordlist.txt +++ b/docs/tools/.custom_wordlist.txt @@ -1,66 +1,266 @@ +adapter's +adapters +allocatable +allocator +AlwaysPullImages +api apiserver apparmor AppArmor +args +ARP +asn +ASN autostart +autosuspend aws +backend +backported +balancers +benoitblanchon +bgp +BGP +bootloader +CABPCK +CACPCK +capi CAPI +CAs +Center +ceph Ceph +cephcsi +cephx cgroup -CIDRS +cgroups +cidr +CIDR +cidrs CIDRs +CK8sControlPlane +CLI +CLIs +CloudFormation +ClusterAPI +clusterctl +ClusterRole +ClusterRoleBinding +CMK CNI +Commenter config +configMap +ConfigMap containerd CoreDNS +Corosync CPUs cpuset +crt +csi +CSI +CSRs +cyclictest daemonset +DaemonSet datastore +datastores dbus +de +deallocation +deployable +discoverable +DMA +dns DNS +DPDK +DRBD +drv dqlite +EAL EasyRSA +enp +enum etcd +EventRateLimit +failover +gapped GCP ghcr +Gi +github +GPLv +Graber +Graber's grafana +haircommander +Harbor +hostname +hostpath +HPC html http https +HugePage +HugePages +iavf +init initialise +integrations +io +IOMMU +IOV +ip +IPv +IPv4 +IPv6 +IRQs +Jinja +jitter juju +Juju's +KMS +kube +kube-apiserver +kube-controller-manager +kube-proxy +kube-scheduler +kube-system kubeconfig kubectl kubelet kubepods kubernetes +latencies +Latencies libcontainer lifecycle linux +Lite's +LoadBalancer localhost +Lookaside +lookups +loopback +LPM lxc +LxcSecurity LXD MAAS +macOS +Maskable +MCE +MetalLB +Microbot +MicroCluster +MicroK +MicroK8s +MinIO +modprobe +Moonray +mq +mtu +MTU +multicast +MULTICAST Multipass +Multus nameservers +Netplan +NetworkAttachmentDefinition +NFD +NFV +nginx NGINX +NIC +NMI +nodeport +nohz +NUMA +numactl OCI +OOM OpenStack +OSDs +ParseDuration +passthrough +passwordless +pci +PEM +performant +PID +PMD +PMDs +PPA proc +programmatically +provisioner +PRs +PV +qdisc +qlen +QoS +RADOS +rbac RBAC +RBD +rc +RCU +README +regctl regsync roadmap Rockcraft +rollout +runtimes rw +sandboxed +SANs +scalable +SCHED +sControlPlane +sd +SELinux +ServiceAccount +Snapcraft snapd +SR-IOV stackexchange +stgraber +STONITH +StorageClass +sudo sys systemd +taskset +Telco +throughs +tickless +TLB +tls TLS +toml +TSC +TTL +ttyS ubuntu unix +unschedulable +unsquashed +Velero +vf +VF +vfio +VFIO +VFs +virtualised +VLAN VMs -VMWare +VMware +VNFs +VPCs VSphere +WIP www yaml +YAMLs
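For reference, step 3 of the add/remove-nodes tutorial touched above reduces to a single command on the worker, using the base64 token generated on the control plane in step 2:

```
# Run on the worker node; <join-token> is a placeholder for the token
# printed by `k8s get-join-token` on the control plane node.
sudo k8s join-cluster <join-token>
```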